git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge tag 'iwlwifi-next-for-kalle-2015-10-25' of https://git.kernel.org/pub/scm/linux...
author Kalle Valo <kvalo@codeaurora.org>
Wed, 28 Oct 2015 18:48:26 +0000 (20:48 +0200)
committer Kalle Valo <kvalo@codeaurora.org>
Wed, 28 Oct 2015 18:48:26 +0000 (20:48 +0200)
* bug fix for TDLS
* fixes and cleanups in scan
* support for several scan plans
* improvements in FTM
* fixes in FW API
* improvements in the failure paths when the bus is dead
* various other small things here and there

1319 files changed:
Documentation/device-mapper/snapshot.txt
Documentation/devicetree/bindings/input/cypress,cyapa.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
Documentation/devicetree/bindings/net/smsc-lan87xx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/sh-msiof.txt
Documentation/devicetree/bindings/usb/renesas_usbhs.txt
Documentation/filesystems/nfs/nfsroot.txt
Documentation/input/multi-touch-protocol.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/vrf.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/word-at-a-time.h
arch/arc/include/asm/Kbuild
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/exynos4412.dtsi
arch/arm/boot/dts/exynos5250-smdk5250.dts
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
arch/arm/boot/dts/imx53-qsrb.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6qdl-rex.dtsi
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/mach-exynos/mcpm-exynos.c
arch/arm/mach-exynos/regs-pmu.h
arch/arm/mach-gemini/board-nas4220b.c
arch/arm/mach-gemini/board-wbd111.c
arch/arm/mach-gemini/board-wbd222.c
arch/arm/net/bpf_jit_32.c
arch/arm64/Makefile
arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/include/uapi/asm/signal.h
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/insn.c
arch/arm64/kernel/setup.c
arch/arm64/mm/fault.c
arch/avr32/include/asm/Kbuild
arch/blackfin/include/asm/Kbuild
arch/c6x/include/asm/Kbuild
arch/cris/include/asm/Kbuild
arch/frv/include/asm/Kbuild
arch/h8300/include/asm/Kbuild
arch/hexagon/include/asm/Kbuild
arch/ia64/include/asm/Kbuild
arch/m32r/include/asm/Kbuild
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/linkage.h
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/metag/include/asm/Kbuild
arch/microblaze/include/asm/Kbuild
arch/mips/cavium-octeon/setup.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/io.h
arch/mips/include/uapi/asm/swab.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/jz4740/board-qi_lb60.c
arch/mips/jz4740/gpio.c
arch/mips/kernel/cps-vec.S
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/mm/dma-default.c
arch/mips/net/bpf_jit_asm.S
arch/mn10300/include/asm/Kbuild
arch/nios2/include/asm/Kbuild
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/word-at-a-time.h
arch/powerpc/mm/hash_native_64.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/ps3/os-area.c
arch/s390/boot/compressed/Makefile
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/include/asm/Kbuild
arch/s390/include/asm/numa.h
arch/s390/include/asm/topology.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/vtime.c
arch/s390/numa/mode_emu.c
arch/s390/numa/numa.c
arch/score/include/asm/Kbuild
arch/sh/include/asm/page.h
arch/sparc/crypto/aes_glue.c
arch/sparc/crypto/camellia_glue.c
arch/sparc/crypto/des_glue.c
arch/tile/gxio/mpipe.c
arch/tile/include/asm/word-at-a-time.h
arch/um/include/asm/Kbuild
arch/unicore32/include/asm/Kbuild
arch/x86/Kconfig
arch/x86/crypto/camellia_aesni_avx_glue.c
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/uapi/asm/bitsperlong.h
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/crash.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kvm/emulate.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/init_64.c
arch/x86/platform/efi/efi.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/xtensa/include/asm/Kbuild
block/blk-mq-cpumap.c
block/blk-mq-sysfs.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-mq.h
crypto/ahash.c
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbutils.c
drivers/base/power/domain_governor.c
drivers/base/regmap/regmap-debugfs.c
drivers/block/loop.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/bluetooth/Kconfig
drivers/bluetooth/ath3k.c
drivers/bluetooth/btbcm.c
drivers/bluetooth/btintel.c
drivers/bluetooth/btintel.h
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_ath.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_intel.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_qca.c
drivers/bluetooth/hci_uart.h
drivers/bus/Kconfig
drivers/clk/mvebu/clk-cpu.c
drivers/clk/samsung/clk-cpu.c
drivers/clk/ti/clk-3xxx.c
drivers/clk/ti/clk-7xx.c
drivers/clk/ti/clkt_dflt.c
drivers/clocksource/rockchip_timer.c
drivers/clocksource/timer-keystone.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/devfreq/devfreq.c
drivers/dma/at_xdmac.c
drivers/dma/dmaengine.c
drivers/dma/dw/core.c
drivers/dma/idma64.c
drivers/dma/pxa_dma.c
drivers/dma/sun4i-dma.c
drivers/dma/xgene-dma.c
drivers/dma/zx296702_dma.c
drivers/firmware/efi/libstub/arm-stub.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/include/cgs_linux.h
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp_core.c
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_fb.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/virtio/virtgpu_debugfs.c
drivers/gpu/drm/virtio/virtgpu_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/i2c-core.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/usnic/usnic.h
drivers/infiniband/hw/usnic/usnic_abi.h
drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
drivers/infiniband/hw/usnic/usnic_common_util.h
drivers/infiniband/hw/usnic/usnic_debugfs.c
drivers/infiniband/hw/usnic/usnic_debugfs.h
drivers/infiniband/hw/usnic/usnic_fwd.c
drivers/infiniband/hw/usnic/usnic_fwd.h
drivers/infiniband/hw/usnic/usnic_ib.h
drivers/infiniband/hw/usnic/usnic_ib_main.c
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
drivers/infiniband/hw/usnic/usnic_log.h
drivers/infiniband/hw/usnic/usnic_transport.c
drivers/infiniband/hw/usnic/usnic_transport.h
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/infiniband/hw/usnic/usnic_uiom.h
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
drivers/infiniband/hw/usnic/usnic_vnic.c
drivers/infiniband/hw/usnic/usnic_vnic.h
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/input/joystick/walkera0701.c
drivers/input/keyboard/omap4-keypad.c
drivers/input/misc/pm8941-pwrkey.c
drivers/input/misc/uinput.c
drivers/input/mouse/cyapa_gen6.c
drivers/input/mouse/elan_i2c.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elan_i2c_i2c.c
drivers/input/mouse/elan_i2c_smbus.c
drivers/input/mouse/synaptics.c
drivers/input/serio/libps2.c
drivers/input/serio/parkbd.c
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/imx6ul_tsc.c
drivers/input/touchscreen/mms114.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm.c
drivers/irqchip/irq-gic-v3-its-pci-msi.c
drivers/irqchip/irq-gic-v3-its.c
drivers/isdn/hisax/isdnl2.c
drivers/isdn/mISDN/dsp_pipeline.c
drivers/isdn/mISDN/layer2.c
drivers/mcb/mcb-pci.c
drivers/md/bitmap.c
drivers/md/dm-cache-policy-cleaner.c
drivers/md/dm-exception-store.c
drivers/md/dm-exception-store.h
drivers/md/dm-raid.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap-transient.c
drivers/md/dm-snap.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/mfd/intel-lpss.h
drivers/mfd/max77843.c
drivers/misc/cxl/api.c
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/file.c
drivers/misc/cxl/irq.c
drivers/misc/cxl/native.c
drivers/misc/cxl/pci.c
drivers/misc/mei/hbm.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/sunxi_nand.c
drivers/net/Kconfig
drivers/net/arcnet/arcdevice.h
drivers/net/arcnet/arcnet.c
drivers/net/arcnet/com20020-pci.c
drivers/net/arcnet/com20020.c
drivers/net/arcnet/com20020.h
drivers/net/bonding/bond_main.c
drivers/net/can/at91_can.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/sun4i_can.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2.h
drivers/net/dsa/bcm_sf2_regs.h
drivers/net/dsa/mv88e6060.c
drivers/net/dsa/mv88e6171.c
drivers/net/dsa/mv88e6352.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/dummy.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/Makefile
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnxt/Makefile [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/ucc_geth_ethtool.c
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.c
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_devids.h [new file with mode: 0644]
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_devids.h [new file with mode: 0644]
drivers/net/ethernet/intel/i40evf/i40e_prototype.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.h
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlxsw/Kconfig
drivers/net/ethernet/mellanox/mlxsw/Makefile
drivers/net/ethernet/mellanox/mlxsw/cmd.h
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/item.h
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/pci.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/mellanox/mlxsw/txheader.h
drivers/net/ethernet/microchip/encx24j600.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/qlogic/Kconfig
drivers/net/ethernet/qlogic/Makefile
drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
drivers/net/ethernet/qlogic/qed/Makefile [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_cxt.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_cxt.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_dev.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_dev_api.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_hsi.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_hw.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_hw.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_init_ops.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_init_ops.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_int.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_int.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_l2.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_main.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_mcp.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_mcp.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_sp.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_spq.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/Makefile [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_main.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/cpmac.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/fjes/fjes_ethtool.c
drivers/net/geneve.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/macvtap.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/aquantia.c
drivers/net/phy/bcm-phy-lib.c
drivers/net/phy/dp83848.c [new file with mode: 0644]
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/smsc.c
drivers/net/phy/teranetics.c
drivers/net/ppp/pppoe.c
drivers/net/usb/Kconfig
drivers/net/usb/asix_common.c
drivers/net/usb/asix_devices.c
drivers/net/usb/dm9601.c
drivers/net/usb/mcs7830.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/sr9800.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/Kconfig
drivers/net/wireless/Makefile
drivers/net/wireless/ath/ath10k/bmi.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/hif.h
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htc.h
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/thermal.c
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/htc_mbox.c
drivers/net/wireless/ath/ath9k/ar9002_phy.h
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/dfs_pattern_detector.c
drivers/net/wireless/ath/wil6210/Kconfig
drivers/net/wireless/ath/wil6210/Makefile
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/pmc.c
drivers/net/wireless/ath/wil6210/rx_reorder.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx.h
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wil_crash_dump.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/brcm80211/Kconfig
drivers/net/wireless/brcm80211/brcmfmac/bus.h
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
drivers/net/wireless/brcm80211/brcmfmac/chip.c
drivers/net/wireless/brcm80211/brcmfmac/common.h
drivers/net/wireless/brcm80211/brcmfmac/core.c
drivers/net/wireless/brcm80211/brcmfmac/core.h
drivers/net/wireless/brcm80211/brcmfmac/debug.c
drivers/net/wireless/brcm80211/brcmfmac/debug.h
drivers/net/wireless/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/brcm80211/brcmfmac/fwil.h
drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/mwifiex/Kconfig
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/debugfs.c
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/usb.h
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/realtek/Makefile [new file with mode: 0644]
drivers/net/wireless/realtek/rtl818x/Kconfig [moved from drivers/net/wireless/rtl818x/Kconfig with 100% similarity]
drivers/net/wireless/realtek/rtl818x/Makefile [moved from drivers/net/wireless/rtl818x/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile [moved from drivers/net/wireless/rtl818x/rtl8180/Makefile with 69% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c [moved from drivers/net/wireless/rtl818x/rtl8180/dev.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c [moved from drivers/net/wireless/rtl818x/rtl8180/grf5101.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h [moved from drivers/net/wireless/rtl818x/rtl8180/grf5101.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c [moved from drivers/net/wireless/rtl818x/rtl8180/max2820.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h [moved from drivers/net/wireless/rtl818x/rtl8180/max2820.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h [moved from drivers/net/wireless/rtl818x/rtl8180/rtl8180.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c [moved from drivers/net/wireless/rtl818x/rtl8180/rtl8225.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h [moved from drivers/net/wireless/rtl818x/rtl8180/rtl8225.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c [moved from drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h [moved from drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c [moved from drivers/net/wireless/rtl818x/rtl8180/sa2400.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h [moved from drivers/net/wireless/rtl818x/rtl8180/sa2400.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile [moved from drivers/net/wireless/rtl818x/rtl8187/Makefile with 62% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c [moved from drivers/net/wireless/rtl818x/rtl8187/dev.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c [moved from drivers/net/wireless/rtl818x/rtl8187/leds.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h [moved from drivers/net/wireless/rtl818x/rtl8187/leds.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c [moved from drivers/net/wireless/rtl818x/rtl8187/rfkill.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h [moved from drivers/net/wireless/rtl818x/rtl8187/rfkill.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h [moved from drivers/net/wireless/rtl818x/rtl8187/rtl8187.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c [moved from drivers/net/wireless/rtl818x/rtl8187/rtl8225.c with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h [moved from drivers/net/wireless/rtl818x/rtl8187/rtl8225.h with 100% similarity]
drivers/net/wireless/realtek/rtl818x/rtl818x.h [moved from drivers/net/wireless/rtl818x/rtl818x.h with 100% similarity]
drivers/net/wireless/realtek/rtl8xxxu/Kconfig [new file with mode: 0644]
drivers/net/wireless/realtek/rtl8xxxu/Makefile [new file with mode: 0644]
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c [new file with mode: 0644]
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h [new file with mode: 0644]
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h [new file with mode: 0644]
drivers/net/wireless/realtek/rtlwifi/Kconfig [moved from drivers/net/wireless/rtlwifi/Kconfig with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/Makefile [moved from drivers/net/wireless/rtlwifi/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/base.c [moved from drivers/net/wireless/rtlwifi/base.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/base.h [moved from drivers/net/wireless/rtlwifi/base.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile [moved from drivers/net/wireless/rtlwifi/btcoexist/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h [moved from drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h [moved from drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c [moved from drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h [moved from drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/cam.c [moved from drivers/net/wireless/rtlwifi/cam.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/cam.h [moved from drivers/net/wireless/rtlwifi/cam.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/core.c [moved from drivers/net/wireless/rtlwifi/core.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/core.h [moved from drivers/net/wireless/rtlwifi/core.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/debug.c [moved from drivers/net/wireless/rtlwifi/debug.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/debug.h [moved from drivers/net/wireless/rtlwifi/debug.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/efuse.c [moved from drivers/net/wireless/rtlwifi/efuse.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/efuse.h [moved from drivers/net/wireless/rtlwifi/efuse.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/pci.c [moved from drivers/net/wireless/rtlwifi/pci.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/pci.h [moved from drivers/net/wireless/rtlwifi/pci.h with 99% similarity]
drivers/net/wireless/realtek/rtlwifi/ps.c [moved from drivers/net/wireless/rtlwifi/ps.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/ps.h [moved from drivers/net/wireless/rtlwifi/ps.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h [moved from drivers/net/wireless/rtlwifi/pwrseqcmd.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rc.c [moved from drivers/net/wireless/rtlwifi/rc.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rc.h [moved from drivers/net/wireless/rtlwifi/rc.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/regd.c [moved from drivers/net/wireless/rtlwifi/regd.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/regd.h [moved from drivers/net/wireless/rtlwifi/regd.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8188ee/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/def.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/dm.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/dm.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/fw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/fw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/hw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/hw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/led.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/led.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/phy.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/phy.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/reg.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/rf.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/rf.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/sw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/sw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/table.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/table.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c [moved from drivers/net/wireless/rtlwifi/rtl8188ee/trx.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h [moved from drivers/net/wireless/rtlwifi/rtl8188ee/trx.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8192c/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c [moved from drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h [moved from drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c [moved from drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h [moved from drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c [moved from drivers/net/wireless/rtlwifi/rtl8192c/main.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c [moved from drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h [moved from drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8192ce/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/def.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c [moved from drivers/net/wireless/rtlwifi/rtl8192ce/dm.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/dm.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c [moved from drivers/net/wireless/rtlwifi/rtl8192ce/hw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/hw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c [moved from drivers/net/wireless/rtlwifi/rtl8192ce/led.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/led.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c [moved from drivers/net/wireless/rtlwifi/rtl8192ce/phy.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/phy.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/reg.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c [moved from drivers/net/wireless/rtlwifi/rtl8192ce/rf.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/rf.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c [moved from drivers/net/wireless/rtlwifi/rtl8192ce/sw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/sw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c [moved from drivers/net/wireless/rtlwifi/rtl8192ce/table.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/table.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c [moved from drivers/net/wireless/rtlwifi/rtl8192ce/trx.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/trx.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8192cu/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/def.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c [moved from drivers/net/wireless/rtlwifi/rtl8192cu/dm.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/dm.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c [moved from drivers/net/wireless/rtlwifi/rtl8192cu/hw.c with 99% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/hw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c [moved from drivers/net/wireless/rtlwifi/rtl8192cu/led.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/led.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c [moved from drivers/net/wireless/rtlwifi/rtl8192cu/mac.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/mac.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c [moved from drivers/net/wireless/rtlwifi/rtl8192cu/phy.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/phy.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/reg.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c [moved from drivers/net/wireless/rtlwifi/rtl8192cu/rf.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/rf.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c [moved from drivers/net/wireless/rtlwifi/rtl8192cu/sw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/sw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c [moved from drivers/net/wireless/rtlwifi/rtl8192cu/table.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/table.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c [moved from drivers/net/wireless/rtlwifi/rtl8192cu/trx.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h [moved from drivers/net/wireless/rtlwifi/rtl8192cu/trx.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8192de/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/def.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c [moved from drivers/net/wireless/rtlwifi/rtl8192de/dm.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/dm.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c [moved from drivers/net/wireless/rtlwifi/rtl8192de/fw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/fw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c [moved from drivers/net/wireless/rtlwifi/rtl8192de/hw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/hw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c [moved from drivers/net/wireless/rtlwifi/rtl8192de/led.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/led.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c [moved from drivers/net/wireless/rtlwifi/rtl8192de/phy.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/phy.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/reg.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c [moved from drivers/net/wireless/rtlwifi/rtl8192de/rf.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/rf.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c [moved from drivers/net/wireless/rtlwifi/rtl8192de/sw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/sw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c [moved from drivers/net/wireless/rtlwifi/rtl8192de/table.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/table.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c [moved from drivers/net/wireless/rtlwifi/rtl8192de/trx.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h [moved from drivers/net/wireless/rtlwifi/rtl8192de/trx.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8192ee/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/def.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/dm.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/dm.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/fw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/fw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/hw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/hw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/led.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/led.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/phy.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/phy.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/reg.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/rf.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/rf.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/sw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/sw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/table.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/table.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c [moved from drivers/net/wireless/rtlwifi/rtl8192ee/trx.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h [moved from drivers/net/wireless/rtlwifi/rtl8192ee/trx.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8192se/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/def.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c [moved from drivers/net/wireless/rtlwifi/rtl8192se/dm.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/dm.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c [moved from drivers/net/wireless/rtlwifi/rtl8192se/fw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/fw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c [moved from drivers/net/wireless/rtlwifi/rtl8192se/hw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/hw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c [moved from drivers/net/wireless/rtlwifi/rtl8192se/led.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/led.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c [moved from drivers/net/wireless/rtlwifi/rtl8192se/phy.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/phy.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/reg.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c [moved from drivers/net/wireless/rtlwifi/rtl8192se/rf.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/rf.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c [moved from drivers/net/wireless/rtlwifi/rtl8192se/sw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/sw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c [moved from drivers/net/wireless/rtlwifi/rtl8192se/table.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/table.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c [moved from drivers/net/wireless/rtlwifi/rtl8192se/trx.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h [moved from drivers/net/wireless/rtlwifi/rtl8192se/trx.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8723ae/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/btc.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/def.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/dm.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/dm.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/fw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/fw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/hw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/hw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/led.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/led.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/phy.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/phy.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/reg.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/rf.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/rf.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/sw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/sw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/table.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/table.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c [moved from drivers/net/wireless/rtlwifi/rtl8723ae/trx.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h [moved from drivers/net/wireless/rtlwifi/rtl8723ae/trx.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8723be/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/def.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/dm.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/dm.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/fw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/fw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/hw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/hw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/led.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/led.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/phy.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/phy.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/reg.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/rf.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/rf.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/sw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/sw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/table.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/table.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c [moved from drivers/net/wireless/rtlwifi/rtl8723be/trx.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h [moved from drivers/net/wireless/rtlwifi/rtl8723be/trx.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8723com/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c [moved from drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h [moved from drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c [moved from drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h [moved from drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c [moved from drivers/net/wireless/rtlwifi/rtl8723com/main.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c [moved from drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h [moved from drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile [moved from drivers/net/wireless/rtlwifi/rtl8821ae/Makefile with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/def.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/dm.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/dm.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/fw.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/fw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/hw.c with 99% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/hw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/led.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/led.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/phy.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/phy.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/reg.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/rf.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/rf.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/sw.c with 97% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/sw.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/table.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/table.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c [moved from drivers/net/wireless/rtlwifi/rtl8821ae/trx.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h [moved from drivers/net/wireless/rtlwifi/rtl8821ae/trx.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/stats.c [moved from drivers/net/wireless/rtlwifi/stats.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/stats.h [moved from drivers/net/wireless/rtlwifi/stats.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/usb.c [moved from drivers/net/wireless/rtlwifi/usb.c with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/usb.h [moved from drivers/net/wireless/rtlwifi/usb.h with 100% similarity]
drivers/net/wireless/realtek/rtlwifi/wifi.h [moved from drivers/net/wireless/rtlwifi/wifi.h with 99% similarity]
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nvmem/core.c
drivers/nvmem/sunxi_sid.c
drivers/pci/msi.c
drivers/phy/phy-berlin-sata.c
drivers/phy/phy-qcom-ufs.c
drivers/phy/phy-rockchip-usb.c
drivers/pinctrl/freescale/pinctrl-imx25.c
drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/core.c
drivers/scsi/3w-9xxx.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/libiscsi.c
drivers/scsi/scsi_dh.c
drivers/scsi/scsi_lib.c
drivers/spi/spi-davinci.c
drivers/staging/lustre/lustre/llite/dir.c
drivers/staging/speakup/fakekey.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/imx.c
drivers/tty/tty_buffer.c
drivers/tty/tty_io.c
drivers/usb/core/quirks.c
drivers/usb/gadget/udc/bdc/bdc_ep.c
drivers/usb/misc/chaoskey.c
drivers/usb/renesas_usbhs/common.c
drivers/video/fbdev/broadsheetfb.c
drivers/video/fbdev/fsl-diu-fb.c
drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
drivers/video/fbdev/omap2/displays-new/connector-dvi.c
drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
drivers/video/fbdev/tridentfb.c
drivers/video/of_display_timing.c
fs/btrfs/backref.c
fs/btrfs/disk-io.c
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/ioctl.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/volumes.h
fs/cifs/cifsfs.h
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb2pdu.c
fs/dax.c
fs/ext4/Kconfig
fs/ext4/readpage.c
fs/mpage.c
fs/namei.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4trace.h
fs/nfs/write.c
fs/nfsd/blocklayout.c
fs/ramfs/file-nommu.c
include/asm-generic/word-at-a-time.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/compiler-gcc.h
include/linux/if_link.h
include/linux/irqdomain.h
include/linux/leds.h
include/linux/mlx4/device.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/overflow-arith.h [new file with mode: 0644]
include/linux/phy.h
include/linux/platform_data/atmel.h
include/linux/platform_data/mdio-gpio.h [moved from include/linux/mdio-gpio.h with 100% similarity]
include/linux/qed/common_hsi.h [new file with mode: 0644]
include/linux/qed/eth_common.h [new file with mode: 0644]
include/linux/qed/qed_chain.h [new file with mode: 0644]
include/linux/qed/qed_eth_if.h [new file with mode: 0644]
include/linux/qed/qed_if.h [new file with mode: 0644]
include/linux/seccomp.h
include/linux/skbuff.h
include/linux/string.h
include/linux/tcp.h
include/linux/usb/renesas_usbhs.h
include/net/6lowpan.h
include/net/af_unix.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/dsa.h
include/net/dst_metadata.h
include/net/inet_connection_sock.h
include/net/inet_hashtables.h
include/net/inet_timewait_sock.h
include/net/l3mdev.h
include/net/mac802154.h
include/net/mpls_iptunnel.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_timeout.h
include/net/netfilter/nf_queue.h
include/net/netfilter/nfnetlink_queue.h [deleted file]
include/net/netns/ipv4.h
include/net/request_sock.h
include/net/rtnetlink.h
include/net/sock.h
include/net/switchdev.h
include/net/tcp.h
include/net/tso.h
include/uapi/asm-generic/signal.h
include/uapi/linux/Kbuild
include/uapi/linux/bpf.h
include/uapi/linux/can/bcm.h
include/uapi/linux/if_link.h
include/uapi/linux/netfilter/nfnetlink_log.h
include/uapi/linux/openvswitch.h
include/uapi/linux/perf_event.h
include/uapi/linux/ptrace.h
include/uapi/linux/rtnetlink.h
include/xen/interface/sched.h
kernel/bpf/arraymap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/events/core.c
kernel/irq/handle.c
kernel/irq/msi.c
kernel/irq/proc.c
kernel/ptrace.c
kernel/sched/core.c
kernel/sched/sched.h
kernel/seccomp.c
kernel/time/clocksource.c
kernel/time/timekeeping.c
kernel/trace/bpf_trace.c
kernel/workqueue.c
lib/Kconfig
lib/string.c
mm/filemap.c
mm/memcontrol.c
mm/memory.c
mm/readahead.c
mm/vmstat.c
net/6lowpan/iphc.c
net/6lowpan/nhc.c
net/6lowpan/nhc.h
net/6lowpan/nhc_udp.c
net/bluetooth/6lowpan.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_request.c
net/bluetooth/hci_request.h
net/bluetooth/hci_sock.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/bluetooth/smp.h
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp.c
net/bridge/br_stp_if.c
net/bridge/br_sysfs_br.c
net/bridge/br_vlan.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/can/bcm.c
net/ceph/osd_client.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/rtnetlink.c
net/core/sock.c
net/core/tso.c
net/dccp/dccp.h
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/decnet/dn_route.c
net/dsa/dsa.c
net/dsa/slave.c
net/ieee802154/6lowpan/rx.c
net/ieee802154/6lowpan/tx.c
net/ipv4/Makefile
net/ipv4/arp.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/icmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/ipt_ah.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_recovery.c [new file with mode: 0644]
net/ipv4/xfrm4_output.c
net/ipv6/addrconf.c
net/ipv6/fib6_rules.c
net/ipv6/icmp.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/netfilter/nft_chain_route_ipv6.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_policy.c
net/irda/irlmp.c
net/key/af_key.c
net/mac80211/debugfs.c
net/mac80211/status.c
net/mac80211/tx.c
net/mac802154/llsec.c
net/mpls/af_mpls.c
net/mpls/internal.h
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c [moved from net/netfilter/nfnetlink_queue_core.c with 95% similarity]
net/netfilter/nfnetlink_queue_ct.c [deleted file]
net/netfilter/x_tables.c
net/netfilter/xt_CT.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/openvswitch/conntrack.h
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_netlink.h
net/openvswitch/flow_table.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport-netdev.h
net/openvswitch/vport-vxlan.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/rds/bind.c
net/rds/send.c
net/rds/tcp_listen.c
net/sched/act_mirred.c
net/sched/sch_hhf.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/switchdev/switchdev.c
net/sysctl_net.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/net.c
net/tipc/node.c
net/tipc/node.h
net/tipc/socket.c
net/tipc/udp_media.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/vmw_vsock/vmci_transport.h
net/xfrm/xfrm_user.c
samples/bpf/Makefile
samples/bpf/bpf_helpers.h
samples/bpf/trace_output_kern.c [new file with mode: 0644]
samples/bpf/trace_output_user.c [new file with mode: 0644]
scripts/package/builddeb
security/selinux/hooks.c
security/smack/smack_netfilter.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/au1x/db1200.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5645.h
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/tas2552.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm8962.c
sound/soc/dwc/designware_i2s.c
sound/soc/fsl/imx-ssi.c
sound/synth/emux/emux_oss.c
tools/perf/util/Build
tools/perf/util/perf_regs.c
tools/perf/util/perf_regs.h
tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c

index 0d5bc46dc1676869358cd6f36975905031f17f9e..ad6949bff2e392d63e7ddb82391746687ea6235e 100644 (file)
@@ -41,9 +41,13 @@ useless and be disabled, returning errors.  So it is important to monitor
 the amount of free space and expand the <COW device> before it fills up.
 
 <persistent?> is P (Persistent) or N (Not persistent - will not survive
-after reboot).
-The difference is that for transient snapshots less metadata must be
-saved on disk - they can be kept in memory by the kernel.
+after reboot).  O (Overflow) can be added as a persistent store option
+to allow userspace to advertise its support for seeing "Overflow" in the
+snapshot status.  So supported store types are "P", "PO" and "N".
+
+The difference between persistent and transient is that transient
+snapshots need less metadata saved on disk - it can be kept in
+memory by the kernel.
 
 
 * snapshot-merge <origin> <COW device> <persistent> <chunksize>
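
A target line advertising overflow support passes "PO" as the
<persistent?> field. A minimal sketch (the device names, length and
chunk size are hypothetical placeholders, not values from this patch):

    # create a snapshot whose status may report "Overflow"
    dmsetup create snap --table \
        "0 2097152 snapshot /dev/vg0/base /dev/vg0/cow PO 8"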
index 635a3b03663002d8f4eb47b25fd504d2a7ebca93..8d91ba9ff2fd0918bbf18fc1149f2fb3613bf80e 100644 (file)
@@ -25,7 +25,7 @@ Example:
                /* Cypress Gen3 touchpad */
                touchpad@67 {
                        compatible = "cypress,cyapa";
-                       reg = <0x24>;
+                       reg = <0x67>;
                        interrupt-parent = <&gpio>;
                        interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */
                        wakeup-source;
index 676ecf62491d56f5cee714acd2e7f872914facee..4efca560adda4b22f0498f123f053650e4427da5 100644 (file)
@@ -46,6 +46,7 @@ Required properties:
 Optional properties:
 - dual_emac_res_vlan   : Specifies VID to be used to segregate the ports
 - mac-address          : See ethernet.txt file in the same directory
+- phy-handle           : See ethernet.txt file in the same directory
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
index 9940aa02b8bd5a05a16600c38bf1d503a61e7046..9c23fdf25018ba7b6b8a87fd41653f216c908167 100644 (file)
@@ -12,7 +12,7 @@ Example:
          mdio@803c0000 {
                    #address-cells = <1>;
                    #size-cells = <0>;
-                   compatible = "hisilicon,mdio","hisilicon,hns-mdio";
+                   compatible = "hisilicon,hns-mdio","hisilicon,mdio";
                    reg = <0x0 0x803c0000 0x0 0x10000>;
 
                    ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/smsc-lan87xx.txt b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
new file mode 100644 (file)
index 0000000..974edd5
--- /dev/null
@@ -0,0 +1,24 @@
+SMSC LAN87xx Ethernet PHY
+
+Some boards require special tuning values. Configure them
+through an Ethernet OF device node.
+
+Optional properties:
+
+- smsc,disable-energy-detect:
+  If set, do not enable energy detect mode for the SMSC phy.
+  default: enable energy detect mode
+
+Examples:
+SMSC PHY with disabled energy detect mode on an AM335x based board.
+&davinci_mdio {
+       pinctrl-names = "default", "sleep";
+       pinctrl-0 = <&davinci_mdio_default>;
+       pinctrl-1 = <&davinci_mdio_sleep>;
+       status = "okay";
+
+       ethernetphy0: ethernet-phy@0 {
+               reg = <0>;
+               smsc,disable-energy-detect;
+       };
+};
index 8f771441be60556ace93f2b29d87df856882c344..705075da2f10156e92a60828177c8483ee16eeec 100644 (file)
@@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings:
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
                         (default is 64)
 - renesas,rx-fifo-size : Overrides the default rx fifo size given in words
-                        (default is 64, or 256 on R-Car Gen2)
+                        (default is 64)
 
 Pinctrl properties might be needed, too.  See
 Documentation/devicetree/bindings/pinctrl/renesas,*.
index 64a4ca6cf96ff5bd9df7c3b1c99abd7d086a701c..7d48f63db44ec9b9c0aa68ea0a54a70fc1a7014e 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
        - "renesas,usbhs-r8a7790"
        - "renesas,usbhs-r8a7791"
        - "renesas,usbhs-r8a7794"
+       - "renesas,usbhs-r8a7795"
   - reg: Base address and length of the register for the USBHS
   - interrupts: Interrupt specifier for the USBHS
   - clocks: A list of phandle + clock specifier pairs
index 2d66ed688125f894bae223c7e50fa85327ade534..bb5ab6de5924de19b8562d33b66bba8372fa72a3 100644 (file)
@@ -157,6 +157,9 @@ ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>:
                  both:        use both BOOTP and RARP but not DHCP
                               (old option kept for backwards compatibility)
 
+               if dhcp is used, the client identifier can be specified
+               using the following format: "ip=dhcp,client-id-type,client-id-value"
+
                 Default: any
 
   <dns0-ip>    IP address of first nameserver.
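
As a sketch of the format documented above, a boot line might carry
(the client-id-type and client-id-value shown are hypothetical
placeholders; the accepted encodings are defined by the ipconfig code):

    ip=dhcp,1,0x000c29abcdef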
index b85d000faeb4067c9ab1ed06690105d459a40a4e..c51f1146f3bd8396f572cddb9c87ae2620d236ba 100644 (file)
@@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is
    ABS_MT_POSITION_X := T_X
    ABS_MT_POSITION_Y := T_Y
    ABS_MT_TOOL_X := C_X
-   ABS_MT_TOOL_X := C_Y
+   ABS_MT_TOOL_Y := C_Y
 
 Unfortunately, there is not enough information to specify both the touching
 ellipse and the tool ellipse, so one has to resort to approximations.  One
index 99838259e2e67fa4203ed9685ccd4818b532a2dc..85752c81c5ecb359fd8ecce29faa8b348dd77972 100644 (file)
@@ -384,6 +384,14 @@ tcp_mem - vector of 3 INTEGERs: min, pressure, max
        Defaults are calculated at boot time from amount of available
        memory.
 
+tcp_min_rtt_wlen - INTEGER
+       The window length of the windowed min filter to track the minimum RTT.
+       A shorter window lets a flow more quickly pick up a new (higher)
+       minimum RTT when it is moved to a longer path (e.g., due to traffic
+       engineering). A longer window makes the filter more resistant to RTT
+       inflations such as transient congestion. The unit is seconds.
+       Default: 300
+
 tcp_moderate_rcvbuf - BOOLEAN
        If set, TCP performs receive buffer auto-tuning, attempting to
        automatically size the buffer (no greater than tcp_rmem[2]) to
@@ -425,6 +433,15 @@ tcp_orphan_retries - INTEGER
        you should think about lowering this value, such sockets
        may consume significant resources. Cf. tcp_max_orphans.
 
+tcp_recovery - INTEGER
+       This value is a bitmap to enable various experimental loss recovery
+       features.
+
+       RACK: 0x1 enables the RACK loss detection for fast detection of lost
+             retransmissions and tail drops.
+
+       Default: 0x1
+
 tcp_reordering - INTEGER
        Initial reordering level of packets in a TCP stream.
        TCP stack can then dynamically adjust flow reordering level
@@ -884,8 +901,8 @@ icmp_ignore_bogus_error_responses - BOOLEAN
 
 icmp_errors_use_inbound_ifaddr - BOOLEAN
 
-       If zero, icmp error messages except redirects are sent with the primary
-       address of the exiting interface.
+       If zero, icmp error messages are sent with the primary address of
+       the exiting interface.
 
        If non-zero, the message will be sent with the primary address of
        the interface that received the packet that caused the icmp error.
@@ -897,23 +914,8 @@ icmp_errors_use_inbound_ifaddr - BOOLEAN
        then the primary address of the first non-loopback interface that
        has one will be used regardless of this setting.
 
-       The source address selection of icmp redirect messages is controlled by
-       icmp_errors_use_inbound_ifaddr.
        Default: 0
 
-icmp_redirects_use_orig_daddr - BOOLEAN
-
-       If zero, icmp redirect messages are sent using the address specified for
-       other icmp errors by icmp_errors_use_inbound_ifaddr.
-
-       If non-zero, the message will be sent with the destination address of
-       the packet that caused the icmp redirect.
-       This behaviour is the preferred one on VRRP routers (see RFC 5798
-       section 8.1.1).
-
-       Default: 0
-
-
 igmp_max_memberships - INTEGER
        Change the maximum number of multicast groups we can subscribe to.
        Default: 20
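
The TCP knobs added above (tcp_recovery, tcp_min_rtt_wlen) can be
exercised at runtime with sysctl; the values below are simply the
documented defaults:

    # enable RACK loss detection (bit 0x1 of the tcp_recovery bitmap)
    sysctl -w net.ipv4.tcp_recovery=1
    # keep the min-RTT filter window at the default 300 seconds
    sysctl -w net.ipv4.tcp_min_rtt_wlen=300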
index 031ef4a634850b8cf4582e5d151fa8080dfa429c..d52aa10cfe911c88b47927c25cfd8ef596c65986 100644 (file)
@@ -90,7 +90,304 @@ or to specify the output device using cmsg and IP_PKTINFO.
 
 Limitations
 -----------
-VRF device currently only works for IPv4. Support for IPv6 is under development.
-
 Index of original ingress interface is not available via cmsg. Will address
 soon.
+
+################################################################################
+
+Using iproute2 for VRFs
+=======================
+VRF devices do *not* have to start with 'vrf-'. That is a convention used
+here to emphasize the device type, similar to the use of 'br' in bridge names.
+
+1. Create a VRF
+
+   To instantiate a VRF device and associate it with a table:
+       $ ip link add dev NAME type vrf table ID
+
+   Remember to add the ip rules as well:
+       $ ip ru add oif NAME table 10
+       $ ip ru add iif NAME table 10
+       $ ip -6 ru add oif NAME table 10
+       $ ip -6 ru add iif NAME table 10
+
+   Without the rules, route lookups are not directed to the table.
+
+   For example:
+   $ ip link add dev vrf-blue type vrf table 10
+   $ ip ru add pref 200 oif vrf-blue table 10
+   $ ip ru add pref 200 iif vrf-blue table 10
+   $ ip -6 ru add pref 200 oif vrf-blue table 10
+   $ ip -6 ru add pref 200 iif vrf-blue table 10
+
+
+2. List VRFs
+
+   To list VRFs that have been created:
+       $ ip [-d] link show type vrf
+         NOTE: The -d option is needed to show the table id
+
+   For example:
+   $ ip -d link show type vrf
+   11: vrf-mgmt: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+       link/ether 72:b3:ba:91:e2:24 brd ff:ff:ff:ff:ff:ff promiscuity 0
+       vrf table 1 addrgenmode eui64
+   12: vrf-red: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+       link/ether b6:6f:6e:f6:da:73 brd ff:ff:ff:ff:ff:ff promiscuity 0
+       vrf table 10 addrgenmode eui64
+   13: vrf-blue: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+       link/ether 36:62:e8:7d:bb:8c brd ff:ff:ff:ff:ff:ff promiscuity 0
+       vrf table 66 addrgenmode eui64
+   14: vrf-green: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+       link/ether e6:28:b8:63:70:bb brd ff:ff:ff:ff:ff:ff promiscuity 0
+       vrf table 81 addrgenmode eui64
+
+
+   Or in brief output:
+
+   $ ip -br link show type vrf
+   vrf-mgmt         UP             72:b3:ba:91:e2:24 <NOARP,MASTER,UP,LOWER_UP>
+   vrf-red          UP             b6:6f:6e:f6:da:73 <NOARP,MASTER,UP,LOWER_UP>
+   vrf-blue         UP             36:62:e8:7d:bb:8c <NOARP,MASTER,UP,LOWER_UP>
+   vrf-green        UP             e6:28:b8:63:70:bb <NOARP,MASTER,UP,LOWER_UP>
+
+
+3. Assign a Network Interface to a VRF
+
+   Network interfaces are assigned to a VRF by enslaving the netdevice to a
+   VRF device:
+       $ ip link set dev NAME master VRF-NAME
+
+   On enslavement, connected and local routes are automatically moved to
+   the table associated with the VRF device.
+
+   For example:
+   $ ip link set dev eth0 master vrf-mgmt
+
+
+4. Show Devices Assigned to a VRF
+
+   To show devices that have been assigned to a specific VRF add the master
+   option to the ip command:
+       $ ip link show master VRF-NAME
+
+   For example:
+   $ ip link show master vrf-red
+   3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP mode DEFAULT group default qlen 1000
+       link/ether 02:00:00:00:02:02 brd ff:ff:ff:ff:ff:ff
+   4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP mode DEFAULT group default qlen 1000
+       link/ether 02:00:00:00:02:03 brd ff:ff:ff:ff:ff:ff
+   7: eth5: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master vrf-red state DOWN mode DEFAULT group default qlen 1000
+       link/ether 02:00:00:00:02:06 brd ff:ff:ff:ff:ff:ff
+
+
+   Or using the brief output:
+   $ ip -br link show master vrf-red
+   eth1             UP             02:00:00:00:02:02 <BROADCAST,MULTICAST,UP,LOWER_UP>
+   eth2             UP             02:00:00:00:02:03 <BROADCAST,MULTICAST,UP,LOWER_UP>
+   eth5             DOWN           02:00:00:00:02:06 <BROADCAST,MULTICAST>
+
+
+5. Show Neighbor Entries for a VRF
+
+   To list neighbor entries associated with devices enslaved to a VRF device
+   add the master option to the ip command:
+       $ ip [-6] neigh show master VRF-NAME
+
+   For example:
+   $  ip neigh show master vrf-red
+   10.2.1.254 dev eth1 lladdr a6:d9:c7:4f:06:23 REACHABLE
+   10.2.2.254 dev eth2 lladdr 5e:54:01:6a:ee:80 REACHABLE
+
+    $ ip -6 neigh show master vrf-red
+    2002:1::64 dev eth1 lladdr a6:d9:c7:4f:06:23 REACHABLE
+
+
+6. Show Addresses for a VRF
+
+   To show addresses for interfaces associated with a VRF add the master
+   option to the ip command:
+       $ ip addr show master VRF-NAME
+
+   For example:
+   $ ip addr show master vrf-red
+   3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP group default qlen 1000
+       link/ether 02:00:00:00:02:02 brd ff:ff:ff:ff:ff:ff
+       inet 10.2.1.2/24 brd 10.2.1.255 scope global eth1
+          valid_lft forever preferred_lft forever
+       inet6 2002:1::2/120 scope global
+          valid_lft forever preferred_lft forever
+       inet6 fe80::ff:fe00:202/64 scope link
+          valid_lft forever preferred_lft forever
+   4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP group default qlen 1000
+       link/ether 02:00:00:00:02:03 brd ff:ff:ff:ff:ff:ff
+       inet 10.2.2.2/24 brd 10.2.2.255 scope global eth2
+          valid_lft forever preferred_lft forever
+       inet6 2002:2::2/120 scope global
+          valid_lft forever preferred_lft forever
+       inet6 fe80::ff:fe00:203/64 scope link
+          valid_lft forever preferred_lft forever
+   7: eth5: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master vrf-red state DOWN group default qlen 1000
+       link/ether 02:00:00:00:02:06 brd ff:ff:ff:ff:ff:ff
+
+   Or in brief format:
+   $ ip -br addr show master vrf-red
+   eth1             UP             10.2.1.2/24 2002:1::2/120 fe80::ff:fe00:202/64
+   eth2             UP             10.2.2.2/24 2002:2::2/120 fe80::ff:fe00:203/64
+   eth5             DOWN
+
+
+7. Show Routes for a VRF
+
+   To show routes for a VRF use the ip command to display the table associated
+   with the VRF device (a table name from /etc/iproute2/rt_tables, as set up
+   at the end of this document, can be used in place of the numeric ID):
+       $ ip [-6] route show table ID
+
+   For example:
+   $ ip route show table vrf-red
+   prohibit default
+   broadcast 10.2.1.0 dev eth1  proto kernel  scope link  src 10.2.1.2
+   10.2.1.0/24 dev eth1  proto kernel  scope link  src 10.2.1.2
+   local 10.2.1.2 dev eth1  proto kernel  scope host  src 10.2.1.2
+   broadcast 10.2.1.255 dev eth1  proto kernel  scope link  src 10.2.1.2
+   broadcast 10.2.2.0 dev eth2  proto kernel  scope link  src 10.2.2.2
+   10.2.2.0/24 dev eth2  proto kernel  scope link  src 10.2.2.2
+   local 10.2.2.2 dev eth2  proto kernel  scope host  src 10.2.2.2
+   broadcast 10.2.2.255 dev eth2  proto kernel  scope link  src 10.2.2.2
+
+   $ ip -6 route show table vrf-red
+   local 2002:1:: dev lo  proto none  metric 0  pref medium
+   local 2002:1::2 dev lo  proto none  metric 0  pref medium
+   2002:1::/120 dev eth1  proto kernel  metric 256  pref medium
+   local 2002:2:: dev lo  proto none  metric 0  pref medium
+   local 2002:2::2 dev lo  proto none  metric 0  pref medium
+   2002:2::/120 dev eth2  proto kernel  metric 256  pref medium
+   local fe80:: dev lo  proto none  metric 0  pref medium
+   local fe80:: dev lo  proto none  metric 0  pref medium
+   local fe80::ff:fe00:202 dev lo  proto none  metric 0  pref medium
+   local fe80::ff:fe00:203 dev lo  proto none  metric 0  pref medium
+   fe80::/64 dev eth1  proto kernel  metric 256  pref medium
+   fe80::/64 dev eth2  proto kernel  metric 256  pref medium
+   ff00::/8 dev vrf-red  metric 256  pref medium
+   ff00::/8 dev eth1  metric 256  pref medium
+   ff00::/8 dev eth2  metric 256  pref medium
+
+
+8. Route Lookup for a VRF
+
+   A test route lookup can be done for a VRF by adding the oif option to ip:
+       $ ip [-6] route get oif VRF-NAME ADDRESS
+
+   For example:
+   $ ip route get 10.2.1.40 oif vrf-red
+   10.2.1.40 dev eth1  table vrf-red  src 10.2.1.2
+       cache
+
+   $ ip -6 route get 2002:1::32 oif vrf-red
+   2002:1::32 from :: dev eth1  table vrf-red  proto kernel  src 2002:1::2  metric 256  pref medium
+
+
+9. Removing Network Interface from a VRF
+
+   Network interfaces are removed from a VRF by breaking the enslavement to
+   the VRF device:
+       $ ip link set dev NAME nomaster
+
+   Connected routes are moved back to the default table and local entries are
+   moved to the local table.
+
+   For example:
+   $ ip link set dev eth0 nomaster
+
+--------------------------------------------------------------------------------
+
+Commands used in this example:
+
+cat >> /etc/iproute2/rt_tables <<EOF
+1  vrf-mgmt
+10 vrf-red
+66 vrf-blue
+81 vrf-green
+EOF
+
+function vrf_create
+{
+    VRF=$1
+    TBID=$2
+    # create VRF device
+    ip link add vrf-${VRF} type vrf table ${TBID}
+
+    # add rules that direct lookups to vrf table
+    ip ru add pref 200 oif vrf-${VRF} table ${TBID}
+    ip ru add pref 200 iif vrf-${VRF} table ${TBID}
+    ip -6 ru add pref 200 oif vrf-${VRF} table ${TBID}
+    ip -6 ru add pref 200 iif vrf-${VRF} table ${TBID}
+
+    if [ "${VRF}" != "mgmt" ]; then
+        ip route add table ${TBID} prohibit default
+    fi
+    ip link set dev vrf-${VRF} up
+    ip link set dev vrf-${VRF} state up
+}
+
+vrf_create mgmt 1
+ip link set dev eth0 master vrf-mgmt
+
+vrf_create red 10
+ip link set dev eth1 master vrf-red
+ip link set dev eth2 master vrf-red
+ip link set dev eth5 master vrf-red
+
+vrf_create blue 66
+ip link set dev eth3 master vrf-blue
+
+vrf_create green 81
+ip link set dev eth4 master vrf-green
+
+
+Interface addresses from /etc/network/interfaces:
+auto eth0
+iface eth0 inet static
+      address 10.0.0.2
+      netmask 255.255.255.0
+      gateway 10.0.0.254
+
+iface eth0 inet6 static
+      address 2000:1::2
+      netmask 120
+
+auto eth1
+iface eth1 inet static
+      address 10.2.1.2
+      netmask 255.255.255.0
+
+iface eth1 inet6 static
+      address 2002:1::2
+      netmask 120
+
+auto eth2
+iface eth2 inet static
+      address 10.2.2.2
+      netmask 255.255.255.0
+
+iface eth2 inet6 static
+      address 2002:2::2
+      netmask 120
+
+auto eth3
+iface eth3 inet static
+      address 10.2.3.2
+      netmask 255.255.255.0
+
+iface eth3 inet6 static
+      address 2002:3::2
+      netmask 120
+
+auto eth4
+iface eth4 inet static
+      address 10.2.4.2
+      netmask 255.255.255.0
+
+iface eth4 inet6 static
+      address 2002:4::2
+      netmask 120
index 9bf8683defd9c9f4765ff7893a7404daa41a2a79..b6d822d50781efe1c4a9ac27b87f62778201fe7f 100644 (file)
@@ -3591,6 +3591,13 @@ F:       drivers/gpu/drm/i915/
 F:     include/drm/i915*
 F:     include/uapi/drm/i915*
 
+DRM DRIVERS FOR ATMEL HLCDC
+M:     Boris Brezillon <boris.brezillon@free-electrons.com>
+L:     dri-devel@lists.freedesktop.org
+S:     Supported
+F:     drivers/gpu/drm/atmel-hlcdc/
+F:     Documentation/devicetree/bindings/drm/atmel/
+
 DRM DRIVERS FOR EXYNOS
 M:     Inki Dae <inki.dae@samsung.com>
 M:     Joonyoung Shim <jy0922.shim@samsung.com>
@@ -3619,6 +3626,14 @@ S:       Maintained
 F:     drivers/gpu/drm/imx/
 F:     Documentation/devicetree/bindings/drm/imx/
 
+DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
+M:     Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git git://github.com/patjak/drm-gma500
+S:     Maintained
+F:     drivers/gpu/drm/gma500
+F:     include/drm/gma500*
+
 DRM DRIVERS FOR NVIDIA TEGRA
 M:     Thierry Reding <thierry.reding@gmail.com>
 M:     Terje Bergström <tbergstrom@nvidia.com>
@@ -4003,7 +4018,7 @@ S:        Maintained
 F:     sound/usb/misc/ua101.c
 
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M:     Matt Fleming <matt.fleming@intel.com>
+M:     Matt Fleming <matt@codeblueprint.co.uk>
 L:     linux-efi@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 S:     Maintained
@@ -4018,7 +4033,7 @@ F:        include/linux/efi*.h
 EFI VARIABLE FILESYSTEM
 M:     Matthew Garrett <matthew.garrett@nebula.com>
 M:     Jeremy Kerr <jk@ozlabs.org>
-M:     Matt Fleming <matt.fleming@intel.com>
+M:     Matt Fleming <matt@codeblueprint.co.uk>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 L:     linux-efi@vger.kernel.org
 S:     Maintained
@@ -6785,7 +6800,6 @@ F:        drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M:     Amir Vadai <amirv@mellanox.com>
-M:     Ido Shamay <idos@mellanox.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -8526,6 +8540,16 @@ L:       netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/qlogic/qlge/
 
+QLOGIC QL4xxx ETHERNET DRIVER
+M:     Yuval Mintz <Yuval.Mintz@qlogic.com>
+M:     Ariel Elior <Ariel.Elior@qlogic.com>
+M:     everest-linux-l2@qlogic.com
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/ethernet/qlogic/qed/
+F:     include/linux/qed/
+F:     drivers/net/ethernet/qlogic/qede/
+
 QNX4 FILESYSTEM
 M:     Anders Larsen <al@alarsen.net>
 W:     http://www.alarsen.net/linux/qnx4fs/
@@ -8877,6 +8901,13 @@ S:       Maintained
 F:     drivers/net/wireless/rtlwifi/
 F:     drivers/net/wireless/rtlwifi/rtl8192ce/
 
+RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
+M:     Jes Sorensen <Jes.Sorensen@redhat.com>
+L:     linux-wireless@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8723au-mac80211
+S:     Maintained
+F:     drivers/net/wireless/realtek/rtl8xxxu/
+
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:     Antonino Daplas <adaplas@gmail.com>
 L:     linux-fbdev@vger.kernel.org
@@ -9108,6 +9139,15 @@ S: Supported
 F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
 F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
 
+SYNOPSYS DESIGNWARE I2C DRIVER
+M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+M:     Jarkko Nikula <jarkko.nikula@linux.intel.com>
+M:     Mika Westerberg <mika.westerberg@linux.intel.com>
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     drivers/i2c/busses/i2c-designware-*
+F:     include/linux/platform_data/i2c-designware.h
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:     Seungwon Jeon <tgih.jun@samsung.com>
 M:     Jaehoon Chung <jh80.chung@samsung.com>
@@ -9921,7 +9961,6 @@ S:        Maintained
 F:     drivers/staging/lustre
 
 STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
-M:     Julian Andres Klode <jak@jak-linux.org>
 M:     Marc Dietrich <marvin24@gmx.de>
 L:     ac100@lists.launchpad.net (moderated for non-subscribers)
 L:     linux-tegra@vger.kernel.org
@@ -11384,15 +11423,6 @@ W:     http://oops.ghostprotocols.net:81/blog
 S:     Maintained
 F:     drivers/net/wireless/wl3501*
 
-WM97XX TOUCHSCREEN DRIVERS
-M:     Mark Brown <broonie@kernel.org>
-M:     Liam Girdwood <lrg@slimlogic.co.uk>
-L:     linux-input@vger.kernel.org
-W:     https://github.com/CirrusLogic/linux-drivers/wiki
-S:     Supported
-F:     drivers/input/touchscreen/*wm97*
-F:     include/linux/wm97xx.h
-
 WOLFSON MICROELECTRONICS DRIVERS
 L:     patches@opensource.wolfsonmicro.com
 T:     git https://github.com/CirrusLogic/linux-drivers.git
index 1d341eba143d38f0b9e7bd7669c6357f477d94b4..d33ab74bffce02ad49424fafb60b9a941b815ffe 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Hurr durr I'ma sheep
+EXTRAVERSION = -rc6
+NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index 6b340d0f1521c3ad9c4edf984abe60982ae24c04..902e6ab00a066fead53614ed86cb88980aea2fba 100644 (file)
@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
 #endif
 }
 
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
index 7611b10a2d238c7b4bb73696b59e2fe8b14ceb2c..0b10ef2a43726e0188b61de96bd5a5bf1ba50067 100644 (file)
@@ -48,4 +48,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 233159d2eaab3eac34d47c4d4a9308b0d1bdec60..bb8fa023d5741dff9b4f9033ed2cf6f6d7b69f2d 100644 (file)
@@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \
        sun4i-a10-hackberry.dtb \
        sun4i-a10-hyundai-a7hd.dtb \
        sun4i-a10-inet97fv2.dtb \
-       sun4i-a10-itead-iteaduino-plus.dts \
+       sun4i-a10-itead-iteaduino-plus.dtb \
        sun4i-a10-jesurun-q5.dtb \
        sun4i-a10-marsboard.dtb \
        sun4i-a10-mini-xplus.dtb \
index ca0e3c15977f13febd2ae550949ae704ecbf33cb..294cfe40388dd582d77d45eac441b15318ac1cde 100644 (file)
@@ -98,6 +98,7 @@
                        opp-hz = /bits/ 64 <800000000>;
                        opp-microvolt = <1000000>;
                        clock-latency-ns = <200000>;
+                       opp-suspend;
                };
                opp07 {
                        opp-hz = /bits/ 64 <900000000>;
index 15aea760c1dadee45c631d78c64366cea7739276..c625e71217aa94d74c640c113286a5292179b62a 100644 (file)
                                regulator-name = "P1.8V_LDO_OUT10";
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
                        };
 
                        ldo11_reg: LDO11 {
index df9aee92ecf4d71c714e763665013e7d2f6f4591..1b3d6c769a3cbb37f88fe55914707316abea023c 100644 (file)
                interrupt-parent = <&combiner>;
                interrupts = <3 0>;
                clock-names = "sysmmu", "master";
-               clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
+               clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
                power-domains = <&disp_pd>;
                #iommu-cells = <0>;
        };
index 79ffdfe712aa4a8ad193d4afd671962edfb73646..3b43e57845ae92bea4fc25b5c9af428c26ce2aa6 100644 (file)
         */
        pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
        pinctrl-names = "default";
-       samsung,pwm-outputs = <0>;
        status = "okay";
 };
 
index 66e47de5e826b0b33aaa6d066aa3c6c3104ff796..96d7eede412e1343d5e4e0a982044cec3ac347d3 100644 (file)
@@ -36,7 +36,7 @@
                pinctrl-0 = <&pinctrl_pmic>;
                reg = <0x08>;
                interrupt-parent = <&gpio5>;
-               interrupts = <23 0x8>;
+               interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
                regulators {
                        sw1_reg: sw1a {
                                regulator-name = "SW1";
index c3e3ca9362fbb78b6b2ecb8ec0125b83abdb7352..cd170376eaca6be3bc6416363250271590b75153 100644 (file)
@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/imx5-clock.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        aliases {
index 3373fd958e95c72b098ed14ea2a3228ba7903ea6..a503562438888fe936de6c8b9b255a06a4455d26 100644 (file)
@@ -35,7 +35,6 @@
                        compatible = "regulator-fixed";
                        reg = <1>;
                        pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_usbh1>;
                        regulator-name = "usbh1_vbus";
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
                        compatible = "regulator-fixed";
                        reg = <2>;
                        pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_usbotg>;
                        regulator-name = "usb_otg_vbus";
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
index a0b2a79cbfbdf0b42286dea205fe6751fdaf57f6..4624d0f2a75425310a65b462221374eabde85210 100644 (file)
                                "mix.0", "mix.1",
                                "dvc.0", "dvc.1",
                                "clk_a", "clk_b", "clk_c", "clk_i";
+               power-domains = <&cpg_clocks>;
 
                status = "disabled";
 
index 831525dd39a60ad75b024e07b8d6153ff027e6b3..1666c8a6b1432e4f1c8a1a822fb59cb7d5de7375 100644 (file)
                                "mix.0", "mix.1",
                                "dvc.0", "dvc.1",
                                "clk_a", "clk_b", "clk_c", "clk_i";
+               power-domains = <&cpg_clocks>;
 
                status = "disabled";
 
index 2bebaa286f9a3a49c65908f8a4874d5e3091b32a..391230c3dc938fb1a0a4c92ea91e05a209319baa 100644 (file)
                                720000  1200000
                                528000  1100000
                                312000  1000000
-                               144000  900000
+                               144000  1000000
                                >;
                        #cooling-cells = <2>;
                        cooling-min-level = <0>;
index 9bdf54795f05de26283881729a4427bd781ea2eb..56978199c4798fa236394c232e50d58a61e4fd3d 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/cputype.h>
 #include <asm/cp15.h>
 #include <asm/mcpm.h>
+#include <asm/smp_plat.h>
 
 #include "regs-pmu.h"
 #include "common.h"
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
                cluster >= EXYNOS5420_NR_CLUSTERS)
                return -EINVAL;
 
-       exynos_cpu_power_up(cpunr);
+       if (!exynos_cpu_power_state(cpunr)) {
+               exynos_cpu_power_up(cpunr);
+
+               /*
+                * This assumes the cluster number of the big cores (Cortex-A15)
+                * is 0 and that of the LITTLE cores (Cortex-A7) is 1.
+                * When the system is booted from a LITTLE core, the LITTLE
+                * cores must be reset during CPU power-up.
+                */
+               if (cluster &&
+                   cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+                       /*
+                        * Before we reset the LITTLE cores, we should wait
+                        * until the SPARE2 register is set to 1 because the
+                        * iROM init code sets the register after
+                        * initialization.
+                        */
+                       while (!pmu_raw_readl(S5P_PMU_SPARE2))
+                               udelay(10);
+
+                       pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+                                       EXYNOS_SWRESET);
+               }
+       }
+
        return 0;
 }
 
index b7614333d2968befa767109f693bf7947528db4a..fba9068ed260de7f8211525e772ffc25d7d88f0a 100644 (file)
@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
 #define SPREAD_ENABLE                                          0xF
 #define SPREAD_USE_STANDWFI                                    0xF
 
+#define EXYNOS5420_KFC_CORE_RESET0                             BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0                              BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr)                         \
+       ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
 #define EXYNOS5420_BB_CON1                                     0x0784
 #define EXYNOS5420_BB_SEL_EN                                   BIT(31)
 #define EXYNOS5420_BB_PMOS_EN                                  BIT(7)
index ca8a25bb35217404247bc9163529457289187bdf..18b12796acf95f74ca90e1f9e80f31dd325a3065 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/leds.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
-#include <linux/mdio-gpio.h>
 #include <linux/io.h>
 
 #include <asm/setup.h>
index 418188cd1712fe7f6247a8b97fb423e358f4891d..14c56f3f0ec225032a067ad46c08713734a605fa 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/input.h>
 #include <linux/skbuff.h>
 #include <linux/gpio_keys.h>
-#include <linux/mdio-gpio.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <asm/mach-types.h>
index 266b265090cd222034bb6cfd3c1feb64f05ed058..6070282ce24308c100cdfcb30a181283cf0aafa9 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/input.h>
 #include <linux/skbuff.h>
 #include <linux/gpio_keys.h>
-#include <linux/mdio-gpio.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <asm/mach-types.h>
index 6be415111eec09b058c5aa88b8a0693ee1cbb2f2..2f4b14cfddb4762e88818464dcd6c57d5eff2a5c 100644 (file)
@@ -626,6 +626,7 @@ load_common:
                case BPF_LD | BPF_B | BPF_IND:
                        load_order = 0;
 load_ind:
+                       update_on_xread(ctx);
                        OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
                        goto load_common;
                case BPF_LDX | BPF_IMM:
index f9914d7c1bb00b5c4cbe7a19c0f62c8eca54cf81..d10b5d483022f5374fa16c7783b7fb62014c3acb 100644 (file)
@@ -42,7 +42,7 @@ endif
 CHECKFLAGS     += -D__aarch64__
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
-CFLAGS_MODULE  += -mcmodel=large
+KBUILD_CFLAGS_MODULE   += -mcmodel=large
 endif
 
 # Default value
index 35005866a34ea87daa7093772028cfe0eab55158..606dd5a05c2dfd5682c1343fd05a0ce33b2dd46a 100644 (file)
@@ -13,14 +13,12 @@ soc0: soc@000000000 {
                reg = <0x0 0x803c0000 0x0 0x10000
                       0x0 0x80000000 0x0 0x10000>;
 
-               soc0_phy4: ethernet-phy@4 {
+               soc0_phy0: ethernet-phy@0 {
                        reg = <0x0>;
-                       device_type = "ethernet-phy";
                        compatible = "ethernet-phy-ieee802.3-c22";
                };
-               soc0_phy5: ethernet-phy@5 {
+               soc0_phy1: ethernet-phy@1 {
                        reg = <0x1>;
-                       device_type = "ethernet-phy";
                        compatible = "ethernet-phy-ieee802.3-c22";
                };
        };
@@ -37,7 +35,7 @@ soc0: soc@000000000 {
                       0x0 0xc7000000 0x0 0x60000
                       >;
 
-               phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>;
+               phy-handle = <0 0 0 0 &soc0_phy0 &soc0_phy1 0 0>;
                interrupts = <
                        /* [14] ge fifo err 8 / xge 6**/
                        149 0x4 150 0x4 151 0x4 152 0x4
index b0329be95cb129f3b283f3d75e4dfeff64214bff..26b066690593cd6304e81fdd24465a25ace2f396 100644 (file)
@@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_S2                        __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE         __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
-#define PAGE_NONE              __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE              __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY              __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -496,7 +496,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-                             PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+                             PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
        /* preserve the hardware dirty information */
        if (pte_hw_dirty(pte))
                pte = pte_mkdirty(pte);
index 3bc498c250dc08b04f81abdde71e83f886454147..41e58fe3c041e9adcade0f113064ea42e87045ba 100644 (file)
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           388
+#define __NR_compat_syscalls           390
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index cef934a90f17ecec303a1dcd12133a962f27b9d1..5b925b761a2a8857a62720110076e062edd4d7f3 100644 (file)
@@ -797,3 +797,12 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
 __SYSCALL(__NR_bpf, sys_bpf)
 #define __NR_execveat 387
 __SYSCALL(__NR_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 388
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 389
+__SYSCALL(__NR_membarrier, sys_membarrier)
+
+/*
+ * Please add new compat syscalls above this comment and update
+ * __NR_compat_syscalls in asm/unistd.h.
+ */
index 8d1e7236431b428490cfb95cb6bfbeba24143854..991bf5db2ca19aa19b617e4752d6fd65280b164b 100644 (file)
@@ -19,6 +19,9 @@
 /* Required for AArch32 compatibility. */
 #define SA_RESTORER    0x04000000
 
+#define MINSIGSTKSZ 5120
+#define SIGSTKSZ    16384
+
 #include <asm-generic/signal.h>
 
 #endif
index cebf78661a553775003bfee8ec89f65e33e3ec55..253021ef2769078e69793288a8cc067aebb76d34 100644 (file)
@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
 }
 
 /*
- * Call registered single step handers
+ * Call registered single step handlers
  * There is no Syndrome info to check for determining the handler.
  * So we call all the registered handlers, until the right handler is
  * found which returns zero.
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
  * Use reader/writer locks instead of plain spinlock.
  */
 static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
 
 void register_break_hook(struct break_hook *hook)
 {
-       write_lock(&break_hook_lock);
-       list_add(&hook->node, &break_hook);
-       write_unlock(&break_hook_lock);
+       spin_lock(&break_hook_lock);
+       list_add_rcu(&hook->node, &break_hook);
+       spin_unlock(&break_hook_lock);
 }
 
 void unregister_break_hook(struct break_hook *hook)
 {
-       write_lock(&break_hook_lock);
-       list_del(&hook->node);
-       write_unlock(&break_hook_lock);
+       spin_lock(&break_hook_lock);
+       list_del_rcu(&hook->node);
+       spin_unlock(&break_hook_lock);
+       synchronize_rcu();
 }
 
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
        struct break_hook *hook;
        int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
 
-       read_lock(&break_hook_lock);
-       list_for_each_entry(hook, &break_hook, node)
+       rcu_read_lock();
+       list_for_each_entry_rcu(hook, &break_hook, node)
                if ((esr & hook->esr_mask) == hook->esr_val)
                        fn = hook->fn;
-       read_unlock(&break_hook_lock);
+       rcu_read_unlock();
 
        return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
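[Editor's note] The hunk above is the standard RCU-protected list conversion: writers still serialize on a spinlock, readers traverse locklessly, and unregistration waits out a grace period before the caller may free the hook, which lets call_break_hook() run from debug-exception context without contending a lock. A kernel-style sketch of the same pattern with hypothetical names (kernel APIs, so illustrative rather than standalone-runnable):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct hook {
	struct list_head node;
	int (*fn)(void *data);
};

static LIST_HEAD(hooks);
static DEFINE_SPINLOCK(hooks_lock);	/* serializes writers only */

void hook_register(struct hook *h)
{
	spin_lock(&hooks_lock);
	list_add_rcu(&h->node, &hooks);
	spin_unlock(&hooks_lock);
}

void hook_unregister(struct hook *h)
{
	spin_lock(&hooks_lock);
	list_del_rcu(&h->node);
	spin_unlock(&hooks_lock);
	synchronize_rcu();		/* wait for in-flight readers */
}

int hook_call(void *data)
{
	struct hook *h;
	int ret = -1;

	rcu_read_lock();		/* lockless read side */
	list_for_each_entry_rcu(h, &hooks, node)
		ret = h->fn(data);
	rcu_read_unlock();
	return ret;
}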
index e8ca6eaedd0252e2056530d71d519f423931d323..13671a9cf0167057d583f1c2dddca54ee94658f8 100644 (file)
@@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void)
                 */
                if (!is_normal_ram(md))
                        prot = __pgprot(PROT_DEVICE_nGnRE);
-               else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+               else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+                        !PAGE_ALIGNED(md->phys_addr))
                        prot = PAGE_KERNEL_EXEC;
                else
                        prot = PAGE_KERNEL;
index 08cafc518b9a57ad724530b9dbb144d50683c13f..0f03a8fe23144e777b3ead0a6ea18e038b5d1066 100644 (file)
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
 ENDPROC(ftrace_stub)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* save return value regs */

+       .macro save_return_regs
+       sub sp, sp, #64
+       stp x0, x1, [sp]
+       stp x2, x3, [sp, #16]
+       stp x4, x5, [sp, #32]
+       stp x6, x7, [sp, #48]
+       .endm
+
+       /* restore return value regs */
+       .macro restore_return_regs
+       ldp x0, x1, [sp]
+       ldp x2, x3, [sp, #16]
+       ldp x4, x5, [sp, #32]
+       ldp x6, x7, [sp, #48]
+       add sp, sp, #64
+       .endm
+
 /*
  * void ftrace_graph_caller(void)
  *
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
  * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
  */
 ENTRY(return_to_handler)
-       str     x0, [sp, #-16]!
+       save_return_regs
        mov     x0, x29                 //     parent's fp
        bl      ftrace_return_to_handler// addr = ftrace_return_to_handler(fp);
        mov     x30, x0                 // restore the original return address
-       ldr     x0, [sp], #16
+       restore_return_regs
        ret
 END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
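[Editor's note] return_to_handler previously preserved only x0, but under AAPCS64 a result can occupy more than one general-purpose register, so the macros above conservatively save x0-x7 around the call into ftrace_return_to_handler(). As a hypothetical example (not from this patch), a 16-byte aggregate comes back in x0 and x1:

/* Returned in x0/x1 under AAPCS64; a trampoline that clobbered x1 while
 * preserving only x0 would corrupt half of this result. */
struct pair { long a, b; };

struct pair make_pair(long a, long b)
{
	return (struct pair){ a, b };
}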
index f341866aa810340e47aa9b7283b6b472ca2eae84..c08b9ad6f42931e8766d0186daa51a6cce8dbe39 100644 (file)
@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
                aarch64_insn_is_bcond(insn));
 }
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
        unsigned long flags = 0;
        int ret;
 
-       spin_lock_irqsave(&patch_lock, flags);
+       raw_spin_lock_irqsave(&patch_lock, flags);
        waddr = patch_map(addr, FIX_TEXT_POKE0);
 
        ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
 
        patch_unmap(FIX_TEXT_POKE0);
-       spin_unlock_irqrestore(&patch_lock, flags);
+       raw_spin_unlock_irqrestore(&patch_lock, flags);
 
        return ret;
 }
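[Editor's note] The likely motivation (the commit message is not part of this excerpt) is that instruction patching can run in atomic context, for instance under stop_machine(), while on PREEMPT_RT a plain spinlock_t becomes a sleeping lock; raw_spinlock_t always busy-waits. Usage is unchanged apart from the raw_ prefix, as this kernel-style sketch with a hypothetical lock shows:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void critical_section(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	/* true atomic context is guaranteed, even on PREEMPT_RT */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}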
index 6bab21f84a9ff38402e70345016ed50ae8e95e30..232247945b1c215c25fbfd708573fe3def5c68c5 100644 (file)
@@ -364,6 +364,8 @@ static void __init relocate_initrd(void)
                to_free = ram_end - orig_start;
 
        size = orig_end - orig_start;
+       if (!size)
+               return;
 
        /* initrd needs to be relocated completely inside linear mapping */
        new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
index aba9ead1384c036a0d6a441c92ced63cfd7ed4ae..9fadf6d7039b721b072379b5af51abce726f5b92 100644 (file)
@@ -287,6 +287,7 @@ retry:
                         * starvation.
                         */
                        mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       mm_flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }
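[Editor's note] Setting FAULT_FLAG_TRIED alongside clearing FAULT_FLAG_ALLOW_RETRY tells the core mm that this is the second pass of the two-attempt fault protocol. A kernel-style sketch of that protocol as a hypothetical caller would drive it (handle_mm_fault signature as of this kernel era; mm, vma and addr assumed in scope):

unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
int fault;

fault = handle_mm_fault(mm, vma, addr, flags);
if (fault & VM_FAULT_RETRY) {
	/* Second and final attempt: no further retries allowed,
	 * and the core mm is told this is the retry pass. */
	flags &= ~FAULT_FLAG_ALLOW_RETRY;
	flags |= FAULT_FLAG_TRIED;
	fault = handle_mm_fault(mm, vma, addr, flags);
}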
index f61f2dd67464746c728474a7b5503dd7dfbdcb67..241b9b9729d821510fb2addde4b276e73e6185a7 100644 (file)
@@ -20,4 +20,5 @@ generic-y += sections.h
 generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 61cd1e786a142c440caa231a665349ed3d8f8e01..91d49c0a31185041055e627689ec09b2ab708bd1 100644 (file)
@@ -46,4 +46,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index f17c4dc6050c7d23ade635f749aa75eed8f0ac1d..945544ec603ee12408928c0abd4db19cfbc784d9 100644 (file)
@@ -59,4 +59,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index b7f68192d15b52cb4e6c34c78eac88a02fa971d5..1778805f63809d378a5cafb6c920b517ea753c7b 100644 (file)
@@ -43,4 +43,5 @@ generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 8e47b832cc7684af7a10d2a0680651cf5bf8cc0d..1fa084cf1a4398934889658b8b21f66154bf0ab6 100644 (file)
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 70e6ae1e700673e3acbd03452d22f57db9c1166d..373cb23301e30248bfd62f2a08c6529f93db0382 100644 (file)
@@ -73,4 +73,5 @@ generic-y += uaccess.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index daee37bd09991a3edf732451587e59a8db001d54..db8ddabc6bd2819ba579c4c407a569e2232daa51 100644 (file)
@@ -58,4 +58,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 9de3ba12f6b97c0f8722e2f929c70fbae429fd34..502a91d8dbbd80df039d9a111de04c1caadf0e3c 100644 (file)
@@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
index e0eb704ca1fa93755d678e82c2336c8e188a34f0..fd104bd221ced1dff2c9485bdcb1be520171df7f 100644 (file)
@@ -9,3 +9,4 @@ generic-y += module.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 0b6b40d37b95b5acd2f0561a82f12b3859b35880..5b4ec541ba7c99936d8f5072dc5716a778751235 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index eeb3a8991fc411e952cd6a20b3e44602b80f38e0..6e5198e2c124f89b86d6b5267280f5596448cc0a 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -344,6 +349,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 3a7006654ce97c9df5bd521d66af5c29c61171fb..f75600b0ca23f78ec028948babb81b5b2c02b3ed 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -355,6 +360,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 0586b323a673791fff0661eb5804347bc7ad16fb..a42d91c389a6fbba1c2c8f27069acb53332dd56f 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index ad1dbce07aa4b532fdb49fe515db2f2a19c37aa7..77f4a11083e9964050022f4ff22625b69b8672dd 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -345,6 +350,7 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index b44acacaecf41f43bae326e63318c3c8c7e7d0f8..5a329f77329b155ed5a4dc0f06ca6973c82aae94 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -364,6 +369,7 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 8afca3753db1f8f52245f2b4265c35f74268ae5e..83c80d2030ec96aa4740615101422e1fca1884e5 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index ef00875994d9ac34191f2bd6a572d9c44118ef4d..6cb42c3bf5a280d1a537d515396a463049d4a250 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 387c2bd90ff1490a8a1c1fcb08a11753c8b2b6b1..c7508c30330c43ee32c0f75d7c7c8112f16cdc35 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 35355c1bc714000d1e3d44b5a429b97fabeac84a..64b71664a3036aa2827984f34005fc2cd044de44 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -354,6 +359,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 8442d267b877202e5293c0c5178eb340113e69f8..9a4cab78a2ea82ce3043be9c8a5582328774f21b 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
index 0e1b542e155582a3685340bf0ce8651fcc5d8948..1a2eaac13dbdd540f223aea5c67c660723e3cf54 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 5a822bb790f72135862dcc0b7a97296954d73bd9..066e74f666ae95e4c9f58c42010bc857ca6454ca 100644 (file)
@@ -4,4 +4,34 @@
 #define __ALIGN .align 4
 #define __ALIGN_STR ".align 4"
 
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+       __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+       __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+       __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+       __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4), "m" (arg5), "m" (arg6))
+
 #endif
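[Editor's note] The empty asm with "m" constraints is the whole trick: by claiming to read each argument's stack slot, it forces gcc to treat those caller-owned slots as live to the end of the function, so they are never reused as spill space or rewritten by a tail call. A standalone illustration of the same constraint trick (hypothetical macro name; compiles with gcc or clang):

#include <stdio.h>

/* The empty asm "reads" each argument's memory slot, so the compiler
 * must keep the slots intact for the rest of the function. */
#define protect2(ret, a1, a2) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), "m" (a1), "m" (a2))

static long add2(long a, long b)
{
	long ret = a + b;

	protect2(ret, a, b);	/* a and b now count as live until return */
	return ret;
}

int main(void)
{
	printf("%ld\n", add2(2, 3));
	return 0;
}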
index 244e0dbe45dbeda359e233cde23b4652f0ce13dc..0793a7f174176e6d590ca4d9567a9e3523c42c50 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            356
+#define NR_syscalls            375
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 61fb6cb9d2ae3c66a1c0c6dec1ac95adb83dd810..5e6fae6c275f9b110464cb80bbb47187c2251dcd 100644 (file)
 #define __NR_memfd_create      353
 #define __NR_bpf               354
 #define __NR_execveat          355
+#define __NR_socket            356
+#define __NR_socketpair                357
+#define __NR_bind              358
+#define __NR_connect           359
+#define __NR_listen            360
+#define __NR_accept4           361
+#define __NR_getsockopt                362
+#define __NR_setsockopt                363
+#define __NR_getsockname       364
+#define __NR_getpeername       365
+#define __NR_sendto            366
+#define __NR_sendmsg           367
+#define __NR_recvfrom          368
+#define __NR_recvmsg           369
+#define __NR_shutdown          370
+#define __NR_recvmmsg          371
+#define __NR_sendmmsg          372
+#define __NR_userfaultfd       373
+#define __NR_membarrier                374
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index a0ec4303f2c8e57a04fb353178d43b0be6a461fe..5dd0e80042f51107e63e0fcd832f4c46c85b826c 100644 (file)
@@ -376,4 +376,22 @@ ENTRY(sys_call_table)
        .long sys_memfd_create
        .long sys_bpf
        .long sys_execveat              /* 355 */
-
+       .long sys_socket
+       .long sys_socketpair
+       .long sys_bind
+       .long sys_connect
+       .long sys_listen                /* 360 */
+       .long sys_accept4
+       .long sys_getsockopt
+       .long sys_setsockopt
+       .long sys_getsockname
+       .long sys_getpeername           /* 365 */
+       .long sys_sendto
+       .long sys_sendmsg
+       .long sys_recvfrom
+       .long sys_recvmsg
+       .long sys_shutdown              /* 370 */
+       .long sys_recvmmsg
+       .long sys_sendmmsg
+       .long sys_userfaultfd
+       .long sys_membarrier
index df31353fd2001dc0e357feafcf975aa6e5537622..29acb89daaaa55f2b1640c3d751233ed034b43b7 100644 (file)
@@ -54,4 +54,5 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 2f222f355c4bbc69842ccd62f3419b0cbd1732a4..b0ae88c9fed922a4ba95be0498f421d3927eb40e 100644 (file)
@@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += syscalls.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 89a628455bc253b01c2cc7b9f3cc2a6d9a426493..bd634259eab9d6b0f7d1b8e746d0b11c01f0950d 100644 (file)
@@ -933,7 +933,7 @@ void __init plat_mem_setup(void)
        while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
                && (total < MAX_MEMORY)) {
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
-                                               __pa_symbol(&__init_end), -1,
+                                               __pa_symbol(&_end), -1,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
                if (memory >= 0) {
index 40ec4ca3f946a9238afa6e0edd25c0440f86a36b..c7fe4d01e79c61bbaee58aa25b9b50bf5b9a380e 100644 (file)
@@ -17,4 +17,5 @@ generic-y += segment.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 9e777cd42b67190a5dce5ac11553a3de05e4375e..d10fd80dbb7e96b898d2230c2d0f5112d02c3bc1 100644 (file)
@@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
  */
 #define ioremap_nocache(offset, size)                                  \
        __ioremap_mode((offset), (size), _CACHE_UNCACHED)
+#define ioremap_uc ioremap_nocache
 
 /*
  * ioremap_cachable -  map bus memory into CPU space
index c4ddc4f0d2dcb11c7aa55167434d72e7990e083a..23cd9b118c9e4f8f8fd43031dd67dcee8e610af8 100644 (file)
 
 #define __SWAB_64_THRU_32__
 
-#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||              \
-    defined(_MIPS_ARCH_LOONGSON3A)
+#if !defined(__mips16) &&                                      \
+       ((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||  \
+        defined(_MIPS_ARCH_LOONGSON3A))
 
-static inline __attribute__((nomips16)) __attribute_const__
-               __u16 __arch_swab16(__u16 x)
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
 {
        __asm__(
        "       .set    push                    \n"
        "       .set    arch=mips32r2           \n"
-       "       .set    nomips16                \n"
        "       wsbh    %0, %1                  \n"
        "       .set    pop                     \n"
        : "=r" (x)
@@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__
 }
 #define __arch_swab16 __arch_swab16
 
-static inline __attribute__((nomips16)) __attribute_const__
-               __u32 __arch_swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
        __asm__(
        "       .set    push                    \n"
        "       .set    arch=mips32r2           \n"
-       "       .set    nomips16                \n"
        "       wsbh    %0, %1                  \n"
        "       rotr    %0, %0, 16              \n"
        "       .set    pop                     \n"
@@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__
  * 64-bit kernel on r2 CPUs.
  */
 #ifdef __mips64
-static inline __attribute__((nomips16)) __attribute_const__
-               __u64 __arch_swab64(__u64 x)
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 {
        __asm__(
        "       .set    push                    \n"
        "       .set    arch=mips64r2           \n"
-       "       .set    nomips16                \n"
        "       dsbh    %0, %1                  \n"
        "       dshd    %0, %0                  \n"
        "       .set    pop                     \n"
@@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__
 }
 #define __arch_swab64 __arch_swab64
 #endif /* __mips64 */
-#endif /* MIPS R2 or newer or Loongson 3A */
+#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */
 #endif /* _ASM_SWAB_H */
index c03088f9f514e7c21f7ae0e185f8be0456af372b..cfabadb135d9fe94912080ea41ca761e6f1eca08 100644 (file)
 #define __NR_memfd_create              (__NR_Linux + 354)
 #define __NR_bpf                       (__NR_Linux + 355)
 #define __NR_execveat                  (__NR_Linux + 356)
+#define __NR_userfaultfd               (__NR_Linux + 357)
+#define __NR_membarrier                        (__NR_Linux + 358)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            356
+#define __NR_Linux_syscalls            358
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                356
+#define __NR_O32_Linux_syscalls                358
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_memfd_create              (__NR_Linux + 314)
 #define __NR_bpf                       (__NR_Linux + 315)
 #define __NR_execveat                  (__NR_Linux + 316)
+#define __NR_userfaultfd               (__NR_Linux + 317)
+#define __NR_membarrier                        (__NR_Linux + 318)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            316
+#define __NR_Linux_syscalls            318
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         316
+#define __NR_64_Linux_syscalls         318
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_memfd_create              (__NR_Linux + 318)
 #define __NR_bpf                       (__NR_Linux + 319)
 #define __NR_execveat                  (__NR_Linux + 320)
+#define __NR_userfaultfd               (__NR_Linux + 321)
+#define __NR_membarrier                        (__NR_Linux + 322)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            320
+#define __NR_Linux_syscalls            322
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                320
+#define __NR_N32_Linux_syscalls                322
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 4e62bf85d0b0f910cc56c726167cb933b35045d0..459cb017306c21eb63259b8cb0a406c65080cc8b 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/power/jz4740-battery.h>
 #include <linux/power/gpio-charger.h>
 
+#include <asm/mach-jz4740/gpio.h>
 #include <asm/mach-jz4740/jz4740_fb.h>
 #include <asm/mach-jz4740/jz4740_mmc.h>
 #include <asm/mach-jz4740/jz4740_nand.h>
index a74e181058b0fc8ce001fffc68bce8f95bc7f02d..8c6d76c9b2d69bf6603e32598e3b8d74cba15bf6 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/seq_file.h>
 
 #include <asm/mach-jz4740/base.h>
+#include <asm/mach-jz4740/gpio.h>
 
 #define JZ4740_GPIO_BASE_A (32*0)
 #define JZ4740_GPIO_BASE_B (32*1)
index 9f71c06aebf6313c4a0b2bac9b1bbdc6228cee31..209ded16806bf5a295ff202258f5b7ff42070940 100644 (file)
@@ -39,6 +39,7 @@
         mfc0   \dest, CP0_CONFIG, 3
        andi    \dest, \dest, MIPS_CONF3_MT
        beqz    \dest, \nomt
+        nop
        .endm
 
 .section .text.cps-vec
@@ -223,10 +224,9 @@ LEAF(excep_ejtag)
        END(excep_ejtag)
 
 LEAF(mips_cps_core_init)
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
        /* Check that the core implements the MT ASE */
        has_mt  t0, 3f
-        nop
 
        .set    push
        .set    mips64r2
@@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes)
        PTR_ADDU t0, t0, t1
 
        /* Calculate this VPE's ID. If the core doesn't support MT, use 0 */
+       li      t9, 0
+#ifdef CONFIG_MIPS_MT_SMP
        has_mt  ta2, 1f
-        li     t9, 0
 
        /* Find the number of VPEs present in the core */
        mfc0    t1, CP0_MVPCONF0
@@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes)
        /* Retrieve the VPE ID from EBase.CPUNum */
        mfc0    t9, $15, 1
        and     t9, t9, t1
+#endif
 
 1:     /* Calculate a pointer to this VPE's struct vpe_boot_config */
        li      t1, VPEBOOTCFG_SIZE
@@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes)
        PTR_L   ta3, COREBOOTCFG_VPECONFIG(t0)
        PTR_ADDU v0, v0, ta3
 
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
 
        /* If the core doesn't support MT then return */
        bnez    ta2, 1f
@@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes)
 
 2:     .set    pop
 
-#endif /* CONFIG_MIPS_MT */
+#endif /* CONFIG_MIPS_MT_SMP */
 
        /* Return */
        jr      ra
index 423ae83af1fb7043a1daff5d06a079658446de5a..3375745b91980013d76ad2e7d9cd632d31b26af9 100644 (file)
@@ -18,7 +18,7 @@
        .set pop
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                    struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti)
  */
        .align  7
        LEAF(resume)
        cpu_save_nonscratch a0
        LONG_S  ra, THREAD_REG31(a0)
 
-       /*
-        * check if we need to save FPU registers
-        */
-       .set push
-       .set noreorder
-       beqz    a3, 1f
-        PTR_L  t3, TASK_THREAD_INFO(a0)
-       .set pop
-
-       /*
-        * clear saved user stack CU1 bit
-        */
-       LONG_L  t0, ST_OFF(t3)
-       li      t1, ~ST0_CU1
-       and     t0, t0, t1
-       LONG_S  t0, ST_OFF(t3)
-
-       .set push
-       .set arch=mips64r2
-       fpu_save_double a0 t0 t1                # c0_status passed in t0
-                                               # clobbers t1
-       .set pop
-1:
-
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        /* Check if we need to store CVMSEG state */
        dmfc0   t0, $11,7       /* CvmMemCtl */
index 5087a4b72e6b9a4ae12fc8acdb6f7f8f318784c5..ac27ef7d4d0ebd8be86798d8191e32c720263625 100644 (file)
  */
 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
 
-/*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by TIF_USEDFPU.  In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                    struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti)
  */
 LEAF(resume)
        mfc0    t1, CP0_STATUS
@@ -50,22 +40,6 @@ LEAF(resume)
        cpu_save_nonscratch a0
        sw      ra, THREAD_REG31(a0)
 
-       beqz    a3, 1f
-
-       PTR_L   t3, TASK_THREAD_INFO(a0)
-
-       /*
-        * clear saved user stack CU1 bit
-        */
-       lw      t0, ST_OFF(t3)
-       li      t1, ~ST0_CU1
-       and     t0, t0, t1
-       sw      t0, ST_OFF(t3)
-
-       fpu_save_single a0, t0                  # clobbers t0
-
-1:
-
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        PTR_LA  t8, __stack_chk_guard
        LONG_L  t9, TASK_STACK_CANARY(a1)
index 4cc13508d967c4076e0b720b120487281003ed72..65a74e4f0f456d1de9d29ed83a62e1bcc6e644fc 100644 (file)
@@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp)
        lw      t1, PT_EPC(sp)          # skip syscall on return
 
        subu    v0, v0, __NR_O32_Linux  # check syscall number
-       sltiu   t0, v0, __NR_O32_Linux_syscalls + 1
        addiu   t1, 4                   # skip to next instruction
        sw      t1, PT_EPC(sp)
-       beqz    t0, illegal_syscall
-
-       sll     t0, v0, 2
-       la      t1, sys_call_table
-       addu    t1, t0
-       lw      t2, (t1)                # syscall routine
-       beqz    t2, illegal_syscall
 
        sw      a3, PT_R26(sp)          # save a3 for syscall restarting
 
@@ -96,6 +88,16 @@ loads_done:
        li      t1, _TIF_WORK_SYSCALL_ENTRY
        and     t0, t1
        bnez    t0, syscall_trace_entry # -> yes
+syscall_common:
+       sltiu   t0, v0, __NR_O32_Linux_syscalls + 1
+       beqz    t0, illegal_syscall
+
+       sll     t0, v0, 2
+       la      t1, sys_call_table
+       addu    t1, t0
+       lw      t2, (t1)                # syscall routine
+
+       beqz    t2, illegal_syscall
 
        jalr    t2                      # Do The Real Thing (TM)
 
@@ -116,7 +118,7 @@ o32_syscall_exit:
 
 syscall_trace_entry:
        SAVE_STATIC
-       move    s0, t2
+       move    s0, v0
        move    a0, sp
 
        /*
@@ -129,27 +131,18 @@ syscall_trace_entry:
 
 1:     jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
+
+       move    v0, s0                  # restore syscall
 
-       move    t0, s0
        RESTORE_STATIC
        lw      a0, PT_R4(sp)           # Restore argument registers
        lw      a1, PT_R5(sp)
        lw      a2, PT_R6(sp)
        lw      a3, PT_R7(sp)
-       jalr    t0
-
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sw      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       lw      t1, PT_R2(sp)           # syscall number
-       negu    v0                      # error
-       sw      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sw      v0, PT_R2(sp)           # result
+       j       syscall_common
 
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
@@ -599,3 +592,5 @@ EXPORT(sys_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
        PTR     sys_execveat
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
index a6f6b762c47a4c5a2d395e13a1d564964595abe1..e732981cf99fde26181f1db3bcb65ebd86ea4a0d 100644 (file)
@@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp)
        .set    at
 #endif
 
-       dsubu   t0, v0, __NR_64_Linux   # check syscall number
-       sltiu   t0, t0, __NR_64_Linux_syscalls + 1
 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
        ld      t1, PT_EPC(sp)          # skip syscall on return
        daddiu  t1, 4                   # skip to next instruction
        sd      t1, PT_EPC(sp)
 #endif
-       beqz    t0, illegal_syscall
-
-       dsll    t0, v0, 3               # offset into table
-       ld      t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
-                                       # syscall routine
 
        sd      a3, PT_R26(sp)          # save a3 for syscall restarting
 
@@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp)
        and     t0, t1, t0
        bnez    t0, syscall_trace_entry
 
+syscall_common:
+       dsubu   t2, v0, __NR_64_Linux
+       sltiu   t0, t2, __NR_64_Linux_syscalls + 1
+       beqz    t0, illegal_syscall
+
+       dsll    t0, t2, 3               # offset into table
+       dla     t2, sys_call_table
+       daddu   t0, t2, t0
+       ld      t2, (t0)                # syscall routine
+       beqz    t2, illegal_syscall
+
        jalr    t2                      # Do The Real Thing (TM)
 
        li      t0, -EMAXERRNO - 1      # error?
@@ -78,14 +82,14 @@ n64_syscall_exit:
 
 syscall_trace_entry:
        SAVE_STATIC
-       move    s0, t2
+       move    s0, v0
        move    a0, sp
        move    a1, v0
        jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
 
-       move    t0, s0
+       move    v0, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
        ld      a1, PT_R5(sp)
@@ -93,19 +97,9 @@ syscall_trace_entry:
        ld      a3, PT_R7(sp)
        ld      a4, PT_R8(sp)
        ld      a5, PT_R9(sp)
-       jalr    t0
-
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sd      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       ld      t1, PT_R2(sp)           # syscall number
-       dnegu   v0                      # error
-       sd      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sd      v0, PT_R2(sp)           # result
+       j       syscall_common
 
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 illegal_syscall:
        /* This also isn't a 64-bit syscall, throw an error.  */
@@ -436,4 +430,6 @@ EXPORT(sys_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 5315 */
        PTR     sys_execveat
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
        .size   sys_call_table,.-sys_call_table
index 4b2010654c463158b7dee80194de736195c04595..c794843975845df2e0a9d9c0c0fd14dad1ed17ec 100644 (file)
@@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
        and     t0, t1, t0
        bnez    t0, n32_syscall_trace_entry
 
+syscall_common:
        jalr    t2                      # Do The Real Thing (TM)
 
        li      t0, -EMAXERRNO - 1      # error?
@@ -75,9 +76,9 @@ n32_syscall_trace_entry:
        move    a1, v0
        jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
 
-       move    t0, s0
+       move    t2, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
        ld      a1, PT_R5(sp)
@@ -85,19 +86,9 @@ n32_syscall_trace_entry:
        ld      a3, PT_R7(sp)
        ld      a4, PT_R8(sp)
        ld      a5, PT_R9(sp)
-       jalr    t0
+       j       syscall_common
 
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sd      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       ld      t1, PT_R2(sp)           # syscall number
-       dnegu   v0                      # error
-       sd      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sd      v0, PT_R2(sp)           # result
-
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 not_n32_scall:
        /* This is not an n32 compatibility syscall, pass it on to
@@ -429,4 +420,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf
        PTR     compat_sys_execveat             /* 6320 */
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
        .size   sysn32_call_table,.-sysn32_call_table
index f543ff4feef99f8c4ce02554f4dd54da1752b6de..6369cfd390c6330269b05eb095020dafa6cdc048 100644 (file)
@@ -87,6 +87,7 @@ loads_done:
        and     t0, t1, t0
        bnez    t0, trace_a_syscall
 
+syscall_common:
        jalr    t2                      # Do The Real Thing (TM)
 
        li      t0, -EMAXERRNO - 1      # error?
@@ -130,9 +131,9 @@ trace_a_syscall:
 
 1:     jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
 
-       move    t0, s0
+       move    t2, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
        ld      a1, PT_R5(sp)
@@ -142,19 +143,9 @@ trace_a_syscall:
        ld      a5, PT_R9(sp)
        ld      a6, PT_R10(sp)
        ld      a7, PT_R11(sp)          # For indirect syscalls
-       jalr    t0
+       j       syscall_common
 
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sd      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       ld      t1, PT_R2(sp)           # syscall number
-       dnegu   v0                      # error
-       sd      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sd      v0, PT_R2(sp)           # result
-
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
@@ -584,4 +575,6 @@ EXPORT(sys32_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
        PTR     compat_sys_execveat
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
        .size   sys32_call_table,.-sys32_call_table
index a914dc1cb6d1bc339cf44cc0c5aeac887a2e5f74..d8117be729a20ee26d2df8bb42a6f58b9670f513 100644 (file)
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
        else
 #endif
 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
                dma_flag = __GFP_DMA;
        else
 #endif
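[Editor's note] DMA_BIT_MASK(n) expands to an n-bit all-ones value, so the rewritten test compares the device's coherent mask against the platform's real physical address width instead of a hard-coded 64 bits. A userspace restatement, with the macro body taken from the kernel's linux/dma-mapping.h:

#include <stdint.h>
#include <stdio.h>

/* Same definition as the kernel's linux/dma-mapping.h. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* With a 32-bit phys_addr_t the new test reads
	 * coherent_dma_mask < DMA_BIT_MASK(32): */
	printf("%016llx\n", DMA_BIT_MASK(sizeof(uint32_t) * 8));	/* 00000000ffffffff */
	printf("%016llx\n", DMA_BIT_MASK(64));				/* ffffffffffffffff */
	return 0;
}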
index dabf4179cd7e373ac39dde61a6027f631a4544bc..5d2e0c8d29c0bd0003bae3f7337edbcab5a403c4 100644 (file)
 
 LEAF(sk_load_word)
        is_offset_negative(word)
-       .globl sk_load_word_positive
-sk_load_word_positive:
+FEXPORT(sk_load_word_positive)
        is_offset_in_header(4, word)
        /* Offset within header boundaries */
        PTR_ADDU t1, $r_skb_data, offset
+       .set    reorder
        lw      $r_A, 0(t1)
+       .set    noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        wsbh    t0, $r_A
@@ -85,12 +86,13 @@ sk_load_word_positive:
 
 LEAF(sk_load_half)
        is_offset_negative(half)
-       .globl sk_load_half_positive
-sk_load_half_positive:
+FEXPORT(sk_load_half_positive)
        is_offset_in_header(2, half)
        /* Offset within header boundaries */
        PTR_ADDU t1, $r_skb_data, offset
+       .set    reorder
        lh      $r_A, 0(t1)
+       .set    noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        wsbh    t0, $r_A
@@ -109,8 +111,7 @@ sk_load_half_positive:
 
 LEAF(sk_load_byte)
        is_offset_negative(byte)
-       .globl sk_load_byte_positive
-sk_load_byte_positive:
+FEXPORT(sk_load_byte_positive)
        is_offset_in_header(1, byte)
        /* Offset within header boundaries */
        PTR_ADDU t1, $r_skb_data, offset
index 6edb9ee6128ebc4d45de622afdf118f0bf5d12ee..1c8dd0f5cd5d1567126f42b67112b5b0f91962a8 100644 (file)
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 914864eb5a25daf87688f1565b33700b289be0bf..d63330e88379dcc591e29e645b5363f063122f9d 100644 (file)
@@ -61,4 +61,5 @@ generic-y += types.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 6bc0ee4b1070a83de003d1c74ec64f818bd3dacb..2c041b535a64ed58d3be2aa79916f94308190b92 100644 (file)
@@ -111,7 +111,7 @@ CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y
index 7991f37e5fe2a174fd3284a4fb1b5e72d2ab267a..36871a4bfa54293b2e70851ea1aa4c1a379dd1dd 100644 (file)
@@ -114,7 +114,7 @@ CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y
index cab6753f1be56e3f1810be80d50cf94524eaa04b..3f191f573d4f1f487b5e417ddd55eaed8d49951d 100644 (file)
@@ -61,8 +61,13 @@ struct machdep_calls {
                                               unsigned long addr,
                                               unsigned char *hpte_slot_array,
                                               int psize, int ssize, int local);
-       /* special for kexec, to be called in real mode, linear mapping is
-        * destroyed as well */
+       /*
+        * Special for kexec.
+        * To be called in real mode with interrupts disabled. No locks are
+        * taken; as such, concurrent access on pre-POWER5 hardware could
+        * result in a deadlock.
+        * The linear mapping is destroyed as well.
+        */
        void            (*hpte_clear_all)(void);
 
        void __iomem *  (*ioremap)(phys_addr_t addr, unsigned long size,
index 5b3a903adae6d761effa550a802ed5d6a2aeb656..e4396a7d0f7cf5627a92ea8c07756aba6bc52c7a 100644 (file)
@@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
        return (val + c->high_bits) & ~rhs;
 }
 
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+       return ~1ul << __fls(mask);
+}
+
 #else
 
 #ifdef CONFIG_64BIT
index 13befa35d8a8ecdd31611aadb42c6be206ba743e..c8822af10a587389999473171db475eb5462714b 100644 (file)
@@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
  * be when they isi), and we are the only one left.  We rely on our kernel
  * mapping being 0xC0's and the hardware ignoring those two real bits.
  *
+ * This must be called with interrupts disabled.
+ *
+ * Taking the native_tlbie_lock is unsafe here due to the possibility of
+ * lockdep being on. On pre-POWER5 hardware, not taking the lock could
+ * cause a deadlock; on POWER5 and newer, not taking the lock is fine. This
+ * only gets called during boot before secondary CPUs have come up, and
+ * during crashdump, when all bets are off anyway.
+ *
  * TODO: add batching support when enabled.  Remember, no dynamic memory here,
  * although there is the control page available...
  */
 static void native_hpte_clear(void)
 {
        unsigned long vpn = 0;
-       unsigned long slot, slots, flags;
+       unsigned long slot, slots;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
@@ -596,13 +604,6 @@ static void native_hpte_clear(void)
 
        pteg_count = htab_hash_mask + 1;
 
-       local_irq_save(flags);
-
-       /* we take the tlbie lock and hold it.  Some hardware will
-        * deadlock if we try to tlbie from two processors at once.
-        */
-       raw_spin_lock(&native_tlbie_lock);
-
        slots = pteg_count * HPTES_PER_GROUP;
 
        for (slot = 0; slot < slots; slot++, hptep++) {
@@ -614,8 +615,8 @@ static void native_hpte_clear(void)
                hpte_v = be64_to_cpu(hptep->v);
 
                /*
-                * Call __tlbie() here rather than tlbie() since we
-                * already hold the native_tlbie_lock.
+                * Call __tlbie() here rather than tlbie() since we can't take the
+                * native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@@ -625,8 +626,6 @@ static void native_hpte_clear(void)
        }
 
        asm volatile("eieio; tlbsync; ptesync":::"memory");
-       raw_spin_unlock(&native_tlbie_lock);
-       local_irq_restore(flags);
 }
 
 /*
index 230f3a7cdea45f8d160797fe55eb7b154c9c9dba..4296d55e88f30afa7cb91fd54d06e6b2a532d577 100644 (file)
@@ -487,9 +487,12 @@ int opal_machine_check(struct pt_regs *regs)
         *    PRD component would have already got notified about this
         *    error through other channels.
         *
-        * In any case, let us just fall through. We anyway heading
-        * down to panic path.
+        * If hardware marked this as an unrecoverable MCE, we are
+        * going to panic anyway. Even if it didn't, it's not safe to
+        * continue at this point, so we should explicitly panic.
         */
+
+       panic("PowerNV Unrecovered Machine Check");
        return 0;
 }
 
index 09787139834ddd8bd01fbce0448ca34297a1cd5c..3db53e8aff9279cfe761ac9f43926559e347eaf4 100644 (file)
@@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = {
        .key = OS_AREA_DB_KEY_RTC_DIFF
 };
 
-static const struct os_area_db_id os_area_db_id_video_mode = {
-       .owner = OS_AREA_DB_OWNER_LINUX,
-       .key = OS_AREA_DB_KEY_VIDEO_MODE
-};
-
 #define SECONDS_FROM_1970_TO_2000 946684800LL
 
 /**
index d4788111c16171135422a0ef29e23e2eb866236d..fac6ac9790fad18efc2f587757068f87ca7765fd 100644 (file)
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
 
 KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 
index 0c98f1508542c9f900ee2bed1394413b8d5d8d88..ed7da281df66743f0badff631c9183bf318ec9b7 100644 (file)
@@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index 82083e1fbdc4c6cc9f4ad6a2c0cfbfbcd3af1210..9858b14cde1edccdcda3a217446f547641d98944 100644 (file)
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index c05c9e0821e3bcd956b929c591e41b5445ac9565..7f14f80717d4975161a696dd2e803d4ee87011d6 100644 (file)
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index 5ad26dd94d77e83fedeba5c7f71c8eba0ff2ab29..9043d2e1e2ae0b3c01a7b6588bed848f44dd92ff 100644 (file)
@@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 2a0efc63b9e5afb29cb2e6edd109dd9848353b27..dc19ee0c92aaa693d2ad3b8c4c614b3e0e427de7 100644 (file)
@@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
 int __node_distance(int a, int b);
 void numa_update_cpu_topology(void);
 
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 extern int numa_debug_enabled;
 
 #else
index 27ebde643933a908c1ebb2a75ff723d8d43a65f6..94fc55fc72ce88a18eb73d3f43d5a7895ac6cd9c 100644 (file)
@@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
 #define cpumask_of_node cpumask_of_node
 static inline const struct cpumask *cpumask_of_node(int node)
 {
-       return node_to_cpumask_map[node];
+       return &node_to_cpumask_map[node];
 }
 
 /*
index 48c9af7a76831ea63ef6ef92760df02f15c1188c..3aeeb1b562c00ff9c7afe559452fdc2c06457116 100644 (file)
@@ -176,6 +176,7 @@ int main(void)
        DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
        DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
        DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
+       DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
        DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
        DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
        DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
index 09b039d7983d802f2674504439e43e21c03d4cae..582fe44ab07cc69aaef1d4f782f6f89364914974 100644 (file)
@@ -733,6 +733,14 @@ ENTRY(psw_idle)
        stg     %r3,__SF_EMPTY(%r15)
        larl    %r1,.Lpsw_idle_lpsw+4
        stg     %r1,__SF_EMPTY+8(%r15)
+#ifdef CONFIG_SMP
+       larl    %r1,smp_cpu_mtid
+       llgf    %r1,0(%r1)
+       ltgr    %r1,%r1
+       jz      .Lpsw_idle_stcctm
+       .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
+.Lpsw_idle_stcctm:
+#endif
        STCK    __CLOCK_IDLE_ENTER(%r2)
        stpt    __TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
@@ -1159,7 +1167,27 @@ cleanup_critical:
        jhe     1f
        mvc     __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
        mvc     __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1:     # account system time going idle
+1:     # calculate idle cycles
+#ifdef CONFIG_SMP
+       clg     %r9,BASED(.Lcleanup_idle_insn)
+       jl      3f
+       larl    %r1,smp_cpu_mtid
+       llgf    %r1,0(%r1)
+       ltgr    %r1,%r1
+       jz      3f
+       .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
+       larl    %r3,mt_cycles
+       ag      %r3,__LC_PERCPU_OFFSET
+       la      %r4,__SF_EMPTY+16(%r15)
+2:     lg      %r0,0(%r3)
+       slg     %r0,0(%r4)
+       alg     %r0,64(%r4)
+       stg     %r0,0(%r3)
+       la      %r3,8(%r3)
+       la      %r4,8(%r4)
+       brct    %r1,2b
+#endif
+3:     # account system time going idle
        lg      %r9,__LC_STEAL_TIMER
        alg     %r9,__CLOCK_IDLE_ENTER(%r2)
        slg     %r9,__LC_LAST_UPDATE_CLOCK
index c8653435c70d9d203dbe05deed3c96d0aad6cdd9..dafc44f519c340329581c8a5b2fda6fdb6920252 100644 (file)
@@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock);
 static atomic64_t virt_timer_current;
 static atomic64_t virt_timer_elapsed;
 
-static DEFINE_PER_CPU(u64, mt_cycles[32]);
+DEFINE_PER_CPU(u64, mt_cycles[8]);
 static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
@@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed)
        return elapsed >= atomic64_read(&virt_timer_current);
 }
 
+static void update_mt_scaling(void)
+{
+       u64 cycles_new[8], *cycles_old;
+       u64 delta, fac, mult, div;
+       int i;
+
+       stcctm5(smp_cpu_mtid + 1, cycles_new);
+       cycles_old = this_cpu_ptr(mt_cycles);
+       fac = 1;
+       mult = div = 0;
+       for (i = 0; i <= smp_cpu_mtid; i++) {
+               delta = cycles_new[i] - cycles_old[i];
+               div += delta;
+               mult *= i + 1;
+               mult += delta * fac;
+               fac *= i + 1;
+       }
+       div *= fac;
+       if (div > 0) {
+               /* Update scaling factor */
+               __this_cpu_write(mt_scaling_mult, mult);
+               __this_cpu_write(mt_scaling_div, div);
+               memcpy(cycles_old, cycles_new,
+                      sizeof(u64) * (smp_cpu_mtid + 1));
+       }
+       __this_cpu_write(mt_scaling_jiffies, jiffies_64);
+}
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, clock, user, system, steal;
        u64 user_scaled, system_scaled;
-       int i;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
@@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
-       /* Do MT utilization calculation */
+       /* Update MT utilization calculation */
        if (smp_cpu_mtid &&
-           time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
-               u64 cycles_new[32], *cycles_old;
-               u64 delta, fac, mult, div;
-
-               cycles_old = this_cpu_ptr(mt_cycles);
-               if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
-                       fac = 1;
-                       mult = div = 0;
-                       for (i = 0; i <= smp_cpu_mtid; i++) {
-                               delta = cycles_new[i] - cycles_old[i];
-                               div += delta;
-                               mult *= i + 1;
-                               mult += delta * fac;
-                               fac *= i + 1;
-                       }
-                       div *= fac;
-                       if (div > 0) {
-                               /* Update scaling factor */
-                               __this_cpu_write(mt_scaling_mult, mult);
-                               __this_cpu_write(mt_scaling_div, div);
-                               memcpy(cycles_old, cycles_new,
-                                      sizeof(u64) * (smp_cpu_mtid + 1));
-                       }
-               }
-               __this_cpu_write(mt_scaling_jiffies, jiffies_64);
-       }
+           time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+               update_mt_scaling();
 
        user = S390_lowcore.user_timer - ti->user_timer;
        S390_lowcore.steal_timer -= user;
@@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk)
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 
+       /* Update MT utilization calculation */
+       if (smp_cpu_mtid &&
+           time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+               update_mt_scaling();
+
        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
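
In the update_mt_scaling() loop above, mult/div is an integer encoding of sum(delta[i] / (i + 1)) / sum(delta[i]): each hardware thread's cycle delta is weighted by 1/(i + 1), and both numerator and denominator are multiplied through by fac = (mtid + 1)! to stay in integer arithmetic. A user-space sketch of the same arithmetic, with made-up cycle deltas for a two-thread core:

/* User-space sketch of the mult/div accumulation above; the deltas
 * are hypothetical, and mtid = 1 means two hardware threads per core. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t delta[2] = { 1000, 600 };  /* hypothetical per-thread cycles */
	unsigned int mtid = 1;
	uint64_t fac = 1, mult = 0, div = 0;

	for (unsigned int i = 0; i <= mtid; i++) {
		div += delta[i];
		mult *= i + 1;
		mult += delta[i] * fac;
		fac *= i + 1;
	}
	div *= fac;
	/* mult/div == (delta[0] + delta[1]/2) / (delta[0] + delta[1]) */
	printf("scaling factor = %llu/%llu (~%.3f)\n",
	       (unsigned long long)mult, (unsigned long long)div,
	       (double)mult / (double)div);   /* 2600/3200 (~0.812) */
	return 0;
}
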
index 7de4e2f780d789478d4d700821944f96b3846586..30b2698a28e29a6991a7116da1877e5bdee1963e 100644 (file)
@@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
                cpumask_copy(&top->thread_mask, &core->mask);
                cpumask_copy(&top->core_mask, &core_mc(core)->mask);
                cpumask_copy(&top->book_mask, &core_book(core)->mask);
-               cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
+               cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
                top->node_id = core_node(core)->id;
        }
 }
@@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
 
        /* Clear all node masks */
        for (i = 0; i < MAX_NUMNODES; i++)
-               cpumask_clear(node_to_cpumask_map[i]);
+               cpumask_clear(&node_to_cpumask_map[i]);
 
        /* Rebuild all masks */
        toptree_for_each(core, numa, CORE)
index 09b1d2355bd9849ab583bb52c33eb789b4f9804b..43f32ce60aa3d98af0b7665090fa3eb080d12fa7 100644 (file)
@@ -23,7 +23,7 @@
 pg_data_t *node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
 
-cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 EXPORT_SYMBOL(node_to_cpumask_map);
 
 const struct numa_mode numa_mode_plain = {
@@ -144,7 +144,7 @@ void __init numa_setup(void)
 static int __init numa_init_early(void)
 {
        /* Attach all possible CPUs to node 0 for now. */
-       cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
+       cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
        return 0;
 }
 early_initcall(numa_init_early);
index 92ffe397b893c553c8504f10525a5e3d9d1e9e34..a05218ff3fe465b6e4812d7655360dc1b495a519 100644 (file)
@@ -13,3 +13,4 @@ generic-y += sections.h
 generic-y += trace_clock.h
 generic-y += xor.h
 generic-y += serial.h
+generic-y += word-at-a-time.h
index fe20d14ae051a5892350185d55ce1adfc352e538..ceb5201a30ed36899010715143b8679cd4a819fb 100644 (file)
@@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
 
 #define clear_page(page)       memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
+#define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)
 
 struct page;
 struct vm_area_struct;
index 2e48eb8813ffa2fccf6df34ad5cee3bcf1857f94..c90930de76ba8670041598ba0d6461ef439c9539 100644 (file)
@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
+                       .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
+                       .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
index 6bf2479a12fbe2a9c82b4275e40ac9e85ac191ed..561a84d93cf682a400a7555862f065f1fb04c84c 100644 (file)
@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = CAMELLIA_MIN_KEY_SIZE,
                        .max_keysize    = CAMELLIA_MAX_KEY_SIZE,
+                       .ivsize         = CAMELLIA_BLOCK_SIZE,
                        .setkey         = camellia_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
index dd6a34fa6e19d2e36f5d30de6256655dc0ee2e0c..61af794aa2d31d5df27d0a318ac8b8f9d605637b 100644 (file)
@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
+                       .ivsize         = DES_BLOCK_SIZE,
                        .setkey         = des_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
                .blkcipher = {
                        .min_keysize    = DES3_EDE_KEY_SIZE,
                        .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .ivsize         = DES3_EDE_BLOCK_SIZE,
                        .setkey         = des3_ede_set_key,
                        .encrypt        = cbc3_encrypt,
                        .decrypt        = cbc3_decrypt,
index ee186e13dfe6fde92c9127aa07dccc474d1253d2..f102048d9c0e78a31b6773715a491e85e755a89c 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
 #include <gxio/iorpc_globals.h>
 #include <gxio/iorpc_mpipe.h>
 /* HACK: Avoid pointless "shadow" warnings. */
 #define link link_shadow
 
-/**
- * strscpy - Copy a C-string into a sized buffer, but only if it fits
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Use this routine to avoid copying too-long strings.
- * The routine returns the total number of bytes copied
- * (including the trailing NUL) or zero if the buffer wasn't
- * big enough.  To ensure that programmers pay attention
- * to the return code, the destination has a single NUL
- * written at the front (if size is non-zero) when the
- * buffer is not big enough.
- */
-static size_t strscpy(char *dest, const char *src, size_t size)
-{
-       size_t len = strnlen(src, size) + 1;
-       if (len > size) {
-               if (size)
-                       dest[0] = '\0';
-               return 0;
-       }
-       memcpy(dest, src, len);
-       return len;
-}
-
 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 {
        char file[32];
@@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name)
        if (!context)
                return GXIO_ERR_NO_DEVICE;
 
-       if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+       if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
                return GXIO_ERR_NO_DEVICE;
 
        return gxio_mpipe_info_instance_aux(context, name);
@@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
 
        rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
        if (rv >= 0) {
-               if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
+               if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
                        return GXIO_ERR_INVAL_MEMORY_SIZE;
                memcpy(link_mac, mac.mac, sizeof(mac.mac));
        }
@@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
        _gxio_mpipe_link_name_t name;
        int rv;
 
-       if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+       if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
                return GXIO_ERR_NO_DEVICE;
 
        rv = gxio_mpipe_link_open_aux(context, name, flags);
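
With the private helper gone, these call sites use the strscpy() now provided by <linux/string.h>, which returns the number of characters copied rather than the private copy's byte count, and -E2BIG instead of 0 when the source does not fit; hence the `== 0` checks become `< 0`. A simplified user-space stand-in for that return convention (not the kernel's actual implementation):

/* Simplified stand-in for the kernel strscpy() return convention. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static long strscpy_demo(char *dest, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {              /* source does not fit: truncate */
		if (size) {
			memcpy(dest, src, size - 1);
			dest[size - 1] = '\0';
		}
		return -E2BIG;          /* the old private copy returned 0 here */
	}
	memcpy(dest, src, len + 1);
	return len;                     /* characters copied, excluding NUL */
}

int main(void)
{
	char buf[8];

	printf("%ld\n", strscpy_demo(buf, "short", sizeof(buf)));        /* 5 */
	printf("%ld\n", strscpy_demo(buf, "far too long", sizeof(buf))); /* -7 */
	return 0;
}
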
index 9e5ce0d7b292160d5f544fcda08c00ea6c04f168..b66a693c2c3453e4f4642fea133890ab268a32d8 100644 (file)
@@ -6,7 +6,7 @@
 struct word_at_a_time { /* unused */ };
 #define WORD_AT_A_TIME_CONSTANTS {}
 
-/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
+/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
 static inline unsigned long has_zero(unsigned long val, unsigned long *data,
                                     const struct word_at_a_time *c)
 {
@@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask)
 #endif
 }
 
+#ifdef __BIG_ENDIAN
+#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
+#else
+#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
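
zero_bytemask() converts the bit pattern produced by has_zero() into a byte mask covering the word up to its first NUL, so callers such as the dentry-name hash can discard the bytes after the terminator. A little-endian sketch using the classic scalar zero-byte trick (tile's v1cmpeqi marks zero bytes with 0x01 values instead, but the two masks differ only inside the NUL byte, which is zero anyway, so masking the word gives the same result):

/* Little-endian sketch of has_zero()/zero_bytemask(), using the classic
 * scalar bit trick rather than tile's SIMD instruction; assumes a
 * 64-bit unsigned long. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ONES  0x0101010101010101ul
#define HIGHS 0x8080808080808080ul

int main(void)
{
	unsigned long v;
	char word[8] = "abc";           /* NUL at byte 3 */

	memcpy(&v, word, sizeof(v));
	unsigned long mask = (v - ONES) & ~v & HIGHS;        /* has_zero() */
	unsigned long bytemask = (2ul << __builtin_ctzl(mask)) - 1;

	printf("first NUL at byte %d\n", __builtin_ctzl(mask) >> 3); /* 3 */
	printf("bytemask = %#lx\n", bytemask);  /* 0xffffffff: bytes 0..3 */
	return 0;
}
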
index 149ec55f9c46abd97cbb9b69c7a55afa23e23393..904f3ebf4220153f816a1deca118381190f44ec4 100644 (file)
@@ -25,4 +25,5 @@ generic-y += preempt.h
 generic-y += switch_to.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 1fc7a286dc6f342319ec06a81b53a087b9708ef9..256c45b3ae343c983e667b01404d8fb3e3667b4a 100644 (file)
@@ -62,4 +62,5 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 328c8352480c5dcfd34d72a70d01d6a57e5bb515..96d058a871007e7b119bd3a6d3ff14eb9a5a61eb 100644 (file)
@@ -1308,6 +1308,7 @@ config HIGHMEM
 config X86_PAE
        bool "PAE (Physical Address Extension) Support"
        depends on X86_32 && !HIGHMEM4G
+       select SWIOTLB
        ---help---
          PAE is required for NX support, and furthermore enables
          larger swapspace support for non-overcommit purposes. It
index 80a0e4389c9ad3f5e6e1f6d8bc5292e391801ff2..bacaa13acac544e037571bd292e91f5239256edc 100644 (file)
@@ -554,6 +554,11 @@ static int __init camellia_aesni_init(void)
 {
        const char *feature_name;
 
+       if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+               pr_info("AVX or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+
        if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
                pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
index e6cf2ad350d15a8e6ca207a2618c77d820aacc22..9727b3b48bd174c8ae8297bd94e897484375618a 100644 (file)
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_HWP                ( 7*32+ 10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
+#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
 #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
 #define X86_FEATURE_HWP_EPP    ( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
index 2beee03820889b6c6b436e884a4e83e023d76f9a..3a36ee704c307414b305e1cac21ec9aba4de5872 100644 (file)
@@ -1226,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
 
-int __x86_set_memory_region(struct kvm *kvm,
-                           const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
-                         const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 
index 83aea8055119e2f26beb1c909536609d30e88943..4c20dd333412db5b367d0625e9b7cf69a7891493 100644 (file)
@@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc)
        return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 }
 
-static inline int
+static inline long
 HYPERVISOR_memory_op(unsigned int cmd, void *arg)
 {
-       return _hypercall2(int, memory_op, cmd, arg);
+       return _hypercall2(long, memory_op, cmd, arg);
 }
 
 static inline int
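
Returning long matters because memory_op subcommands can report quantities, such as a maximum page frame number on a large host, that do not fit in an int; the xen_get_max_pages() hunk later in this diff widens its local ret to match. A sketch of the truncation the old int return invited (conversion of out-of-range values to int is implementation-defined, but wraps on the usual targets; assumes an LP64 build):

/* Sketch of the truncation hazard when a long result lands in an int. */
#include <stdio.h>

int main(void)
{
	long max_pfn = 0x100000000L;    /* hypothetical frame number > INT_MAX */
	int truncated = (int)max_pfn;   /* what the old int return produced */

	printf("long: %ld, int: %d\n", max_pfn, truncated); /* 4294967296, 0 */
	return 0;
}
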
index b0ae1c4dc79142d9284d14e76ac181f1c271ad64..217909b4d6f56d84892655f680f974eaf83ec78e 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __ASM_X86_BITSPERLONG_H
 #define __ASM_X86_BITSPERLONG_H
 
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__ILP32__)
 # define __BITS_PER_LONG 64
 #else
 # define __BITS_PER_LONG 32
index 381c8b9b3a33570fcf88b82fa69d1fc43298d89f..20e242ea1bc46b5f5828c7b95071d920853b7609 100644 (file)
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
 
-static void (*hv_kexec_handler)(void);
-static void (*hv_crash_handler)(struct pt_regs *regs);
-
 #if IS_ENABLED(CONFIG_HYPERV)
 static void (*vmbus_handler)(void);
+static void (*hv_kexec_handler)(void);
+static void (*hv_crash_handler)(struct pt_regs *regs);
 
 void hyperv_vector_handler(struct pt_regs *regs)
 {
@@ -96,8 +95,8 @@ void hv_remove_crash_handler(void)
        hv_crash_handler = NULL;
 }
 EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
-#endif
 
+#ifdef CONFIG_KEXEC_CORE
 static void hv_machine_shutdown(void)
 {
        if (kexec_in_progress && hv_kexec_handler)
@@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
                hv_crash_handler(regs);
        native_machine_crash_shutdown(regs);
 }
-
+#endif /* CONFIG_KEXEC_CORE */
+#endif /* CONFIG_HYPERV */
 
 static uint32_t  __init ms_hyperv_platform(void)
 {
@@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void)
        no_timer_check = 1;
 #endif
 
+#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
        machine_ops.shutdown = hv_machine_shutdown;
        machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+#endif
        mark_tsc_unstable("running on Hyper-V");
 }
 
index 3d423a101fae05ccd722a4e564b83ba5e0112b6e..608fb26c72544c5ee0fd7793c0703f642c7ed60f 100644 (file)
@@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
                { X86_FEATURE_PTS,              CR_EAX, 6, 0x00000006, 0 },
                { X86_FEATURE_HWP,              CR_EAX, 7, 0x00000006, 0 },
-               { X86_FEATURE_HWP_NOITFY,       CR_EAX, 8, 0x00000006, 0 },
+               { X86_FEATURE_HWP_NOTIFY,       CR_EAX, 8, 0x00000006, 0 },
                { X86_FEATURE_HWP_ACT_WINDOW,   CR_EAX, 9, 0x00000006, 0 },
                { X86_FEATURE_HWP_EPP,          CR_EAX,10, 0x00000006, 0 },
                { X86_FEATURE_HWP_PKG_REQ,      CR_EAX,11, 0x00000006, 0 },
index e068d6683dba6bab6bd4c9f9804a621385e3baee..74ca2fe7a0b3a60d7fc6c71c2d5dda30fcf3e767 100644 (file)
@@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
-                               unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
 {
-       int *nr_ranges = arg;
+       unsigned int *nr_ranges = arg;
 
        (*nr_ranges)++;
        return 0;
@@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
 
        ced->image = image;
 
-       walk_system_ram_range(0, -1, &nr_ranges,
+       walk_system_ram_res(0, -1, &nr_ranges,
                                get_nr_ram_ranges_callback);
 
        ced->max_nr_ranges = nr_ranges;
index 6d0e62ae8516760d6ae4af7deaa31418e82238fa..39e585a554b71d2a469c8b8c3ba487be7edc87a7 100644 (file)
@@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
 
+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
+unsigned long get_wchan(struct task_struct *p)
+{
+       unsigned long start, bottom, top, sp, fp, ip;
+       int count = 0;
+
+       if (!p || p == current || p->state == TASK_RUNNING)
+               return 0;
+
+       start = (unsigned long)task_stack_page(p);
+       if (!start)
+               return 0;
+
+       /*
+        * Layout of the stack page:
+        *
+        * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+        * PADDING
+        * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+        * stack
+        * ----------- bottom = start + sizeof(thread_info)
+        * thread_info
+        * ----------- start
+        *
+        * The task's stack pointer points at the location where the
+        * frame pointer is stored. The data on the stack is:
+        * ... IP FP ... IP FP
+        *
+        * We need to read FP and IP, so we need to adjust the upper
+        * bound by another unsigned long.
+        */
+       top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+       top -= 2 * sizeof(unsigned long);
+       bottom = start + sizeof(struct thread_info);
+
+       sp = READ_ONCE(p->thread.sp);
+       if (sp < bottom || sp > top)
+               return 0;
+
+       fp = READ_ONCE(*(unsigned long *)sp);
+       do {
+               if (fp < bottom || fp > top)
+                       return 0;
+               ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
+               if (!in_sched_functions(ip))
+                       return ip;
+               fp = READ_ONCE(*(unsigned long *)fp);
+       } while (count++ < 16 && p->state != TASK_RUNNING);
+       return 0;
+}
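
The replacement get_wchan() walks saved (frame pointer, return address) pairs, bounding every pointer to the task's stack page and using READ_ONCE() because the task may start running under us. The same ... IP FP ... chain can be walked in user space when frame pointers are preserved; a sketch, assuming an x86-64 build with -O0 -fno-omit-frame-pointer:

/* User-space sketch of the frame-pointer walk; the outermost frame
 * pointer is zeroed at process entry, which terminates the loop. */
#include <stdio.h>
#include <stdint.h>

static void walk(void)
{
	uintptr_t *fp = __builtin_frame_address(0);

	for (int i = 0; i < 4 && fp; i++) {
		uintptr_t ip = fp[1];        /* return address above saved FP */
		printf("frame %d: ip=%#lx\n", i, (unsigned long)ip);
		fp = (uintptr_t *)fp[0];     /* follow the saved frame pointer */
	}
}

int main(void)
{
	walk();
	return 0;
}
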
index c13df2c735f82765015a4924b08aed9904c27419..737527b40e5bf40bb1e757b635cc30994db911bd 100644 (file)
@@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        return prev_p;
 }
-
-#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
-#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
-
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long bp, sp, ip;
-       unsigned long stack_page;
-       int count = 0;
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-       stack_page = (unsigned long)task_stack_page(p);
-       sp = p->thread.sp;
-       if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
-               return 0;
-       /* include/asm-i386/system.h:switch_to() pushes bp last. */
-       bp = *(unsigned long *) sp;
-       do {
-               if (bp < stack_page || bp > top_ebp+stack_page)
-                       return 0;
-               ip = *(unsigned long *) (bp+4);
-               if (!in_sched_functions(ip))
-                       return ip;
-               bp = *(unsigned long *) bp;
-       } while (count++ < 16);
-       return 0;
-}
-
index 3c1bbcf129245aa7909708af46489d73f2e9c297..b35921a670b25b03878e3f9ac2e96abfae0e910c 100644 (file)
@@ -499,30 +499,6 @@ void set_personality_ia32(bool x32)
 }
 EXPORT_SYMBOL_GPL(set_personality_ia32);
 
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long stack;
-       u64 fp, ip;
-       int count = 0;
-
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-       stack = (unsigned long)task_stack_page(p);
-       if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
-               return 0;
-       fp = *(u64 *)(p->thread.sp);
-       do {
-               if (fp < (unsigned long)stack ||
-                   fp >= (unsigned long)stack+THREAD_SIZE)
-                       return 0;
-               ip = *(u64 *)(fp+8);
-               if (!in_sched_functions(ip))
-                       return ip;
-               fp = *(u64 *)fp;
-       } while (count++ < 16);
-       return 0;
-}
-
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
        int ret = 0;
index b372a7557c16c7d8391fffafdf0b1c74b49c4822..9da95b9daf8deb83af606ae0fffb73f7fab74ff2 100644 (file)
@@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        u64 val, cr0, cr4;
        u32 base3;
        u16 selector;
-       int i;
+       int i, r;
 
        for (i = 0; i < 16; i++)
                *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
+       r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+       if (r != X86EMUL_CONTINUE)
+               return r;
+
        for (i = 0; i < 6; i++) {
-               int r = rsm_load_seg_64(ctxt, smbase, i);
+               r = rsm_load_seg_64(ctxt, smbase, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
-       return rsm_enter_protected_mode(ctxt, cr0, cr4);
+       return X86EMUL_CONTINUE;
 }
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
index 06ef4908ba61d2e25ead615953a2f42c923a9219..6a8bc64566abde57f8914f103a6b5d9d49ed8ae8 100644 (file)
@@ -4105,17 +4105,13 @@ static void seg_setup(int seg)
 static int alloc_apic_access_page(struct kvm *kvm)
 {
        struct page *page;
-       struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
        mutex_lock(&kvm->slots_lock);
        if (kvm->arch.apic_access_page_done)
                goto out;
-       kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-       kvm_userspace_mem.flags = 0;
-       kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
-       kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+       r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+                                   APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
        if (r)
                goto out;
 
@@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 {
        /* Called with kvm->slots_lock held. */
 
-       struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
        BUG_ON(kvm->arch.ept_identity_pagetable_done);
 
-       kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-       kvm_userspace_mem.flags = 0;
-       kvm_userspace_mem.guest_phys_addr =
-               kvm->arch.ept_identity_map_addr;
-       kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+       r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+                                   kvm->arch.ept_identity_map_addr, PAGE_SIZE);
 
        return r;
 }
@@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
        int ret;
-       struct kvm_userspace_memory_region tss_mem = {
-               .slot = TSS_PRIVATE_MEMSLOT,
-               .guest_phys_addr = addr,
-               .memory_size = PAGE_SIZE * 3,
-               .flags = 0,
-       };
 
-       ret = x86_set_memory_region(kvm, &tss_mem);
+       ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+                                   PAGE_SIZE * 3);
        if (ret)
                return ret;
        kvm->arch.tss_addr = addr;
index 92511d4b72364a978db0b38628b9449907ee1832..9a9a198303219b6430159af03d4d1e1d898ec6f7 100644 (file)
@@ -6453,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+               !vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
@@ -6461,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
        for (;;) {
-               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-                   !vcpu->arch.apf.halted)
+               if (kvm_vcpu_running(vcpu))
                        r = vcpu_enter_guest(vcpu);
                else
                        r = vcpu_block(kvm, vcpu);
@@ -7474,34 +7479,66 @@ void kvm_arch_sync_events(struct kvm *kvm)
        kvm_free_pit(kvm);
 }
 
-int __x86_set_memory_region(struct kvm *kvm,
-                           const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
        int i, r;
+       unsigned long hva;
+       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memory_slot *slot, old;
 
        /* Called with kvm->slots_lock held.  */
-       BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+       if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+               return -EINVAL;
+
+       slot = id_to_memslot(slots, id);
+       if (size) {
+               if (WARN_ON(slot->npages))
+                       return -EEXIST;
+
+               /*
+                * MAP_SHARED to prevent internal slot pages from being moved
+                * by fork()/COW.
+                */
+               hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+                             MAP_SHARED | MAP_ANONYMOUS, 0);
+               if (IS_ERR((void *)hva))
+                       return PTR_ERR((void *)hva);
+       } else {
+               if (!slot->npages)
+                       return 0;
 
+               hva = 0;
+       }
+
+       old = *slot;
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-               struct kvm_userspace_memory_region m = *mem;
+               struct kvm_userspace_memory_region m;
 
-               m.slot |= i << 16;
+               m.slot = id | (i << 16);
+               m.flags = 0;
+               m.guest_phys_addr = gpa;
+               m.userspace_addr = hva;
+               m.memory_size = size;
                r = __kvm_set_memory_region(kvm, &m);
                if (r < 0)
                        return r;
        }
 
+       if (!size) {
+               r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+               WARN_ON(r < 0);
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm,
-                         const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
        int r;
 
        mutex_lock(&kvm->slots_lock);
-       r = __x86_set_memory_region(kvm, mem);
+       r = __x86_set_memory_region(kvm, id, gpa, size);
        mutex_unlock(&kvm->slots_lock);
 
        return r;
@@ -7516,16 +7553,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                 * unless the memory map has changed due to process exit
                 * or fd copying.
                 */
-               struct kvm_userspace_memory_region mem;
-               memset(&mem, 0, sizeof(mem));
-               mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-               x86_set_memory_region(kvm, &mem);
-
-               mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-               x86_set_memory_region(kvm, &mem);
-
-               mem.slot = TSS_PRIVATE_MEMSLOT;
-               x86_set_memory_region(kvm, &mem);
+               x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+               x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+               x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
        }
        kvm_iommu_unmap_guest(kvm);
        kfree(kvm->arch.vpic);
@@ -7628,27 +7658,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                enum kvm_mr_change change)
 {
-       /*
-        * Only private memory slots need to be mapped here since
-        * KVM_SET_MEMORY_REGION ioctl is no longer supported.
-        */
-       if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
-               unsigned long userspace_addr;
-
-               /*
-                * MAP_SHARED to prevent internal slot pages from being moved
-                * by fork()/COW.
-                */
-               userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
-                                        PROT_READ | PROT_WRITE,
-                                        MAP_SHARED | MAP_ANONYMOUS, 0);
-
-               if (IS_ERR((void *)userspace_addr))
-                       return PTR_ERR((void *)userspace_addr);
-
-               memslot->userspace_addr = userspace_addr;
-       }
-
        return 0;
 }
 
@@ -7710,17 +7719,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
        int nr_mmu_pages = 0;
 
-       if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
-               int ret;
-
-               ret = vm_munmap(old->userspace_addr,
-                               old->npages * PAGE_SIZE);
-               if (ret < 0)
-                       printk(KERN_WARNING
-                              "kvm_vm_ioctl_set_memory_region: "
-                              "failed to munmap memory\n");
-       }
-
        if (!kvm->arch.n_requested_mmu_pages)
                nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
@@ -7769,19 +7767,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
        kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+       if (!list_empty_careful(&vcpu->async_pf.done))
+               return true;
+
+       if (kvm_apic_has_events(vcpu))
+               return true;
+
+       if (vcpu->arch.pv.pv_unhalted)
+               return true;
+
+       if (atomic_read(&vcpu->arch.nmi_queued))
+               return true;
+
+       if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+               return true;
+
+       if (kvm_arch_interrupt_allowed(vcpu) &&
+           kvm_cpu_has_interrupt(vcpu))
+               return true;
+
+       return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
                kvm_x86_ops->check_nested_events(vcpu, false);
 
-       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-               !vcpu->arch.apf.halted)
-               || !list_empty_careful(&vcpu->async_pf.done)
-               || kvm_apic_has_events(vcpu)
-               || vcpu->arch.pv.pv_unhalted
-               || atomic_read(&vcpu->arch.nmi_queued) ||
-               (kvm_arch_interrupt_allowed(vcpu) &&
-                kvm_cpu_has_interrupt(vcpu));
+       return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
index 30564e2752d361870e91a4e25a5afbb3d029b7d6..df48430c279b8688996b9f0074c08b1ce139af06 100644 (file)
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
         * has been zapped already via cleanup_highmem().
         */
        all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
-       set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+       set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
 
        rodata_test();
 
index 1db84c0758b732b3465fcc896ef98862dabe0f16..6a28ded74211145a74bfe677f9619f2d2fb676ae 100644 (file)
@@ -704,6 +704,70 @@ out:
        return ret;
 }
 
+/*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but this way we don't need to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+       /* Initial call */
+       if (!entry)
+               return memmap.map_end - memmap.desc_size;
+
+       entry -= memmap.desc_size;
+       if (entry < memmap.map)
+               return NULL;
+
+       return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+       if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+               /*
+                * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+                * config table feature requires us to map all entries
+                * in the same order as they appear in the EFI memory
+                * map. That is to say, entry N must have a lower
+                * virtual address than entry N+1. This is because the
+                * firmware toolchain leaves relative references in
+                * the code/data sections, which are split and become
+                * separate EFI memory regions. Mapping things
+                * out-of-order leads to the firmware accessing
+                * unmapped addresses.
+                *
+                * Since we need to map things this way whether or not
+                * the kernel actually makes use of
+                * EFI_PROPERTIES_TABLE, let's just switch to this
+                * scheme by default for 64-bit.
+                */
+               return efi_map_next_entry_reverse(entry);
+       }
+
+       /* Initial call */
+       if (!entry)
+               return memmap.map;
+
+       entry += memmap.desc_size;
+       if (entry >= memmap.map_end)
+               return NULL;
+
+       return entry;
+}
+
 /*
  * Map the efi memory ranges of the runtime services and update new_mmap with
  * virtual addresses.
@@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
        unsigned long left = 0;
        efi_memory_desc_t *md;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+       p = NULL;
+       while ((p = efi_map_next_entry(p))) {
                md = p;
                if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
 #ifdef CONFIG_X86_64
index 30d12afe52ed173b2a81720cd5c89c24e667de2a..993b7a71386d53f79befa7a302ede2fdcbed6bd4 100644 (file)
 #include <linux/memblock.h>
 #include <linux/edd.h>
 
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
@@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
                /* Fast syscall setup is all done in hypercalls, so
                   these are all ignored.  Stub them out here to stop
                   Xen console noise. */
+               break;
 
        default:
                if (!pmu_msr_write(msr, low, high, &ret))
@@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
        .notifier_call  = xen_hvm_cpu_notify,
 };
 
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+       native_machine_shutdown();
+       if (kexec_in_progress)
+               xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+       native_machine_crash_shutdown(regs);
+       xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
 static void __init xen_hvm_guest_init(void)
 {
        if (xen_pv_domain())
@@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void)
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
        xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+       machine_ops.shutdown = xen_hvm_shutdown;
+       machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
 }
 #endif
 
index bfc08b13044b181c5948e5a2f22c205e900e0b47..660b3cfef23485f149e1a9b0b88f0b12666dbefb 100644 (file)
@@ -112,6 +112,15 @@ static unsigned long *p2m_identity;
 static pte_t *p2m_missing_pte;
 static pte_t *p2m_identity_pte;
 
+/*
+ * Hint at last populated PFN.
+ *
+ * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
+ * can avoid scanning the whole P2M (which may be sized to account for
+ * hotplugged memory).
+ */
+static unsigned long xen_p2m_last_pfn;
+
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
        BUG_ON(pfn >= MAX_P2M_PFN);
@@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void)
        else
                HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                        virt_to_mfn(p2m_top_mfn);
-       HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+       HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
        HYPERVISOR_shared_info->arch.p2m_generation = 0;
        HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
        HYPERVISOR_shared_info->arch.p2m_cr3 =
@@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void)
        static struct vm_struct vm;
        unsigned long p2m_limit;
 
+       xen_p2m_last_pfn = xen_max_p2m_pfn;
+
        p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
        vm.flags = VM_ALLOC;
        vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
@@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn)
                        free_p2m_page(p2m);
        }
 
+       /* Expanded the p2m? */
+       if (pfn > xen_p2m_last_pfn) {
+               xen_p2m_last_pfn = pfn;
+               HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+       }
+
        return true;
 }
 
index f5ef6746d47a0ee36f6b0a11edd0c49cbcf3590a..1c30e4ab1022bda71ff80d841509605ae07034cc 100644 (file)
@@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void)
 {
        unsigned long max_pages, limit;
        domid_t domid = DOMID_SELF;
-       int ret;
+       long ret;
 
        limit = xen_get_pages_limit();
        max_pages = limit;
@@ -798,7 +798,7 @@ char * __init xen_memory_setup(void)
                xen_ignore_unusable();
 
        /* Make sure the Xen-supplied memory map is well-ordered. */
-       sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
+       sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
                          &xen_e820_map_entries);
 
        max_pages = xen_get_max_pages();
index 63c223dff5f1eebed92297d2cd641535a3aeceb9..b56855a1382a374f8c52632b9aa243a1112a1a06 100644 (file)
@@ -28,4 +28,5 @@ generic-y += statfs.h
 generic-y += termios.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 1e28ddb656b891b92d7c135fa65914939b1451aa..8764c241e5bb44858e753b75f6c102c06a927171 100644 (file)
@@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu)
        return cpu;
 }
 
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+                           const struct cpumask *online_mask)
 {
        unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
        cpumask_var_t cpus;
@@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 
        cpumask_clear(cpus);
        nr_cpus = nr_uniq_cpus = 0;
-       for_each_online_cpu(i) {
+       for_each_cpu(i, online_mask) {
                nr_cpus++;
                first_sibling = get_first_sibling(i);
                if (!cpumask_test_cpu(first_sibling, cpus))
@@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 
        queue = 0;
        for_each_possible_cpu(i) {
-               if (!cpu_online(i)) {
+               if (!cpumask_test_cpu(i, online_mask)) {
                        map[i] = 0;
                        continue;
                }
@@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
        if (!map)
                return NULL;
 
-       if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
+       if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
                return map;
 
        kfree(map);
index 279c5d674edf3cb38627feb360eb745194eecd4e..788fffd9b4098e35a953ed8cc182a9633f9cc421 100644 (file)
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
        unsigned int i, first = 1;
        ssize_t ret = 0;
 
-       blk_mq_disable_hotplug();
-
        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
                first = 0;
        }
 
-       blk_mq_enable_hotplug();
-
        ret += sprintf(ret + page, "\n");
        return ret;
 }
@@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
        struct blk_mq_ctx *ctx;
        int i;
 
-       if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+       if (!hctx->nr_ctx)
                return;
 
        hctx_for_each_ctx(hctx, ctx, i)
@@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
        struct blk_mq_ctx *ctx;
        int i, ret;
 
-       if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+       if (!hctx->nr_ctx)
                return 0;
 
        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
@@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk)
        struct blk_mq_ctx *ctx;
        int i, j;
 
+       blk_mq_disable_hotplug();
+
        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);
 
@@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk)
        kobject_put(&q->mq_kobj);
 
        kobject_put(&disk_to_dev(disk)->kobj);
+
+       q->mq_sysfs_init_done = false;
+       blk_mq_enable_hotplug();
 }
 
 static void blk_mq_sysfs_init(struct request_queue *q)
@@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk)
        struct blk_mq_hw_ctx *hctx;
        int ret, i;
 
+       blk_mq_disable_hotplug();
+
        blk_mq_sysfs_init(q);
 
        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
-               return ret;
+               goto out;
 
        kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               hctx->flags |= BLK_MQ_F_SYSFS_UP;
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }
 
-       if (ret) {
+       if (ret)
                blk_mq_unregister_disk(disk);
-               return ret;
-       }
+       else
+               q->mq_sysfs_init_done = true;
+out:
+       blk_mq_enable_hotplug();
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
@@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        int i;
 
+       if (!q->mq_sysfs_init_done)
+               return;
+
        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
 }
@@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;
 
+       if (!q->mq_sysfs_init_done)
+               return ret;
+
        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
index 9115c6d59948addbc445a26ad0f9ccaf4237b137..ed96474d75cb62fb261526736727c67ea2238d46 100644 (file)
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
 {
-       struct blk_mq_tags *tags = hctx->tags;
+       struct blk_mq_hw_ctx *hctx;
+       int i;
+
+       queue_for_each_hw_ctx(q, hctx, i) {
+               struct blk_mq_tags *tags = hctx->tags;
+
+               /*
+                * If no software queues are currently mapped to this
+                * hardware queue, there's nothing to check
+                */
+               if (!blk_mq_hw_queue_mapped(hctx))
+                       continue;
+
+               if (tags->nr_reserved_tags)
+                       bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
+               bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
+                     false);
+       }
 
-       if (tags->nr_reserved_tags)
-               bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
-       bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
-                       false);
 }
-EXPORT_SYMBOL(blk_mq_tag_busy_iter);
 
 static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
 {
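
The hunk above replaces the exported per-hctx iterator with a queue-wide one that also skips unmapped hardware queues. A minimal sketch of a caller, assuming the busy_iter_fn signature used by blk_mq_check_expired() later in this commit (hctx, rq, priv, reserved); the callback and counter are hypothetical:

/* Hypothetical callback: count in-flight requests on a queue. */
static void count_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
			   void *priv, bool reserved)
{
	unsigned int *inflight = priv;

	(*inflight)++;
}

static unsigned int queue_inflight(struct request_queue *q)
{
	unsigned int inflight = 0;

	blk_mq_queue_tag_busy_iter(q, count_inflight, &inflight);
	return inflight;
}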
index 9eb2cf4f01cb874706d64af87a01e94e0121f7e4..d468a79f2c4a2c11a00387816bcc03b64aea09d1 100644 (file)
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+               void *priv);
 
 enum {
        BLK_MQ_TAG_CACHE_MIN    = 1,
index f2d67b4047a04d7015c3c2af16871972c3b5a720..7785ae96267a197926c700f74bcd6524892a8c01 100644 (file)
@@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq)
  *     Ends all I/O on a request. It does not handle partial completions.
  *     The actual completion happens out-of-order, through an IPI handler.
  **/
-void blk_mq_complete_request(struct request *rq)
+void blk_mq_complete_request(struct request *rq, int error)
 {
        struct request_queue *q = rq->q;
 
        if (unlikely(blk_should_fake_timeout(q)))
                return;
-       if (!blk_mark_rq_complete(rq))
+       if (!blk_mark_rq_complete(rq)) {
+               rq->errors = error;
                __blk_mq_complete_request(rq);
+       }
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
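
blk_mq_complete_request() now takes the error directly instead of expecting the driver to have written rq->errors first; the loop, null_blk, NVMe, virtio_blk and xen-blkfront hunks further down all convert to this shape. A minimal sketch with a hypothetical driver name:

/* Sketch of the new call shape for a blk-mq driver's completion path. */
static void mydrv_finish(struct request *rq, int hw_status)
{
	/* old: rq->errors = -EIO; blk_mq_complete_request(rq); */
	blk_mq_complete_request(rq, hw_status ? -EIO : 0);
}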
 
@@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                 * If a request wasn't started before the queue was
                 * marked dying, kill it here or it'll go unnoticed.
                 */
-               if (unlikely(blk_queue_dying(rq->q))) {
-                       rq->errors = -EIO;
-                       blk_mq_complete_request(rq);
-               }
+               if (unlikely(blk_queue_dying(rq->q)))
+                       blk_mq_complete_request(rq, -EIO);
                return;
        }
        if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv)
                .next           = 0,
                .next_set       = 0,
        };
-       struct blk_mq_hw_ctx *hctx;
        int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
-               /*
-                * If not software queues are currently mapped to this
-                * hardware queue, there's nothing to check
-                */
-               if (!blk_mq_hw_queue_mapped(hctx))
-                       continue;
-
-               blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
-       }
+       blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 
        if (data.next_set) {
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
+               struct blk_mq_hw_ctx *hctx;
+
                queue_for_each_hw_ctx(q, hctx, i) {
                        /* the hctx may be unmapped, so check it here */
                        if (blk_mq_hw_queue_mapped(hctx))
@@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
        }
 }
 
-static void blk_mq_map_swqueue(struct request_queue *q)
+static void blk_mq_map_swqueue(struct request_queue *q,
+                              const struct cpumask *online_mask)
 {
        unsigned int i;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct blk_mq_tag_set *set = q->tag_set;
 
+       /*
+        * Avoid others reading incomplete hctx->cpumask through sysfs
+        */
+       mutex_lock(&q->sysfs_lock);
+
        queue_for_each_hw_ctx(q, hctx, i) {
                cpumask_clear(hctx->cpumask);
                hctx->nr_ctx = 0;
@@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q)
         */
        queue_for_each_ctx(q, ctx, i) {
                /* If the cpu isn't online, the cpu is mapped to first hctx */
-               if (!cpu_online(i))
+               if (!cpumask_test_cpu(i, online_mask))
                        continue;
 
                hctx = q->mq_ops->map_queue(q, i);
                cpumask_set_cpu(i, hctx->cpumask);
-               cpumask_set_cpu(i, hctx->tags->cpumask);
                ctx->index_hw = hctx->nr_ctx;
                hctx->ctxs[hctx->nr_ctx++] = ctx;
        }
 
+       mutex_unlock(&q->sysfs_lock);
+
        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_ctxmap *map = &hctx->ctx_map;
 
@@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                hctx->next_cpu = cpumask_first(hctx->cpumask);
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }
+
+       queue_for_each_ctx(q, ctx, i) {
+               if (!cpumask_test_cpu(i, online_mask))
+                       continue;
+
+               hctx = q->mq_ops->map_queue(q, i);
+               cpumask_set_cpu(i, hctx->tags->cpumask);
+       }
 }
 
 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
@@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q)
                kfree(hctx);
        }
 
+       kfree(q->mq_map);
+       q->mq_map = NULL;
+
        kfree(q->queue_hw_ctx);
 
        /* ctx kobj stays in queue_ctx */
@@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        if (blk_mq_init_hw_queues(q, set))
                goto err_hctxs;
 
+       get_online_cpus();
        mutex_lock(&all_q_mutex);
-       list_add_tail(&q->all_q_node, &all_q_list);
-       mutex_unlock(&all_q_mutex);
 
+       list_add_tail(&q->all_q_node, &all_q_list);
        blk_mq_add_queue_tag_set(set, q);
+       blk_mq_map_swqueue(q, cpu_online_mask);
 
-       blk_mq_map_swqueue(q);
+       mutex_unlock(&all_q_mutex);
+       put_online_cpus();
 
        return q;
 
@@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q)
 {
        struct blk_mq_tag_set   *set = q->tag_set;
 
+       mutex_lock(&all_q_mutex);
+       list_del_init(&q->all_q_node);
+       mutex_unlock(&all_q_mutex);
+
        blk_mq_del_queue_tag_set(q);
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
 
        percpu_ref_exit(&q->mq_usage_counter);
-
-       kfree(q->mq_map);
-
-       q->mq_map = NULL;
-
-       mutex_lock(&all_q_mutex);
-       list_del_init(&q->all_q_node);
-       mutex_unlock(&all_q_mutex);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q,
+                               const struct cpumask *online_mask)
 {
        WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
        blk_mq_sysfs_unregister(q);
 
-       blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+       blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
 
        /*
         * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
@@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q)
         * involves freeing and re-allocating memory, worth doing?)
         */
 
-       blk_mq_map_swqueue(q);
+       blk_mq_map_swqueue(q, online_mask);
 
        blk_mq_sysfs_register(q);
 }
@@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
                                      unsigned long action, void *hcpu)
 {
        struct request_queue *q;
+       int cpu = (unsigned long)hcpu;
+       /*
+        * New online cpumask which is going to be set in this hotplug event.
+        * Declare this cpumask as static, as cpu-hotplug operations are invoked
+        * one-by-one and dynamically allocating it could result in a failure.
+        */
+       static struct cpumask online_new;
 
        /*
-        * Before new mappings are established, hotadded cpu might already
-        * start handling requests. This doesn't break anything as we map
-        * offline CPUs to first hardware queue. We will re-init the queue
-        * below to get optimal settings.
+        * Before hotadded cpu starts handling requests, new mappings must
+        * be established.  Otherwise, these requests in hw queue might
+        * never be dispatched.
+        *
+        * For example, there is a single hw queue (hctx) and two CPU queues
+        * (ctx0 for CPU0, and ctx1 for CPU1).
+        *
+        * Now CPU1 is just onlined and a request is inserted into
+        * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
+        * still zero.
+        *
+        * And then while running hw queue, flush_busy_ctxs() finds bit0 is
+        * set in pending bitmap and tries to retrieve requests in
+        * hctx->ctxs[0]->rq_list.  But hctx->ctxs[0] is a pointer to ctx0,
+        * so the request in ctx1->rq_list is ignored.
         */
-       if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
-           action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_DEAD:
+       case CPU_UP_CANCELED:
+               cpumask_copy(&online_new, cpu_online_mask);
+               break;
+       case CPU_UP_PREPARE:
+               cpumask_copy(&online_new, cpu_online_mask);
+               cpumask_set_cpu(cpu, &online_new);
+               break;
+       default:
                return NOTIFY_OK;
+       }
 
        mutex_lock(&all_q_mutex);
 
@@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
        }
 
        list_for_each_entry(q, &all_q_list, all_q_node)
-               blk_mq_queue_reinit(q);
+               blk_mq_queue_reinit(q, &online_new);
 
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_unfreeze_queue(q);
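
The comment above walks through the lost-request race in prose; the same effect can be reproduced with plain arrays: a software queue whose index_hw is stale makes the dispatcher look up the wrong ctx slot. A userspace toy of that scenario, all names illustrative:

#include <stdio.h>

/* Toy model: two software queues, one hardware queue, ctx1 not yet
 * remapped so its index_hw still reads 0. Not kernel code. */
struct swq { int nr_queued; };

int main(void)
{
	struct swq ctx0 = { .nr_queued = 0 };
	struct swq ctx1 = { .nr_queued = 1 };	/* request from new CPU1 */
	struct swq *hctx_ctxs[2] = { &ctx0, &ctx1 };
	int ctx1_index_hw = 0;			/* stale mapping */
	unsigned int pending = 1u << ctx1_index_hw;	/* bit0, not bit1 */

	/* flush_busy_ctxs() analogue: bit0 leads to ctx0, not ctx1 */
	for (int bit = 0; bit < 2; bit++)
		if (pending & (1u << bit))
			printf("slot %d: %d request(s) found\n",
			       bit, hctx_ctxs[bit]->nr_queued);
	/* prints "slot 0: 0 request(s) found" -- ctx1's request is lost */
	return 0;
}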
index 6a48c4c0d8a2a6efb881ea29b772df3bba9d5540..f4fea79649105b4e134860b53294ef2dac90a95f 100644 (file)
@@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void);
  * CPU -> queue mappings
  */
 extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
+extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+                                  const struct cpumask *online_mask);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
 /*
index 8acb886032ae7a604fe0e965eb5d3ce07dd4845b..9c1dc8d6106a89a0f853271c1dfc49cd301ec983 100644 (file)
@@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
        struct crypto_alg *base = &alg->halg.base;
 
        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
-           alg->halg.statesize > PAGE_SIZE / 8)
+           alg->halg.statesize > PAGE_SIZE / 8 ||
+           alg->halg.statesize == 0)
                return -EINVAL;
 
        base->cra_type = &crypto_ahash_type;
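
The extra statesize == 0 check catches ahash drivers that forget to declare their export state size, which would make export()/import() operate on a zero-byte buffer. A sketch of the relevant fields in a conforming declaration; the algorithm and its numbers are hypothetical, the field names are the crypto API's:

struct myhash_state {
	u64 count;
	u8 buffer[64];
};

static struct ahash_alg myhash_alg = {
	/* ...init/update/final/export/import hooks elided... */
	.halg = {
		.digestsize = 32,
		.statesize = sizeof(struct myhash_state),	/* must be > 0 */
		.base = {
			.cra_name = "myhash",
			.cra_driver_name = "myhash-generic",
			.cra_blocksize = 64,
		},
	},
};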
index 09f37b51680871d8a34dc3a33563872652d4f0d6..4dde37c3d8fcba549ad1eb978bf23466321e9152 100644 (file)
@@ -61,6 +61,7 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX);
 
 #if (!ACPI_REDUCED_HARDWARE)
 ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
index f7731f260c318606e32455b7175a01ea157d8267..591ea95319e25ca7e5630970dd080ab1eab85e5e 100644 (file)
@@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded);
 /*
  * tbfadt - FADT parse/convert/validate
  */
-void acpi_tb_parse_fadt(u32 table_index);
+void acpi_tb_parse_fadt(void);
 
 void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
 
@@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id);
  */
 acpi_status acpi_tb_initialize_facs(void);
 
-u8 acpi_tb_tables_loaded(void);
-
 void
 acpi_tb_print_table_header(acpi_physical_address address,
                           struct acpi_table_header *header);
index faad911d46b5eb71c4ae93a5300cf460e63c8738..10ce48e16ebf43a334fdc2f13479572d9bca4bd1 100644 (file)
@@ -71,7 +71,7 @@ acpi_status acpi_enable(void)
 
        /* ACPI tables must be present */
 
-       if (!acpi_tb_tables_loaded()) {
+       if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) {
                return_ACPI_STATUS(AE_NO_ACPI_TABLES);
        }
 
index 455a0700db392b1663a16c392d8da3bb544b72b6..a6454f4a6fb343b52cada9dc5394094768c6ea14 100644 (file)
@@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
  *
  * FUNCTION:    acpi_tb_parse_fadt
  *
- * PARAMETERS:  table_index         - Index for the FADT
+ * PARAMETERS:  None
  *
  * RETURN:      None
  *
@@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
  *
  ******************************************************************************/
 
-void acpi_tb_parse_fadt(u32 table_index)
+void acpi_tb_parse_fadt(void)
 {
        u32 length;
        struct acpi_table_header *table;
@@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(u32 table_index)
         * Get a local copy of the FADT and convert it to a common format
         * Map entire FADT, assumed to be smaller than one page.
         */
-       length = acpi_gbl_root_table_list.tables[table_index].length;
+       length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
 
        table =
-           acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
-                              address, length);
+           acpi_os_map_memory(acpi_gbl_root_table_list.
+                              tables[acpi_gbl_fadt_index].address, length);
        if (!table) {
                return;
        }
index 4337990127cc39930b50983d7e7eff05966fcfb6..d8ddef38c947f750a226cee1b69fa373c9e1bf8f 100644 (file)
@@ -97,29 +97,6 @@ acpi_status acpi_tb_initialize_facs(void)
 }
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_tables_loaded
- *
- * PARAMETERS:  None
- *
- * RETURN:      TRUE if required ACPI tables are loaded
- *
- * DESCRIPTION: Determine if the minimum required ACPI tables are present
- *              (FADT, FACS, DSDT)
- *
- ******************************************************************************/
-
-u8 acpi_tb_tables_loaded(void)
-{
-
-       if (acpi_gbl_root_table_list.current_table_count >= 4) {
-               return (TRUE);
-       }
-
-       return (FALSE);
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_tb_check_dsdt_header
@@ -392,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
                    ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
                                      tables[table_index].signature,
                                      ACPI_SIG_FADT)) {
-                       acpi_tb_parse_fadt(table_index);
+                       acpi_gbl_fadt_index = table_index;
+                       acpi_tb_parse_fadt();
                }
 
 next_table:
index 2a4154a09e4dca0dc9af3aa09f9a395a313f60e8..85e17bacc834156664c5980c4f7ea7ef68e5d6b6 100644 (file)
@@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev)
                                      dev_update_qos_constraint);
 
        if (constraint_ns > 0) {
-               constraint_ns -= td->start_latency_ns;
+               constraint_ns -= td->save_state_latency_ns +
+                               td->stop_latency_ns +
+                               td->start_latency_ns +
+                               td->restore_state_latency_ns;
                if (constraint_ns == 0)
                        return false;
        }
        td->effective_constraint_ns = constraint_ns;
-       td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
-                               constraint_ns == 0;
+       td->cached_stop_ok = constraint_ns >= 0;
+
        /*
         * The children have been suspended already, so we don't need to take
         * their stop latencies into account here.
@@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 
        off_on_time_ns = genpd->power_off_latency_ns +
                                genpd->power_on_latency_ns;
-       /*
-        * It doesn't make sense to remove power from the domain if saving
-        * the state of all devices in it and the power off/power on operations
-        * take too much time.
-        *
-        * All devices in this domain have been stopped already at this point.
-        */
-       list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-               if (pdd->dev->driver)
-                       off_on_time_ns +=
-                               to_gpd_data(pdd)->td.save_state_latency_ns;
-       }
 
        min_off_time_ns = -1;
        /*
@@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
                 * constraint_ns cannot be negative here, because the device has
                 * been suspended.
                 */
-               constraint_ns -= td->restore_state_latency_ns;
                if (constraint_ns <= off_on_time_ns)
                        return false;
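
With the governor change above, default_stop_ok() charges all four device latencies (save, stop, start, restore) against the QoS budget once, and default_power_down_ok() only compares what is left with the domain's own off+on time. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* all values in ns; purely illustrative */
	long long qos_constraint = 500000;
	long long save_state = 30000, stop = 5000,
		  start = 5000, restore_state = 40000;
	long long power_off = 100000, power_on = 150000;

	/* default_stop_ok(): subtract the per-device latencies once */
	long long effective = qos_constraint -
		(save_state + stop + start + restore_state);
	printf("effective constraint: %lld ns (stop_ok=%d)\n",
	       effective, effective >= 0);

	/* default_power_down_ok(): compare with the domain off+on time */
	long long off_on = power_off + power_on;
	printf("power down ok: %d\n", effective > off_on);
	return 0;
}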
 
index f42f2bac646623fc1db767bae3a5fff0ecf98aac..4c55cfbad19e95df8cb67864d78af960c073b4df 100644 (file)
@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
 /* Calculate the length of a fixed format  */
 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
 {
-       snprintf(buf, buf_size, "%x", max_val);
-       return strlen(buf);
+       return snprintf(NULL, 0, "%x", max_val);
 }
 
 static ssize_t regmap_name_read_file(struct file *file,
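
The one-liner above relies on a C99 guarantee: snprintf() with a NULL buffer and size 0 writes nothing and returns the number of characters the output would need, so the scratch buffer and strlen() call can go. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	int max_val = 0x3fff;

	/* snprintf with a NULL buffer and size 0 only measures */
	int len = snprintf(NULL, 0, "%x", max_val);
	printf("\"%x\" needs %d characters\n", max_val, len);	/* 4 */
	return 0;
}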
@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
                /* If we're in the region the user is trying to read */
                if (p >= *ppos) {
                        /* ...but not beyond it */
-                       if (buf_pos >= count - 1 - tot_len)
+                       if (buf_pos + tot_len + 1 >= count)
                                break;
 
                        /* Format the register */
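
The rewritten bound matters because count is an unsigned size_t: with a small count the old `count - 1 - tot_len` wraps around to a huge value and the break never fires. Moving the arithmetic to the other side of the comparison keeps it in range, as this userspace illustration shows:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t count = 4, tot_len = 10, buf_pos = 0;

	/* old test: count - 1 - tot_len underflows to a huge size_t */
	printf("old: %d\n", buf_pos >= count - 1 - tot_len);	/* 0 */

	/* new test: the addition keeps everything in range */
	printf("new: %d\n", buf_pos + tot_len + 1 >= count);	/* 1 */
	return 0;
}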
index f9889b6bc02c316bed46e130c9f5c7ce38b7b93b..674f800a3b5760ad6374c98fa11e88097e30d160 100644 (file)
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 {
        const bool write = cmd->rq->cmd_flags & REQ_WRITE;
        struct loop_device *lo = cmd->rq->q->queuedata;
-       int ret = -EIO;
+       int ret = 0;
 
-       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+               ret = -EIO;
                goto failed;
+       }
 
        ret = do_req_filebacked(lo, cmd->rq);
-
  failed:
-       if (ret)
-               cmd->rq->errors = -EIO;
-       blk_mq_complete_request(cmd->rq);
+       blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
 }
 
 static void loop_queue_write_work(struct work_struct *work)
index a295b98c6baed2df8bdd9484a62e44ca9bbfdc7a..1c9e4fe5aa440cbde62bb5e6c0cf0c397d8417a7 100644 (file)
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode)  {
                case NULL_Q_MQ:
-                       blk_mq_complete_request(cmd->rq);
+                       blk_mq_complete_request(cmd->rq, cmd->rq->errors);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
index b97fc3fe0916a6b6fd3fb2be32be44ce3c137b39..6f04771f1019798cc2feabf73eff2ddbadc84b81 100644 (file)
@@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
                        spin_unlock_irqrestore(req->q->queue_lock, flags);
                        return;
                }
+
                if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
                        if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-                               req->errors = -EINTR;
-                       else
-                               req->errors = status;
+                               status = -EINTR;
                } else {
-                       req->errors = nvme_error_status(status);
+                       status = nvme_error_status(status);
                }
-       } else
-               req->errors = 0;
+       }
+
        if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
                u32 result = le32_to_cpup(&cqe->result);
                req->special = (void *)(uintptr_t)result;
@@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
        }
        nvme_free_iod(nvmeq->dev, iod);
 
-       blk_mq_complete_request(req);
+       blk_mq_complete_request(req, status);
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
@@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ns && ns->ms && !blk_integrity_rq(req)) {
                if (!(ns->pi_type && ns->ms == 8) &&
                                        req->cmd_type != REQ_TYPE_DRV_PRIV) {
-                       req->errors = -EFAULT;
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, -EFAULT);
                        return BLK_MQ_RQ_QUEUE_OK;
                }
        }
@@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
        list_sort(NULL, &dev->namespaces, ns_cmp);
 }
 
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+       struct nvme_queue *nvmeq;
+       int i;
+
+       for (i = 0; i < dev->online_queues; i++) {
+               nvmeq = dev->queues[i];
+
+               if (!nvmeq->tags || !(*nvmeq->tags))
+                       continue;
+
+               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+                                       blk_mq_tags_cpumask(*nvmeq->tags));
+       }
+}
+
 static void nvme_dev_scan(struct work_struct *work)
 {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work)
                return;
        nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
        kfree(ctrl);
+       nvme_set_irq_hints(dev);
 }
 
 /*
@@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = {
        .compat_ioctl   = nvme_dev_ioctl,
 };
 
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
-       struct nvme_queue *nvmeq;
-       int i;
-
-       for (i = 0; i < dev->online_queues; i++) {
-               nvmeq = dev->queues[i];
-
-               if (!nvmeq->tags || !(*nvmeq->tags))
-                       continue;
-
-               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
-                                       blk_mq_tags_cpumask(*nvmeq->tags));
-       }
-}
-
 static int nvme_dev_start(struct nvme_dev *dev)
 {
        int result;
@@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev)
        if (result)
                goto free_tags;
 
-       nvme_set_irq_hints(dev);
-
        dev->event_limit = 1;
        return result;
 
@@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
        } else {
                nvme_unfreeze_queues(dev);
                nvme_dev_add(dev);
-               nvme_set_irq_hints(dev);
        }
        return 0;
 }
index d93a0372b37b5c7b4cb214e7013e64897c3a9aba..f5e49b639818bd370357215dbd4cefc4bceb8e20 100644 (file)
@@ -1863,9 +1863,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                rbd_osd_read_callback(obj_request);
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
-               rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
+               rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
+                          osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
                /* fall through */
        case CEPH_OSD_OP_WRITE:
+       case CEPH_OSD_OP_WRITEFULL:
                rbd_osd_write_callback(obj_request);
                break;
        case CEPH_OSD_OP_STAT:
@@ -2401,7 +2403,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
                                opcode = CEPH_OSD_OP_ZERO;
                }
        } else if (op_type == OBJ_OP_WRITE) {
-               opcode = CEPH_OSD_OP_WRITE;
+               if (!offset && length == object_size)
+                       opcode = CEPH_OSD_OP_WRITEFULL;
+               else
+                       opcode = CEPH_OSD_OP_WRITE;
                osd_req_op_alloc_hint_init(osd_request, num_ops,
                                        object_size, object_size);
                num_ops++;
@@ -3760,6 +3765,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        /* set io sizes to object size */
        segment_size = rbd_obj_bytes(&rbd_dev->header);
        blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+       q->limits.max_sectors = queue_max_hw_sectors(q);
        blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
        blk_queue_max_segment_size(q, segment_size);
        blk_queue_io_min(q, segment_size);
index e93899cc6f60be0bd13b45dde3b8d697b7a733c8..6ca35495a5becdbac067cb4338981191fd6bc56a 100644 (file)
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq)
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
-                       blk_mq_complete_request(vbr->req);
+                       blk_mq_complete_request(vbr->req, vbr->req->errors);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
index deb3f001791f159c5c7ebce19814de31e3106a5e..767657565de64e73f61304741fe9f39c496a2892 100644 (file)
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
 
 static int xen_blkif_disconnect(struct xen_blkif *blkif)
 {
+       struct pending_req *req, *n;
+       int i = 0, j;
+
        if (blkif->xenblkd) {
                kthread_stop(blkif->xenblkd);
                wake_up(&blkif->shutdown_wq);
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
        /* Remove all persistent grants and the cache of ballooned pages. */
        xen_blkbk_free_caches(blkif);
 
+       /* Check that there are no requests in use */
+       list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+               list_del(&req->free_list);
+
+               for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+                       kfree(req->segments[j]);
+
+               for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+                       kfree(req->indirect_pages[j]);
+
+               kfree(req);
+               i++;
+       }
+
+       WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+       blkif->nr_ring_pages = 0;
+
        return 0;
 }
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-       struct pending_req *req, *n;
-       int i = 0, j;
 
        xen_blkif_disconnect(blkif);
        xen_vbd_free(&blkif->vbd);
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
        BUG_ON(!list_empty(&blkif->free_pages));
        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 
-       /* Check that there is no request in use */
-       list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
-               list_del(&req->free_list);
-
-               for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
-                       kfree(req->segments[j]);
-
-               for (j = 0; j < MAX_INDIRECT_PAGES; j++)
-                       kfree(req->indirect_pages[j]);
-
-               kfree(req);
-               i++;
-       }
-
-       WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-
        kmem_cache_free(xen_blkif_cachep, blkif);
 }
 
index 0823a96902f87fa90d2e35a425183ea0de2e0049..611170896b8c94ce1d7494d62116ba1fde574fce 100644 (file)
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
        RING_IDX i, rp;
        unsigned long flags;
        struct blkfront_info *info = (struct blkfront_info *)dev_id;
+       int error;
 
        spin_lock_irqsave(&info->io_lock, flags);
 
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        continue;
                }
 
-               req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+               error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                           info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
                                queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
                        }
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, error);
                        break;
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                        }
-                       if (unlikely(req->errors)) {
-                               if (req->errors == -EOPNOTSUPP)
-                                       req->errors = 0;
+                       if (unlikely(error)) {
+                               if (error == -EOPNOTSUPP)
+                                       error = 0;
                                info->feature_flush = 0;
                                xlvbd_flush(info);
                        }
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                        "request: %x\n", bret->status);
 
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, error);
                        break;
                default:
                        BUG();
index c9c5dd0bad364e2f4850a33fd95ee90e19a80a18..ec6af15950622ef2448cfda759fd73bb141286f0 100644 (file)
@@ -183,7 +183,7 @@ config BT_HCIBCM203X
 
 config BT_HCIBPA10X
        tristate "HCI BPA10x USB driver"
-       depends on USB
+       depends on USB && BT_HCIUART
        select BT_HCIUART_H4
        help
          Bluetooth HCI BPA10x USB driver.
index e527a3e13939c2f19c74401a5c24edb5c13e97ad..fa893c3ec4087f39382f3dc83b968c9859f41cca 100644 (file)
@@ -93,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x300f) },
        { USB_DEVICE(0x04CA, 0x3010) },
        { USB_DEVICE(0x0930, 0x0219) },
+       { USB_DEVICE(0x0930, 0x021c) },
        { USB_DEVICE(0x0930, 0x0220) },
        { USB_DEVICE(0x0930, 0x0227) },
        { USB_DEVICE(0x0b05, 0x17d0) },
@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x0CF3, 0x311F) },
        { USB_DEVICE(0x0cf3, 0x3121) },
        { USB_DEVICE(0x0CF3, 0x817a) },
+       { USB_DEVICE(0x0CF3, 0x817b) },
        { USB_DEVICE(0x0cf3, 0xe003) },
        { USB_DEVICE(0x0CF3, 0xE004) },
        { USB_DEVICE(0x0CF3, 0xE005) },
@@ -153,6 +155,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
@@ -164,6 +167,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
index 2fc363a0393dbe57e6c48de4a1770de8ac6c8b86..0b697946e9bc7d91abf76b0dbfa7384fec4ce9b8 100644 (file)
@@ -323,7 +323,7 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
        }
 
        BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-               hw_name ? : "BCM", (subver & 0x7000) >> 13,
+               hw_name ? : "BCM", (subver & 0xe000) >> 13,
                (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
 
        return 0;
@@ -353,7 +353,7 @@ int btbcm_finalize(struct hci_dev *hdev)
        kfree_skb(skb);
 
        BT_INFO("%s: BCM (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-               (subver & 0x7000) >> 13, (subver & 0x1f00) >> 8,
+               (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8,
                (subver & 0x00ff), rev & 0x0fff);
 
        btbcm_check_bdaddr(hdev);
@@ -461,7 +461,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
        }
 
        BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-               hw_name ? : "BCM", (subver & 0x7000) >> 13,
+               hw_name ? : "BCM", (subver & 0xe000) >> 13,
                (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
 
        err = request_firmware(&fw, fw_name, &hdev->dev);
@@ -490,7 +490,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
        kfree_skb(skb);
 
        BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-               hw_name ? : "BCM", (subver & 0x7000) >> 13,
+               hw_name ? : "BCM", (subver & 0xe000) >> 13,
                (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
 
        /* Read Local Name */
@@ -527,6 +527,15 @@ int btbcm_setup_apple(struct hci_dev *hdev)
                kfree_skb(skb);
        }
 
+       /* Read USB Product Info */
+       skb = btbcm_read_usb_product(hdev);
+       if (!IS_ERR(skb)) {
+               BT_INFO("%s: BCM: product %4.4x:%4.4x", hdev->name,
+                       get_unaligned_le16(skb->data + 1),
+                       get_unaligned_le16(skb->data + 3));
+               kfree_skb(skb);
+       }
+
        /* Read Local Name */
        skb = btbcm_read_local_name(hdev);
        if (!IS_ERR(skb)) {
index 7047fe6a6a2b3b2f4ab7e952688ff084d9310009..1f13e617bf560d7dde4c30af30e7b6ded180f1c4 100644 (file)
@@ -91,6 +91,75 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 }
 EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
 
+int btintel_set_diag(struct hci_dev *hdev, bool enable)
+{
+       struct sk_buff *skb;
+       u8 param[3];
+       int err;
+
+       if (enable) {
+               param[0] = 0x03;
+               param[1] = 0x03;
+               param[2] = 0x03;
+       } else {
+               param[0] = 0x00;
+               param[1] = 0x00;
+               param[2] = 0x00;
+       }
+
+       skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               if (err == -ENODATA)
+                       goto done;
+               BT_ERR("%s: Changing Intel diagnostic mode failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+       kfree_skb(skb);
+
+done:
+       btintel_set_event_mask(hdev, enable);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_set_diag);
+
+int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
+{
+       struct sk_buff *skb;
+       u8 param[2];
+       int err;
+
+       param[0] = 0x01;
+       param[1] = 0x00;
+
+       skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Entering Intel manufacturer mode failed (%d)",
+                      hdev->name, err);
+               return PTR_ERR(skb);
+       }
+       kfree_skb(skb);
+
+       err = btintel_set_diag(hdev, enable);
+
+       param[0] = 0x00;
+       param[1] = 0x00;
+
+       skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)",
+                      hdev->name, err);
+               return PTR_ERR(skb);
+       }
+       kfree_skb(skb);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);
+
 void btintel_hw_error(struct hci_dev *hdev, u8 code)
 {
        struct sk_buff *skb;
@@ -216,6 +285,64 @@ int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
 }
 EXPORT_SYMBOL_GPL(btintel_load_ddc_config);
 
+int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
+{
+       u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+       struct sk_buff *skb;
+       int err;
+
+       if (debug)
+               mask[1] |= 0x62;
+
+       skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Setting Intel event mask failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+       kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_set_event_mask);
+
+int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
+{
+       struct sk_buff *skb;
+       u8 param[2];
+       int err;
+
+       param[0] = 0x01;
+       param[1] = 0x00;
+
+       skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Entering Intel manufacturer mode failed (%d)",
+                      hdev->name, err);
+               return PTR_ERR(skb);
+       }
+       kfree_skb(skb);
+
+       err = btintel_set_event_mask(hdev, debug);
+
+       param[0] = 0x00;
+       param[1] = 0x00;
+
+       skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)",
+                      hdev->name, err);
+               return PTR_ERR(skb);
+       }
+       kfree_skb(skb);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);
+
 /* ------- REGMAP IBT SUPPORT ------- */
 
 #define IBT_REG_MODE_8BIT  0x00
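
Both new *_mfg() helpers wrap their operation in the same bracket: vendor command 0xfc11 with parameters { 0x01, 0x00 } enters Intel manufacturer mode and { 0x00, 0x00 } leaves it again. Condensed into one hypothetical helper (calls and constants as in the hunks above, error handling trimmed):

static int my_mfg_bracket(struct hci_dev *hdev, bool enable)
{
	u8 enter[2] = { 0x01, 0x00 }, leave[2] = { 0x00, 0x00 };
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, enter, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	kfree_skb(skb);

	btintel_set_diag(hdev, enable);	/* the bracketed operation */

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, leave, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	kfree_skb(skb);
	return 0;
}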
index f0655c476fd250a5e20dfc06cf45496aa4b433ae..07e58e05a7fa6771c128c3965632d97413b2ff15 100644 (file)
@@ -73,12 +73,16 @@ struct intel_secure_send_result {
 
 int btintel_check_bdaddr(struct hci_dev *hdev);
 int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int btintel_set_diag(struct hci_dev *hdev, bool enable);
+int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable);
 void btintel_hw_error(struct hci_dev *hdev, u8 code);
 
 void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
 int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
                        const void *param);
 int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
+int btintel_set_event_mask(struct hci_dev *hdev, bool debug);
+int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug);
 
 struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
                                   u16 opcode_write);
@@ -95,6 +99,16 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdadd
        return -EOPNOTSUPP;
 }
 
+static inline int btintel_set_diag(struct hci_dev *hdev, bool enable)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
 {
 }
@@ -116,6 +130,16 @@ static inline int btintel_load_ddc_config(struct hci_dev *hdev,
        return -EOPNOTSUPP;
 }
 
+static inline int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
                                                 u16 opcode_read,
                                                 u16 opcode_write)
index 247b1062cb9affc67d9e7f7a0234c167f90f4b79..e33dacf5bd98765178ddac60f7d50d742de555ac 100644 (file)
@@ -60,6 +60,8 @@ static struct usb_driver btusb_driver;
 #define BTUSB_QCA_ROME         0x8000
 #define BTUSB_BCM_APPLE                0x10000
 #define BTUSB_REALTEK          0x20000
+#define BTUSB_BCM2045          0x40000
+#define BTUSB_IFNUM_2          0x80000
 
 static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
@@ -73,7 +75,7 @@ static const struct usb_device_id btusb_table[] = {
 
        /* Apple-specific (Broadcom) devices */
        { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
-         .driver_info = BTUSB_BCM_APPLE },
+         .driver_info = BTUSB_BCM_APPLE | BTUSB_IFNUM_2 },
 
        /* MediaTek MT76x0E */
        { USB_DEVICE(0x0e8d, 0x763f) },
@@ -124,6 +126,9 @@ static const struct usb_device_id btusb_table[] = {
        /* Broadcom BCM20702B0 (Dynex/Insignia) */
        { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
 
+       /* Broadcom BCM43142A0 (Foxconn/Lenovo) */
+       { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM },
+
        /* Foxconn - Hon Hai */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
          .driver_info = BTUSB_BCM_PATCHRAM },
@@ -164,6 +169,9 @@ static const struct usb_device_id blacklist_table[] = {
        /* Broadcom BCM2033 without firmware */
        { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
 
+       /* Broadcom BCM2045 devices */
+       { USB_DEVICE(0x0a5c, 0x2045), .driver_info = BTUSB_BCM2045 },
+
        /* Atheros 3011 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
@@ -195,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
@@ -206,6 +215,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
@@ -341,12 +351,14 @@ static const struct usb_device_id blacklist_table[] = {
 #define BTUSB_FIRMWARE_FAILED  8
 #define BTUSB_BOOTING          9
 #define BTUSB_RESET_RESUME     10
+#define BTUSB_DIAG_RUNNING     11
 
 struct btusb_data {
        struct hci_dev       *hdev;
        struct usb_device    *udev;
        struct usb_interface *intf;
        struct usb_interface *isoc;
+       struct usb_interface *diag;
 
        unsigned long flags;
 
@@ -361,6 +373,7 @@ struct btusb_data {
        struct usb_anchor intr_anchor;
        struct usb_anchor bulk_anchor;
        struct usb_anchor isoc_anchor;
+       struct usb_anchor diag_anchor;
        spinlock_t rxlock;
 
        struct sk_buff *evt_skb;
@@ -372,6 +385,8 @@ struct btusb_data {
        struct usb_endpoint_descriptor *bulk_rx_ep;
        struct usb_endpoint_descriptor *isoc_tx_ep;
        struct usb_endpoint_descriptor *isoc_rx_ep;
+       struct usb_endpoint_descriptor *diag_tx_ep;
+       struct usb_endpoint_descriptor *diag_rx_ep;
 
        __u8 cmdreq_type;
        __u8 cmdreq;
@@ -869,6 +884,92 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
        return err;
 }
 
+static void btusb_diag_complete(struct urb *urb)
+{
+       struct hci_dev *hdev = urb->context;
+       struct btusb_data *data = hci_get_drvdata(hdev);
+       int err;
+
+       BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
+              urb->actual_length);
+
+       if (urb->status == 0) {
+               struct sk_buff *skb;
+
+               skb = bt_skb_alloc(urb->actual_length, GFP_ATOMIC);
+               if (skb) {
+                       memcpy(skb_put(skb, urb->actual_length),
+                              urb->transfer_buffer, urb->actual_length);
+                       hci_recv_diag(hdev, skb);
+               }
+       } else if (urb->status == -ENOENT) {
+               /* Avoid a suspend failure when usb_kill_urb() is in progress */
+               return;
+       }
+
+       if (!test_bit(BTUSB_DIAG_RUNNING, &data->flags))
+               return;
+
+       usb_anchor_urb(urb, &data->diag_anchor);
+       usb_mark_last_busy(data->udev);
+
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (err < 0) {
+               /* -EPERM: urb is being killed;
+                * -ENODEV: device got disconnected */
+               if (err != -EPERM && err != -ENODEV)
+                       BT_ERR("%s urb %p failed to resubmit (%d)",
+                              hdev->name, urb, -err);
+               usb_unanchor_urb(urb);
+       }
+}
+
+static int btusb_submit_diag_urb(struct hci_dev *hdev, gfp_t mem_flags)
+{
+       struct btusb_data *data = hci_get_drvdata(hdev);
+       struct urb *urb;
+       unsigned char *buf;
+       unsigned int pipe;
+       int err, size = HCI_MAX_FRAME_SIZE;
+
+       BT_DBG("%s", hdev->name);
+
+       if (!data->diag_rx_ep)
+               return -ENODEV;
+
+       urb = usb_alloc_urb(0, mem_flags);
+       if (!urb)
+               return -ENOMEM;
+
+       buf = kmalloc(size, mem_flags);
+       if (!buf) {
+               usb_free_urb(urb);
+               return -ENOMEM;
+       }
+
+       pipe = usb_rcvbulkpipe(data->udev, data->diag_rx_ep->bEndpointAddress);
+
+       usb_fill_bulk_urb(urb, data->udev, pipe, buf, size,
+                         btusb_diag_complete, hdev);
+
+       urb->transfer_flags |= URB_FREE_BUFFER;
+
+       usb_mark_last_busy(data->udev);
+       usb_anchor_urb(urb, &data->diag_anchor);
+
+       err = usb_submit_urb(urb, mem_flags);
+       if (err < 0) {
+               if (err != -EPERM && err != -ENODEV)
+                       BT_ERR("%s urb %p submission failed (%d)",
+                              hdev->name, urb, -err);
+               usb_unanchor_urb(urb);
+       }
+
+       usb_free_urb(urb);
+
+       return err;
+}
+
 static void btusb_tx_complete(struct urb *urb)
 {
        struct sk_buff *skb = urb->context;
@@ -956,6 +1057,11 @@ static int btusb_open(struct hci_dev *hdev)
        set_bit(BTUSB_BULK_RUNNING, &data->flags);
        btusb_submit_bulk_urb(hdev, GFP_KERNEL);
 
+       if (data->diag) {
+               if (!btusb_submit_diag_urb(hdev, GFP_KERNEL))
+                       set_bit(BTUSB_DIAG_RUNNING, &data->flags);
+       }
+
 done:
        usb_autopm_put_interface(data->intf);
        return 0;
@@ -971,6 +1077,7 @@ static void btusb_stop_traffic(struct btusb_data *data)
        usb_kill_anchored_urbs(&data->intr_anchor);
        usb_kill_anchored_urbs(&data->bulk_anchor);
        usb_kill_anchored_urbs(&data->isoc_anchor);
+       usb_kill_anchored_urbs(&data->diag_anchor);
 }
 
 static int btusb_close(struct hci_dev *hdev)
@@ -986,6 +1093,7 @@ static int btusb_close(struct hci_dev *hdev)
        clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
        clear_bit(BTUSB_BULK_RUNNING, &data->flags);
        clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+       clear_bit(BTUSB_DIAG_RUNNING, &data->flags);
 
        btusb_stop_traffic(data);
        btusb_free_frags(data);
@@ -1593,8 +1701,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
                BT_INFO("%s: Intel device is already patched. patch num: %02x",
                        hdev->name, ver->fw_patch_num);
                kfree_skb(skb);
-               btintel_check_bdaddr(hdev);
-               return 0;
+               goto complete;
        }
 
        /* Opens the firmware patch file based on the firmware version read
@@ -1606,8 +1713,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
        fw = btusb_setup_intel_get_fw(hdev, ver);
        if (!fw) {
                kfree_skb(skb);
-               btintel_check_bdaddr(hdev);
-               return 0;
+               goto complete;
        }
        fw_ptr = fw->data;
 
@@ -1680,8 +1786,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
        BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
                hdev->name);
 
-       btintel_check_bdaddr(hdev);
-       return 0;
+       goto complete;
 
 exit_mfg_disable:
        /* Disable the manufacturer mode without reset */
@@ -1696,8 +1801,7 @@ exit_mfg_disable:
 
        BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
 
-       btintel_check_bdaddr(hdev);
-       return 0;
+       goto complete;
 
 exit_mfg_deactivate:
        release_firmware(fw);
@@ -1717,6 +1821,12 @@ exit_mfg_deactivate:
        BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
                hdev->name);
 
+complete:
+       /* Set the event mask for Intel specific vendor events. This enables
+        * a few extra events that are useful during general operation.
+        */
+       btintel_set_event_mask_mfg(hdev, false);
+
        btintel_check_bdaddr(hdev);
        return 0;
 }
@@ -2006,6 +2116,15 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        BT_INFO("%s: Secure boot is %s", hdev->name,
                params->secure_boot ? "enabled" : "disabled");
 
+       BT_INFO("%s: OTP lock is %s", hdev->name,
+               params->otp_lock ? "enabled" : "disabled");
+
+       BT_INFO("%s: API lock is %s", hdev->name,
+               params->api_lock ? "enabled" : "disabled");
+
+       BT_INFO("%s: Debug lock is %s", hdev->name,
+               params->debug_lock ? "enabled" : "disabled");
+
        BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
                params->min_fw_build_nn, params->min_fw_build_cw,
                2000 + params->min_fw_build_yy);
@@ -2222,6 +2341,15 @@ done:
         */
        btintel_load_ddc_config(hdev, fwname);
 
+       /* Set the event mask for Intel specific vendor events. This enables
+        * a few extra events that are useful during general operation. It
+        * does not enable any debugging related events.
+        *
+        * The device will function correctly without these events enabled
+        * so there is no need to fail the setup.
+        */
+       btintel_set_event_mask(hdev, false);
+
        return 0;
 }
 
@@ -2547,19 +2675,115 @@ static int btusb_setup_qca(struct hci_dev *hdev)
        return 0;
 }
 
+#ifdef CONFIG_BT_HCIBTUSB_BCM
+static inline int __set_diag_interface(struct hci_dev *hdev)
+{
+       struct btusb_data *data = hci_get_drvdata(hdev);
+       struct usb_interface *intf = data->diag;
+       int i;
+
+       if (!data->diag)
+               return -ENODEV;
+
+       data->diag_tx_ep = NULL;
+       data->diag_rx_ep = NULL;
+
+       for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+               struct usb_endpoint_descriptor *ep_desc;
+
+               ep_desc = &intf->cur_altsetting->endpoint[i].desc;
+
+               if (!data->diag_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
+                       data->diag_tx_ep = ep_desc;
+                       continue;
+               }
+
+               if (!data->diag_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
+                       data->diag_rx_ep = ep_desc;
+                       continue;
+               }
+       }
+
+       if (!data->diag_tx_ep || !data->diag_rx_ep) {
+               BT_ERR("%s invalid diagnostic descriptors", hdev->name);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static struct urb *alloc_diag_urb(struct hci_dev *hdev, bool enable)
+{
+       struct btusb_data *data = hci_get_drvdata(hdev);
+       struct sk_buff *skb;
+       struct urb *urb;
+       unsigned int pipe;
+
+       if (!data->diag_tx_ep)
+               return ERR_PTR(-ENODEV);
+
+       urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (!urb)
+               return ERR_PTR(-ENOMEM);
+
+       skb = bt_skb_alloc(2, GFP_KERNEL);
+       if (!skb) {
+               usb_free_urb(urb);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       *skb_put(skb, 1) = 0xf0;
+       *skb_put(skb, 1) = enable;
+
+       pipe = usb_sndbulkpipe(data->udev, data->diag_tx_ep->bEndpointAddress);
+
+       usb_fill_bulk_urb(urb, data->udev, pipe,
+                         skb->data, skb->len, btusb_tx_complete, skb);
+
+       skb->dev = (void *)hdev;
+
+       return urb;
+}
+
+static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable)
+{
+       struct btusb_data *data = hci_get_drvdata(hdev);
+       struct urb *urb;
+
+       if (!data->diag)
+               return -ENODEV;
+
+       if (!test_bit(HCI_RUNNING, &hdev->flags))
+               return -ENETDOWN;
+
+       urb = alloc_diag_urb(hdev, enable);
+       if (IS_ERR(urb))
+               return PTR_ERR(urb);
+
+       return submit_or_queue_tx_urb(hdev, urb);
+}
+#endif
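
For reference, the whole LM_DIAG toggle that btusb_bcm_set_diag() sends is a two-byte frame on the diagnostic interface's bulk-out endpoint, as built in alloc_diag_urb() above; restated as a tiny hypothetical helper:

static void fill_lm_diag_frame(u8 buf[2], bool enable)
{
	buf[0] = 0xf0;			/* LM_DIAG toggle opcode */
	buf[1] = enable ? 0x01 : 0x00;	/* 0x01 enables, 0x00 disables */
}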
+
 static int btusb_probe(struct usb_interface *intf,
                       const struct usb_device_id *id)
 {
        struct usb_endpoint_descriptor *ep_desc;
        struct btusb_data *data;
        struct hci_dev *hdev;
+       unsigned ifnum_base;
        int i, err;
 
        BT_DBG("intf %p id %p", intf, id);
 
        /* interface numbers are hardcoded in the spec */
-       if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
-               return -ENODEV;
+       if (intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+               if (!(id->driver_info & BTUSB_IFNUM_2))
+                       return -ENODEV;
+               if (intf->cur_altsetting->desc.bInterfaceNumber != 2)
+                       return -ENODEV;
+       }
+
+       ifnum_base = intf->cur_altsetting->desc.bInterfaceNumber;
 
        if (!id->driver_info) {
                const struct usb_device_id *match;
@@ -2627,6 +2851,7 @@ static int btusb_probe(struct usb_interface *intf,
        init_usb_anchor(&data->intr_anchor);
        init_usb_anchor(&data->bulk_anchor);
        init_usb_anchor(&data->isoc_anchor);
+       init_usb_anchor(&data->diag_anchor);
        spin_lock_init(&data->rxlock);
 
        if (id->driver_info & BTUSB_INTEL_NEW) {
@@ -2660,33 +2885,53 @@ static int btusb_probe(struct usb_interface *intf,
        hdev->send   = btusb_send_frame;
        hdev->notify = btusb_notify;
 
+       if (id->driver_info & BTUSB_BCM2045)
+               set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+
        if (id->driver_info & BTUSB_BCM92035)
                hdev->setup = btusb_setup_bcm92035;
 
 #ifdef CONFIG_BT_HCIBTUSB_BCM
        if (id->driver_info & BTUSB_BCM_PATCHRAM) {
+               hdev->manufacturer = 15;
                hdev->setup = btbcm_setup_patchram;
+               hdev->set_diag = btusb_bcm_set_diag;
                hdev->set_bdaddr = btbcm_set_bdaddr;
+
+               /* Broadcom LM_DIAG interface numbers are hardcoded */
+               data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
        }
 
-       if (id->driver_info & BTUSB_BCM_APPLE)
+       if (id->driver_info & BTUSB_BCM_APPLE) {
+               hdev->manufacturer = 15;
                hdev->setup = btbcm_setup_apple;
+               hdev->set_diag = btusb_bcm_set_diag;
+
+               /* Broadcom LM_DIAG interface numbers are hardcoded */
+               data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
+       }
 #endif
 
        if (id->driver_info & BTUSB_INTEL) {
+               hdev->manufacturer = 2;
                hdev->setup = btusb_setup_intel;
                hdev->shutdown = btusb_shutdown_intel;
+               hdev->set_diag = btintel_set_diag_mfg;
                hdev->set_bdaddr = btintel_set_bdaddr;
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
                set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+               set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
        }
 
        if (id->driver_info & BTUSB_INTEL_NEW) {
+               hdev->manufacturer = 2;
                hdev->send = btusb_send_frame_intel;
                hdev->setup = btusb_setup_intel_new;
                hdev->hw_error = btintel_hw_error;
+               hdev->set_diag = btintel_set_diag;
                hdev->set_bdaddr = btintel_set_bdaddr;
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+               set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
        }
 
        if (id->driver_info & BTUSB_MARVELL)
@@ -2697,8 +2942,10 @@ static int btusb_probe(struct usb_interface *intf,
                set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
        }
 
-       if (id->driver_info & BTUSB_INTEL_BOOT)
+       if (id->driver_info & BTUSB_INTEL_BOOT) {
+               hdev->manufacturer = 2;
                set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+       }
 
        if (id->driver_info & BTUSB_ATH3012) {
                hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
@@ -2727,8 +2974,8 @@ static int btusb_probe(struct usb_interface *intf,
                /* AMP controllers do not support SCO packets */
                data->isoc = NULL;
        } else {
-               /* Interface numbers are hardcoded in the specification */
-               data->isoc = usb_ifnum_to_if(data->udev, 1);
+               /* Interface ordering is hardcoded in the specification */
+               data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1);
        }
 
        if (!reset)
@@ -2791,6 +3038,16 @@ static int btusb_probe(struct usb_interface *intf,
                }
        }
 
+#ifdef CONFIG_BT_HCIBTUSB_BCM
+       if (data->diag) {
+               if (!usb_driver_claim_interface(&btusb_driver,
+                                               data->diag, data))
+                       __set_diag_interface(hdev);
+               else
+                       data->diag = NULL;
+       }
+#endif
+
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
@@ -2818,12 +3075,25 @@ static void btusb_disconnect(struct usb_interface *intf)
        if (data->isoc)
                usb_set_intfdata(data->isoc, NULL);
 
+       if (data->diag)
+               usb_set_intfdata(data->diag, NULL);
+
        hci_unregister_dev(hdev);
 
-       if (intf == data->isoc)
+       if (intf == data->intf) {
+               if (data->isoc)
+                       usb_driver_release_interface(&btusb_driver, data->isoc);
+               if (data->diag)
+                       usb_driver_release_interface(&btusb_driver, data->diag);
+       } else if (intf == data->isoc) {
+               if (data->diag)
+                       usb_driver_release_interface(&btusb_driver, data->diag);
                usb_driver_release_interface(&btusb_driver, data->intf);
-       else if (data->isoc)
-               usb_driver_release_interface(&btusb_driver, data->isoc);
+       } else if (intf == data->diag) {
+               usb_driver_release_interface(&btusb_driver, data->intf);
+               if (data->isoc)
+                       usb_driver_release_interface(&btusb_driver, data->isoc);
+       }
 
        hci_free_dev(hdev);
 }
index 6da5e4ca13ea6d925807660582be5957c4903752..d776dfd5147811ef37071e68ffd9d9ceaa961c0d 100644 (file)
@@ -243,6 +243,7 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu)
 static const struct hci_uart_proto athp = {
        .id             = HCI_UART_ATH3K,
        .name           = "ATH3K",
+       .manufacturer   = 69,
        .open           = ath_open,
        .close          = ath_close,
        .flush          = ath_flush,
index 645e66e9a94594d26dbb8ee929c88cca11ef447b..cb852cc750b78256cd05fb62eda18dfb28c246aa 100644 (file)
@@ -259,8 +259,8 @@ static int bcm_set_diag(struct hci_dev *hdev, bool enable)
                return -ENETDOWN;
 
        skb = bt_skb_alloc(3, GFP_KERNEL);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       if (!skb)
+               return -ENOMEM;
 
        *skb_put(skb, 1) = BCM_LM_DIAG_PKT;
        *skb_put(skb, 1) = 0xf0;
@@ -799,6 +799,7 @@ static int bcm_remove(struct platform_device *pdev)
 static const struct hci_uart_proto bcm_proto = {
        .id             = HCI_UART_BCM,
        .name           = "BCM",
+       .manufacturer   = 15,
        .init_speed     = 115200,
        .oper_speed     = 4000000,
        .open           = bcm_open,
index 2952107e3baeb300634706ef72a8f1aeecb32220..4a414a5a31655a4d3d0a8a0b03749ef73ff14357 100644 (file)
@@ -557,6 +557,7 @@ static int intel_setup(struct hci_uart *hu)
 
        bt_dev_dbg(hdev, "start intel_setup");
 
+       hu->hdev->set_diag = btintel_set_diag;
        hu->hdev->set_bdaddr = btintel_set_bdaddr;
 
        calltime = ktime_get();
@@ -1147,6 +1148,7 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu)
 static const struct hci_uart_proto intel_proto = {
        .id             = HCI_UART_INTEL,
        .name           = "Intel",
+       .manufacturer   = 2,
        .init_speed     = 115200,
        .oper_speed     = 3000000,
        .open           = intel_open,
index 01a83a3f8a1d1d603a93a1fd9810b61120438b9c..96bcec5598c221e4cec2841bbc4eb521d20778c1 100644 (file)
@@ -587,6 +587,13 @@ static int hci_uart_register_dev(struct hci_uart *hu)
        hdev->bus = HCI_UART;
        hci_set_drvdata(hdev, hu);
 
+       /* Consider the manufacturer information valid only when a vendor
+        * specific setup callback is provided. This avoids filling in the
+        * value for Ericsson when nothing is specified.
+        */
+       if (hu->proto->setup)
+               hdev->manufacturer = hu->proto->manufacturer;
+
        hdev->open  = hci_uart_open;
        hdev->close = hci_uart_close;
        hdev->flush = hci_uart_flush;
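
Put differently, a line discipline protocol only advertises a trustworthy manufacturer when it also carries a vendor setup routine. A hedged sketch of the resulting shape, with the id reused and the name invented for the example:

static int demo_setup(struct hci_uart *hu)
{
	return 0;	/* vendor-specific init would run here */
}

/* With .setup present the core copies .manufacturer into hdev; a
 * protocol that leaves both unset keeps hdev untouched instead of
 * being stamped with company id 0 (Ericsson). */
static const struct hci_uart_proto demo_proto = {
	.id           = HCI_UART_H4,	/* existing id, reused for the sketch */
	.name         = "DEMO",
	.manufacturer = 2,		/* Intel's Bluetooth SIG company id */
	.setup        = demo_setup,
};
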
index b4a0393b2862f3a37eeb9893122c67362620346d..77eae64000b398bd1501dae92135b880bd18bcbc 100644 (file)
@@ -947,6 +947,7 @@ static int qca_setup(struct hci_uart *hu)
 static struct hci_uart_proto qca_proto = {
        .id             = HCI_UART_QCA,
        .name           = "QCA",
+       .manufacturer   = 29,
        .init_speed     = 115200,
        .oper_speed     = 3000000,
        .open           = qca_open,
index 2f7bb35a890efc287d85e3fd9d46d022c19fc300..82c92f1b65b4af7c317c8a0af91723a481ac3874 100644 (file)
@@ -59,6 +59,7 @@ struct hci_uart;
 struct hci_uart_proto {
        unsigned int id;
        const char *name;
+       unsigned int manufacturer;
        unsigned int init_speed;
        unsigned int oper_speed;
        int (*open)(struct hci_uart *hu);
index 1a82f3a17681b77926a11c29ba23cbdc27d8b6b5..0ebca8ba7bc4103eeeb48c2fe245404091e0c9d3 100644 (file)
@@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL
 
 config ARM_CCI500_PMU
        bool "ARM CCI500 PMU support"
-       default y
        depends on (ARM && CPU_V7) || ARM64
        depends on PERF_EVENTS
        select ARM_CCI_PMU
index 5837eb8a212fbdcd8446ff9da77f393cc05c128a..85da8b9832568b2e4daab35eea661d4d99a5ac26 100644 (file)
@@ -197,6 +197,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
        for_each_node_by_type(dn, "cpu") {
                struct clk_init_data init;
                struct clk *clk;
+               struct clk *parent_clk;
                char *clk_name = kzalloc(5, GFP_KERNEL);
                int cpu, err;
 
@@ -208,8 +209,9 @@ static void __init of_cpu_clk_setup(struct device_node *node)
                        goto bail_out;
 
                sprintf(clk_name, "cpu%d", cpu);
+               parent_clk = of_clk_get(node, 0);
 
-               cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
+               cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
                cpuclk[cpu].clk_name = clk_name;
                cpuclk[cpu].cpu = cpu;
                cpuclk[cpu].reg_base = clock_complex_base;
index 7c1e1f58e2da2e7dc7909fce407bc477651add3a..2fe37f708dc70828ffa10fc165ecc830fff49c86 100644 (file)
@@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
         * the values for DIV_COPY and DIV_HPM dividers need not be set.
         */
        div0 = cfg_data->div0;
-       if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+       if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
                div1 = cfg_data->div1;
                if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
                        div1 = readl(base + E4210_DIV_CPU1) &
@@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
                alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
                WARN_ON(alt_div >= MAX_DIV);
 
-               if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+               if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
                        /*
                        * In Exynos4210, the ATB clock parent is also mout_core, so
                        * the ATB clock also needs to be maintained at a safe speed.
@@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
        writel(div0, base + E4210_DIV_CPU0);
        wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
 
-       if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+       if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
                writel(div1, base + E4210_DIV_CPU1);
                wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
                                DIV_MASK_ALL);
@@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
        unsigned long mux_reg;
 
        /* find out the divider values to use for clock data */
-       if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+       if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
                while ((cfg_data->prate * 1000) != ndata->new_rate) {
                        if (cfg_data->prate == 0)
                                return -EINVAL;
@@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
        writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
        wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
 
-       if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+       if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
                div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
                div_mask |= E4210_DIV0_ATB_MASK;
        }
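
The root cause in all four hunks is the same: the CLK_CPU_* flags are bit masks, while test_bit() takes a bit number, so the old code probed the wrong bit entirely. A standalone illustration, with mask values assumed to match the driver's header:

#include <stdio.h>

#define CLK_CPU_HAS_DIV1		(1 << 0)	/* assumed */
#define CLK_CPU_NEEDS_DEBUG_ALT_DIV	(1 << 1)	/* assumed */

int main(void)
{
	unsigned long flags = CLK_CPU_NEEDS_DEBUG_ALT_DIV;	/* 0x2 */

	/* test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &flags) probes bit
	 * number 2, i.e. mask 0x4, which is never set here */
	int buggy = !!(flags & (1UL << CLK_CPU_NEEDS_DEBUG_ALT_DIV));
	int fixed = !!(flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV);

	printf("buggy=%d fixed=%d\n", buggy, fixed);	/* buggy=0 fixed=1 */
	return 0;
}
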
index 676ee8f6d8136729a9665cfb9c29e7faed123781..8831e1a05367ad9c7473e3ee723adc3f29dc9936 100644 (file)
@@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
        DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
        DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
        DT_CLK(NULL, "uart3_ick", "uart3_ick"),
-       DT_CLK(NULL, "uart4_ick", "uart4_ick"),
        DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
        DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
        DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
@@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = {
 static struct ti_dt_clk omap36xx_clks[] = {
        DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
        DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+       DT_CLK(NULL, "uart4_ick", "uart4_ick"),
        { .node_name = NULL },
 };
 
index 9b5b289e633456206e81268d00bc212a7f9f62cc..a911d7de33778d7bc7648e59f0ee60c1a6c83027 100644 (file)
@@ -18,7 +18,6 @@
 
 #include "clock.h"
 
-#define DRA7_DPLL_ABE_DEFFREQ                          180633600
 #define DRA7_DPLL_GMAC_DEFFREQ                         1000000000
 #define DRA7_DPLL_USB_DEFFREQ                          960000000
 
@@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = {
 int __init dra7xx_dt_clk_init(void)
 {
        int rc;
-       struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck;
+       struct clk *dpll_ck, *hdcp_ck;
 
        ti_dt_clocks_register(dra7xx_clks);
 
        omap2_clk_disable_autoidle_all();
 
-       abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
-       sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
-       dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
-
-       rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
-       if (!rc)
-               rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
-       if (rc)
-               pr_err("%s: failed to configure ABE DPLL!\n", __func__);
-
-       dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
-       rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
-       if (rc)
-               pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
-
        dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
        rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
        if (rc)
index 90d7d8a21c4918d52b910fab900e78acd68c5aad..1ddc288fce4eb123e63d941971bc94bff41f69a5 100644 (file)
@@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw)
                }
        }
 
-       if (unlikely(!clk->enable_reg)) {
+       if (unlikely(IS_ERR(clk->enable_reg))) {
                pr_err("%s: %s missing enable_reg\n", __func__,
                       clk_hw_get_name(hw));
                ret = -EINVAL;
@@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw)
        u32 v;
 
        clk = to_clk_hw_omap(hw);
-       if (!clk->enable_reg) {
+       if (IS_ERR(clk->enable_reg)) {
                /*
                 * 'independent' here refers to a clock which is not
                 * controlled by its parent.
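
Both hunks hinge on the same detail: enable_reg is now populated with an ERR_PTR() on lookup failure, and an ERR_PTR() is a non-NULL pointer encoding a negative errno, so the old NULL test could never fire. A minimal sketch of the distinction:

#include <linux/err.h>
#include <linux/io.h>

/* hedged pattern: reject both flavours of "no register" explicitly */
static int check_enable_reg(void __iomem *reg)
{
	if (IS_ERR(reg))	/* ERR_PTR(-E...) is non-NULL: !reg misses it */
		return PTR_ERR(reg);
	if (!reg)		/* plain NULL, if a caller can produce one */
		return -EINVAL;
	return 0;
}
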
index bb2c2b05096455066826a13d5ad1156248f5e64e..d3c1742ded1af7655c3e2e77031ab3801ea84761 100644 (file)
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np)
        bc_timer.freq = clk_get_rate(timer_clk);
 
        irq = irq_of_parse_and_map(np, 0);
-       if (irq == NO_IRQ) {
+       if (!irq) {
                pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
                return;
        }
index edacf3902e107d9ac60c84cdaba4f4ad1822213c..1cea08cf603eb30d5028e157dc8927aef4a004a9 100644 (file)
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np)
        int irq, error;
 
        irq  = irq_of_parse_and_map(np, 0);
-       if (irq == NO_IRQ) {
+       if (!irq) {
                pr_err("%s: failed to map interrupts\n", __func__);
                return;
        }
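
The two timer fixes share one contract: irq_of_parse_and_map() returns 0 on failure, whereas NO_IRQ is -1 on some architectures and undefined on others, so the old comparison could never match here. Minimal sketch:

#include <linux/of.h>
#include <linux/of_irq.h>

/* sketch: map the first interrupt of @np, treating 0 as failure */
static int demo_map_irq(struct device_node *np)
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	return irq ? (int)irq : -EINVAL;	/* never compare with NO_IRQ */
}
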
index 798277227de7f3a897a4ad79fcaabe787412fcfb..cec1ee2d2f744b968fe653f47dc5067dfe4dccb1 100644 (file)
@@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 {
        struct acpi_cpufreq_data *data = policy->driver_data;
 
+       if (unlikely(!data))
+               return -ENODEV;
+
        return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 }
 
index ef5ed9470de9a59d371e34e7db24a434d1f11a9f..25c4c15103a0cd8759e006eaa10d9f9edbfb5872 100644 (file)
@@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu)
         * since this is a core component, and is essential for the
         * subsequent light-weight ->init() to succeed.
         */
-       if (cpufreq_driver->exit)
+       if (cpufreq_driver->exit) {
                cpufreq_driver->exit(policy);
+               policy->freq_table = NULL;
+       }
 }
 
 /**
index 3af9dd7332e6927d8dd860b5af410fba738bff4a..aa33b92b3e3e8866345e9893e3b0a880b8b1a17a 100644 (file)
@@ -776,6 +776,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
        local_irq_save(flags);
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
+       if (cpu->prev_mperf == mperf) {
+               local_irq_restore(flags);
+               return;
+       }
+
        tsc = rdtsc();
        local_irq_restore(flags);
 
index 3927ed9fdbd51f16d765aede8fef14418994ab55..ca848cc6a8fd1313bc56e5b93674b3d795814779 100644 (file)
@@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
        if (err) {
                put_device(&devfreq->dev);
                mutex_unlock(&devfreq->lock);
-               goto err_dev;
+               goto err_out;
        }
 
        mutex_unlock(&devfreq->lock);
@@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
 err_init:
        list_del(&devfreq->node);
        device_unregister(&devfreq->dev);
-err_dev:
        kfree(devfreq);
 err_out:
        return ERR_PTR(err);
@@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
                ret = PTR_ERR(governor);
                goto out;
        }
-       if (df->governor == governor)
+       if (df->governor == governor) {
+               ret = 0;
                goto out;
+       }
 
        if (df->governor) {
                ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
index a165b4bfd3300e97d409f2053b71adb276392336..dd24375b76ddcba72409d3c5c1285f19c172a45f 100644 (file)
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
        return desc;
 }
 
+static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+       memset(&desc->lld, 0, sizeof(desc->lld));
+       INIT_LIST_HEAD(&desc->descs_list);
+       desc->direction = DMA_TRANS_NONE;
+       desc->xfer_size = 0;
+       desc->active_xfer = false;
+}
+
 /* Call must be protected by lock. */
 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 {
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
                desc = list_first_entry(&atchan->free_descs_list,
                                        struct at_xdmac_desc, desc_node);
                list_del(&desc->desc_node);
-               desc->active_xfer = false;
+               at_xdmac_init_used_desc(desc);
        }
 
        return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
 
        if (xt->src_inc) {
                if (xt->src_sgl)
-                       chan_cc |=  AT_XDMAC_CC_SAM_UBS_DS_AM;
+                       chan_cc |=  AT_XDMAC_CC_SAM_UBS_AM;
                else
                        chan_cc |=  AT_XDMAC_CC_SAM_INCREMENTED_AM;
        }
 
        if (xt->dst_inc) {
                if (xt->dst_sgl)
-                       chan_cc |=  AT_XDMAC_CC_DAM_UBS_DS_AM;
+                       chan_cc |=  AT_XDMAC_CC_DAM_UBS_AM;
                else
                        chan_cc |=  AT_XDMAC_CC_DAM_INCREMENTED_AM;
        }
index 3ff284c8e3d5aef72f229017c883c73cbe13403f..09479d4be4db3d776fd1f3400724d13f26808428 100644 (file)
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
        mutex_lock(&dma_list_mutex);
 
        if (chan->client_count == 0) {
+               struct dma_device *device = chan->device;
+
+               dma_cap_set(DMA_PRIVATE, device->cap_mask);
+               device->privatecnt++;
                err = dma_chan_get(chan);
-               if (err)
+               if (err) {
                        pr_debug("%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);
+                       chan = NULL;
+                       if (--device->privatecnt == 0)
+                               dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+               }
        } else
                chan = NULL;
 
index cf1c87fa1edd557eb57f53dd41c11c02a440ea82..bedce038c6e281bb1e1bf6ba89585c14d532a5b2 100644 (file)
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < nr_channels; i++) {
                struct dw_dma_chan      *dwc = &dw->chan[i];
-               int                     r = nr_channels - i - 1;
 
                dwc->chan.device = &dw->dma;
                dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
                /* 7 is highest priority & 0 is lowest. */
                if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-                       dwc->priority = r;
+                       dwc->priority = nr_channels - i - 1;
                else
                        dwc->priority = i;
 
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
                /* Hardware configuration */
                if (autocfg) {
                        unsigned int dwc_params;
+                       unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
                        void __iomem *addr = chip->regs + r * sizeof(u32);
 
                        dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
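
The subtlety here: the per-channel DWC_PARAMS words are laid out from the top of the register block for the controller's maximum channel count, so indexing them with this instance's nr_channels (the removed r) reads the wrong word whenever fewer channels are enabled. A standalone illustration, assuming a silicon maximum of 8:

#include <stdio.h>

#define DW_DMA_MAX_NR_CHANNELS 8	/* silicon maximum (assumed) */

int main(void)
{
	unsigned int nr_channels = 4;	/* channels enabled on this instance */

	for (unsigned int i = 0; i < nr_channels; i++) {
		unsigned int buggy = (nr_channels - i - 1) * 4;
		unsigned int fixed = (DW_DMA_MAX_NR_CHANNELS - i - 1) * 4;

		printf("chan %u: wrong DWC_PARAMS offset %#x, right %#x\n",
		       i, buggy, fixed);
	}
	return 0;
}
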
index 18c14e1f1414e650969ff3c9e34431072b3abd83..48d6d9e94f6763c91bcf069848d9ef13e2eed48d 100644 (file)
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
        struct idma64_desc *desc = idma64c->desc;
        struct idma64_hw_desc *hw;
        size_t bytes = desc->length;
-       u64 llp;
-       u32 ctlhi;
+       u64 llp = channel_readq(idma64c, LLP);
+       u32 ctlhi = channel_readl(idma64c, CTL_HI);
        unsigned int i = 0;
 
-       llp = channel_readq(idma64c, LLP);
        do {
                hw = &desc->hw[i];
-       } while ((hw->llp != llp) && (++i < desc->ndesc));
+               if (hw->llp == llp)
+                       break;
+               bytes -= hw->len;
+       } while (++i < desc->ndesc);
 
        if (!i)
                return bytes;
 
-       do {
-               bytes -= desc->hw[--i].len;
-       } while (i);
+       /* The current chunk is not fully transferred yet */
+       bytes += desc->hw[--i].len;
 
-       ctlhi = channel_readl(idma64c, CTL_HI);
        return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
 }
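
A compact model of the rewritten walk, assuming hw[i].llp records the bus address of chunk i's own LLI while the hardware LLP register points at the next LLI to fetch (which is what the add-back implies): completed chunks are subtracted until the match, the in-flight chunk is added back, and the in-chunk progress is subtracted last. Standalone sketch with invented addresses:

#include <stdint.h>
#include <stdio.h>

struct hw_chunk { uint64_t llp; size_t len; };	/* llp: own LLI address */

static size_t model_residue(const struct hw_chunk *hw, unsigned int ndesc,
			    size_t total, uint64_t hw_llp, size_t done_in_cur)
{
	size_t bytes = total;
	unsigned int i = 0;

	do {
		if (hw[i].llp == hw_llp)
			break;		/* LLP points one LLI ahead */
		bytes -= hw[i].len;
	} while (++i < ndesc);

	if (!i)
		return bytes;		/* first LLI not even fetched */

	bytes += hw[--i].len;		/* current chunk, add it back */
	return bytes - done_in_cur;	/* minus progress within it */
}

int main(void)
{
	/* chunks of 64/64/32 bytes at made-up LLI addresses */
	struct hw_chunk hw[] = { { 0x10, 64 }, { 0x20, 64 }, { 0x30, 32 } };

	/* chunk 0 complete, chunk 1 in flight with 16 bytes moved */
	printf("%zu\n", model_residue(hw, 3, 160, 0x30, 16));	/* -> 80 */
	return 0;
}
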
 
index 5cb61ce01036fef2dc5248d11f99859e2dcb9d86..fc4156afa070306cd2fee4502f361717f364706b 100644 (file)
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
                return;
 
        /* clear the channel mapping in DRCMR */
-       reg = pxad_drcmr(chan->drcmr);
-       writel_relaxed(0, chan->phy->base + reg);
+       if (chan->drcmr <= DRCMR_CHLNUM) {
+               reg = pxad_drcmr(chan->drcmr);
+               writel_relaxed(0, chan->phy->base + reg);
+       }
 
        spin_lock_irqsave(&pdev->phy_lock, flags);
        for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
                "%s(); phy=%p(%d) misaligned=%d\n", __func__,
                phy, phy->idx, misaligned);
 
-       reg = pxad_drcmr(phy->vchan->drcmr);
-       writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+       if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+               reg = pxad_drcmr(phy->vchan->drcmr);
+               writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+       }
 
        dalgn = phy_readl_relaxed(phy, DALGN);
        if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
        struct dma_async_tx_descriptor *tx;
        struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
 
+       INIT_LIST_HEAD(&vd->node);
        tx = vchan_tx_prep(vc, vd, tx_flags);
        tx->tx_submit = pxad_tx_submit;
        dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
                width = chan->cfg.src_addr_width;
                dev_addr = chan->cfg.src_addr;
                *dev_src = dev_addr;
-               *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+               *dcmd |= PXA_DCMD_INCTRGADDR;
+               if (chan->drcmr <= DRCMR_CHLNUM)
+                       *dcmd |= PXA_DCMD_FLOWSRC;
        }
        if (dir == DMA_MEM_TO_DEV) {
                maxburst = chan->cfg.dst_maxburst;
                width = chan->cfg.dst_addr_width;
                dev_addr = chan->cfg.dst_addr;
                *dev_dst = dev_addr;
-               *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+               *dcmd |= PXA_DCMD_INCSRCADDR;
+               if (chan->drcmr <= DRCMR_CHLNUM)
+                       *dcmd |= PXA_DCMD_FLOWTRG;
        }
        if (dir == DMA_MEM_TO_MEM)
                *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
        else
                curr = phy_readl_relaxed(chan->phy, DTADR);
 
+       /*
+        * curr has to be actually read before checking descriptor
+        * completion, so that a curr inside a status updater
+        * descriptor implies the following test returns true; the
+        * barrier prevents reordering of the curr load and the test.
+        */
+       rmb();
+       if (is_desc_completed(vd))
+               goto out;
+
        for (i = 0; i < sw_desc->nb_desc - 1; i++) {
                hw_desc = sw_desc->hw_desc[i];
                if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
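
What the barrier buys: without rmb(), the completion test could be hoisted above the DTADR read, and a descriptor completing in that window would pair a "completed" status with a stale curr, yielding a bogus residue. The required ordering, as a pattern sketch:

	curr = phy_readl_relaxed(chan->phy, DTADR);	/* 1: sample progress */

	rmb();		/* 2: keep the load above before the test below */

	if (is_desc_completed(vd))	/* 3: completion now implies the
					 *    curr just read is fresh */
		goto out;
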
index a1a500d96ff2788db7355a65284a9a3b54c0a1e0..1661d518224a7e4e57ca6c8c717096b5a87333e1 100644 (file)
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
 static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
 {
        struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
-       struct sun4i_dma_promise *promise;
+       struct sun4i_dma_promise *promise, *tmp;
 
        /* Free all the demands and completed demands */
-       list_for_each_entry(promise, &contract->demands, list)
+       list_for_each_entry_safe(promise, tmp, &contract->demands, list)
                kfree(promise);
 
-       list_for_each_entry(promise, &contract->completed_demands, list)
+       list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
                kfree(promise);
 
        kfree(contract);
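
The crash mode fixed here is the classic list iteration pitfall: list_for_each_entry() computes the next element from promise->list.next after the loop body runs, so kfree(promise) inside the body turns that step into a use-after-free; the _safe variant snapshots the successor first. Reduced to its essence:

	struct sun4i_dma_promise *promise, *tmp;

	/* buggy: the iterator reads promise->list.next after kfree() */
	list_for_each_entry(promise, &contract->demands, list)
		kfree(promise);

	/* fixed: tmp holds the next entry before the body frees promise */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);
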
index b23e8d52d1263abc11cc126e9e0b80e1dcc5cc1b..8d57b1b12e411ef902d26af984e7d34a741a4cf2 100644 (file)
@@ -59,7 +59,6 @@
 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN                0xD070
 #define XGENE_DMA_RING_BLK_MEM_RDY             0xD074
 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL         0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v)             (((v) & 0x0001FFFE) >> 1)
 #define XGENE_DMA_RING_ID_GET(owner, num)      (((owner) << 6) | (num))
 #define XGENE_DMA_RING_DST_ID(v)               ((1 << 10) | (v))
 #define XGENE_DMA_RING_CMD_OFFSET              0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
        return flyby_type[src_cnt];
 }
 
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
-       u32 __iomem *cmd_base = ring->cmd_base;
-       u32 ring_state = ioread32(&cmd_base[1]);
-
-       return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
                                     dma_addr_t *paddr)
 {
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
        dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 }
 
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
-                                  struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+                                   struct xgene_dma_desc_sw *desc_sw)
 {
+       struct xgene_dma_ring *ring = &chan->tx_ring;
        struct xgene_dma_desc_hw *desc_hw;
 
-       /* Check if can push more descriptor to hw for execution */
-       if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
-               return -EBUSY;
-
        /* Get hw descriptor from DMA tx ring */
        desc_hw = &ring->desc_hw[ring->head];
 
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
                memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
        }
 
+       /* Increment the pending transaction count */
+       chan->pending += ((desc_sw->flags &
+                         XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
        /* Notify the hw that we have descriptor ready for execution */
        iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
                  2 : 1, ring->cmd);
-
-       return 0;
 }
 
 /**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
        struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-       int ret;
 
        /*
         * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
                if (chan->pending >= chan->max_outstanding)
                        return;
 
-               ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
-               if (ret)
-                       return;
+               xgene_chan_xfer_request(chan, desc_sw);
 
                /*
                 * Delete this element from ld pending queue and append it to
                 * ld running queue
                 */
                list_move_tail(&desc_sw->node, &chan->ld_running);
-
-               /* Increment the pending transaction count */
-               chan->pending++;
        }
 }
 
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
                 * Decrement the pending transaction count
                 * as we have processed one
                 */
-               chan->pending--;
+               chan->pending -= ((desc_sw->flags &
+                                 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 
                /*
                 * Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
                                     struct xgene_dma_ring *ring,
                                     enum xgene_dma_ring_cfgsize cfgsize)
 {
+       int ret;
+
        /* Setup DMA ring descriptor variables */
        ring->pdma = chan->pdma;
        ring->cfgsize = cfgsize;
        ring->num = chan->pdma->ring_num++;
        ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
 
-       ring->size = xgene_dma_get_ring_size(chan, cfgsize);
-       if (ring->size <= 0)
-               return ring->size;
+       ret = xgene_dma_get_ring_size(chan, cfgsize);
+       if (ret <= 0)
+               return ret;
+       ring->size = ret;
 
        /* Allocate memory for DMA ring descriptor */
        ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
                 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
 
        /* Set the max outstanding request possible to this channel */
-       chan->max_outstanding = rx_ring->slots;
+       chan->max_outstanding = tx_ring->slots;
 
        return ret;
 }
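
The three accounting hunks are one fix: a 64-byte descriptor occupies two TX ring slots and the doorbell write already counts slots, so pending must move in slot units and the throttle bound must come from the ring actually being filled (tx_ring, not rx_ring). A hypothetical helper capturing the unit used in both directions:

/* hypothetical helper; mirrors the flags test in submit and cleanup */
static inline u32 xgene_dma_desc_slots(struct xgene_dma_desc_sw *desc_sw)
{
	return (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 2 : 1;
}
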
index 39915a6b7986e2fba00d285370f57f27ed3eeb9a..c017fcd8e07c29b65b7a480a1b817b4645c40f33 100644 (file)
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
        struct dma_chan *chan;
        struct zx_dma_chan *c;
 
-       if (request > d->dma_requests)
+       if (request >= d->dma_requests)
                return NULL;
 
        chan = dma_get_any_slave_channel(&d->slave);
index e29560e6b40b0e5f28a141e7c88ceba1bdfa22ff..950c87f5d279335210088e4154eda135b24304d5 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/efi.h>
+#include <linux/sort.h>
 #include <asm/efi.h>
 
 #include "efistub.h"
@@ -305,6 +306,44 @@ fail:
  */
 #define EFI_RT_VIRTUAL_BASE    0x40000000
 
+static int cmp_mem_desc(const void *l, const void *r)
+{
+       const efi_memory_desc_t *left = l, *right = r;
+
+       return (left->phys_addr > right->phys_addr) ? 1 : -1;
+}
+
+/*
+ * Returns whether region @left ends exactly where region @right starts,
+ * or false if either argument is NULL.
+ */
+static bool regions_are_adjacent(efi_memory_desc_t *left,
+                                efi_memory_desc_t *right)
+{
+       u64 left_end;
+
+       if (left == NULL || right == NULL)
+               return false;
+
+       left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
+
+       return left_end == right->phys_addr;
+}
+
+/*
+ * Returns whether region @left and region @right have compatible memory type
+ * mapping attributes, and agree on the EFI_MEMORY_RUNTIME attribute.
+ */
+static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
+                                                     efi_memory_desc_t *right)
+{
+       static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
+                                        EFI_MEMORY_WC | EFI_MEMORY_UC |
+                                        EFI_MEMORY_RUNTIME;
+
+       return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+}
+
 /*
  * efi_get_virtmap() - create a virtual mapping for the EFI memory map
  *
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
                     int *count)
 {
        u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
-       efi_memory_desc_t *out = runtime_map;
+       efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
        int l;
 
-       for (l = 0; l < map_size; l += desc_size) {
-               efi_memory_desc_t *in = (void *)memory_map + l;
+       /*
+        * To work around potential issues with the Properties Table feature
+        * introduced in UEFI 2.5, which may split PE/COFF executable images
+        * in memory into several RuntimeServicesCode and RuntimeServicesData
+        * regions, we need to preserve the relative offsets between adjacent
+        * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
+        * The easiest way to find adjacent regions is to sort the memory map
+        * before traversing it.
+        */
+       sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+
+       for (l = 0; l < map_size; l += desc_size, prev = in) {
                u64 paddr, size;
 
+               in = (void *)memory_map + l;
                if (!(in->attribute & EFI_MEMORY_RUNTIME))
                        continue;
 
+               paddr = in->phys_addr;
+               size = in->num_pages * EFI_PAGE_SIZE;
+
                /*
                 * Make the mapping compatible with 64k pages: this allows
                 * a 4k page size kernel to kexec a 64k page size kernel and
                 * vice versa.
                 */
-               paddr = round_down(in->phys_addr, SZ_64K);
-               size = round_up(in->num_pages * EFI_PAGE_SIZE +
-                               in->phys_addr - paddr, SZ_64K);
-
-               /*
-                * Avoid wasting memory on PTEs by choosing a virtual base that
-                * is compatible with section mappings if this region has the
-                * appropriate size and physical alignment. (Sections are 2 MB
-                * on 4k granule kernels)
-                */
-               if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
-                       efi_virt_base = round_up(efi_virt_base, SZ_2M);
+               if (!regions_are_adjacent(prev, in) ||
+                   !regions_have_compatible_memory_type_attrs(prev, in)) {
+
+                       paddr = round_down(in->phys_addr, SZ_64K);
+                       size += in->phys_addr - paddr;
+
+                       /*
+                        * Avoid wasting memory on PTEs by choosing a virtual
+                        * base that is compatible with section mappings if this
+                        * region has the appropriate size and physical
+                        * alignment. (Sections are 2 MB on 4k granule kernels)
+                        */
+                       if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+                               efi_virt_base = round_up(efi_virt_base, SZ_2M);
+                       else
+                               efi_virt_base = round_up(efi_virt_base, SZ_64K);
+               }
 
                in->virt_addr = efi_virt_base + in->phys_addr - paddr;
                efi_virt_base += size;
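
To see the packing rule in action: adjacent EFI_MEMORY_RUNTIME regions with matching attributes keep their relative physical offsets inside one virtual run, while any break in adjacency restarts 64 KB-aligned packing. A standalone model with invented addresses (the 2 MB section optimisation and the attribute comparison are omitted for brevity):

#include <stdint.h>
#include <stdio.h>

#define SZ_64K	0x10000ULL
#define PAGE	0x1000ULL

struct region { uint64_t phys; uint64_t pages; };

int main(void)
{
	struct region map[] = {
		{ 0x40021000, 5 },	/* A: runtime code (invented) */
		{ 0x40026000, 3 },	/* B: starts exactly where A ends */
		{ 0x48000000, 2 },	/* C: unrelated runtime region */
	};
	uint64_t virt_base = 0x40000000, prev_end = 0;

	for (int i = 0; i < 3; i++) {
		uint64_t paddr = map[i].phys;
		uint64_t size = map[i].pages * PAGE;

		if (map[i].phys != prev_end) {	/* adjacency broken */
			paddr = map[i].phys & ~(SZ_64K - 1);
			size += map[i].phys - paddr;
			virt_base = (virt_base + SZ_64K - 1) & ~(SZ_64K - 1);
		}
		printf("region %c -> virt %#llx\n", 'A' + i,
		       (unsigned long long)(virt_base + map[i].phys - paddr));
		virt_base += size;
		prev_end = map[i].phys + map[i].pages * PAGE;
	}
	return 0;	/* A and B come out back to back, C re-rounds */
}
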
index 77f1d7c6ea3af627324b147e63b21b6cbdd16302..9416e0f5c1db2bf8c5601ddee999b1ade5efabc0 100644 (file)
@@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
                /* disp clock */
                adev->clock.default_dispclk =
                        le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-               if (adev->clock.default_dispclk == 0)
-                       adev->clock.default_dispclk = 54000; /* 540 Mhz */
+               /* set a reasonable default for DP */
+               if (adev->clock.default_dispclk < 53900) {
+                       DRM_INFO("Changing default dispclk from %dMHz to 600MHz\n",
+                                adev->clock.default_dispclk / 100);
+                       adev->clock.default_dispclk = 60000;
+               }
                adev->clock.dp_extclk =
                        le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
                adev->clock.current_dispclk = adev->clock.default_dispclk;
index 1c3fc99c5465bd10489ac1b31e17484426b7adb9..8e995148f56e263ecde7e5e7a390645b585f2c52 100644 (file)
@@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
        return ret;
 }
 
-static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
-                                    cgs_handle_t *handle)
-{
-       CGS_FUNC_ADEV;
-       int r;
-       uint32_t dma_handle;
-       struct drm_gem_object *obj;
-       struct amdgpu_bo *bo;
-       struct drm_device *dev = adev->ddev;
-       struct drm_file *file_priv = NULL, *priv;
-
-       mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(priv, &dev->filelist, lhead) {
-               rcu_read_lock();
-               if (priv->pid == get_pid(task_pid(current)))
-                       file_priv = priv;
-               rcu_read_unlock();
-               if (file_priv)
-                       break;
-       }
-       mutex_unlock(&dev->struct_mutex);
-       r = dev->driver->prime_fd_to_handle(dev,
-                                           file_priv, dmabuf_fd,
-                                           &dma_handle);
-       spin_lock(&file_priv->table_lock);
-
-       /* Check if we currently have a reference on the object */
-       obj = idr_find(&file_priv->object_idr, dma_handle);
-       if (obj == NULL) {
-               spin_unlock(&file_priv->table_lock);
-               return -EINVAL;
-       }
-       spin_unlock(&file_priv->table_lock);
-       bo = gem_to_amdgpu_bo(obj);
-       *handle = (cgs_handle_t)bo;
-       return 0;
-}
-
 static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
 {
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
-       amdgpu_cgs_import_gpu_mem,
        amdgpu_cgs_add_irq_source,
        amdgpu_cgs_irq_get,
        amdgpu_cgs_irq_put
index 749420f1ea6fbf2bc1417cfd5ea0210cf3c6243d..fd16652aa277c75d8ed5ca28e9088c153699addd 100644 (file)
@@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-       unsigned size, i;
+       unsigned size;
+       int i;
        int ret;
 
        if (cs->in.num_chunks == 0)
@@ -176,7 +177,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
        /* get chunks */
        INIT_LIST_HEAD(&p->validated);
-       chunk_array_user = (uint64_t __user *)(cs->in.chunks);
+       chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
@@ -196,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;
 
-               chunk_ptr = (void __user *)chunk_array[i];
+               chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
                if (copy_from_user(&user_chunk, chunk_ptr,
                                       sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
@@ -207,7 +208,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                p->chunks[i].length_dw = user_chunk.length_dw;
 
                size = p->chunks[i].length_dw;
-               cdata = (void __user *)user_chunk.chunk_data;
+               cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
 
                p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
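
All three casts in this hunk apply one idiom: a user pointer carried in a u64 must be narrowed through unsigned long (the pointer-sized integer type) before becoming a pointer, or 32-bit builds warn and the truncation semantics are unclear. As a sketch (the kernel later grew a u64_to_user_ptr() helper for exactly this):

/* illustrative helper; mirrors the (unsigned long) bounce above */
static inline void __user *example_u64_to_user_ptr(u64 x)
{
	return (void __user *)(unsigned long)x;
}
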
index e3d70772b53104f1f6a48020088d8391d10985b3..dc29ed8145c256c61c6c47da3671664175f4f30d 100644 (file)
@@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
        /* We borrow the event spin lock for protecting flip_status */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-       /* set the proper interrupt */
-       amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
        /* do the flip (mmio) */
        adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
        /* set the flip status */
index adb48353f2e1a10f169df7c2cd4fc6d6f8e2c23a..b190c2a83680260dba3cfccca1fa6fad6ee6feae 100644 (file)
@@ -242,11 +242,11 @@ static struct pci_device_id pciidlist[] = {
        {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
        /* topaz */
-       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
        /* tonga */
        {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
        {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
index 8a122b1b77861028c123301726b8bb440537ad55..96290d9cddcab6ad8f0e9e8927a71ff97a093c80 100644 (file)
@@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
                return true;
        return false;
 }
+
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
+{
+       struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+       struct drm_fb_helper *fb_helper;
+       int ret;
+
+       if (!afbdev)
+               return;
+
+       fb_helper = &afbdev->helper;
+
+       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       if (ret)
+               DRM_DEBUG("failed to restore crtc mode\n");
+}
index 8c735f544b6608b0f814dfe2396650ddf9c8a34b..5d11e798230ce759af5d13d5c318aa77cfe755d2 100644 (file)
@@ -485,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * amdgpu_driver_firstopen_kms - drm callback for last close
+ * amdgpu_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
@@ -493,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
+       struct amdgpu_device *adev = dev->dev_private;
+
+       amdgpu_fbdev_restore_mode(adev);
        vga_switcheroo_process_delayed_switch();
 }
 
index 64efe5b52e6500f840f0ad7edbdc6a9f64db6f66..7bd470d9ac30556825260575671c24a3229f28d6 100644 (file)
@@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
 
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
 
index 1e14531353e05ec7aadd69ea9d6e019310a25682..53d551f2d8395ccc24dc799887160e0977419ef2 100644 (file)
@@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                return -ENOMEM;
 
        r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-       if (r)
+       if (r) {
+               kfree(ib);
                return r;
+       }
        ib->length_dw = 0;
 
        /* walk over the address space and update the page directory */
index cd6edc40c9cd0c3dc2f09e96a2207f35e8b53d28..1e0bba29e16796f97c8eee38fc9d3100c405f6bc 100644 (file)
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
                        amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
                }
                if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-                       amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
-                                                              ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+                       amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
                if (ext_encoder)
                        amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
        } else {
index 82e8d073051759f7b0307b7675282a8dfea280e8..a1a35a5df8e71357eea132019d3500a35a89fce4 100644 (file)
@@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle)
        if (!amdgpu_dpm)
                return 0;
 
+       /* init the sysfs and debugfs files late */
+       ret = amdgpu_pm_sysfs_init(adev);
+       if (ret)
+               return ret;
+
        ret = ci_set_temperature_range(adev);
        if (ret)
                return ret;
@@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle)
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
-       ret = amdgpu_pm_sysfs_init(adev);
-       if (ret)
-               goto dpm_failed;
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
index 4b6ce74753cded5179b17eaf698ddd16766760b1..484710cfdf8243d563afe908c2b9c9884879f971 100644 (file)
@@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
        int ret, i;
        u16 tmp16;
 
+       if (pci_is_root_bus(adev->pdev->bus))
+               return;
+
        if (amdgpu_pcie_gen2 == 0)
                return;
 
index 44fa96ad47099b765ac81e5c439766a8f9849392..2e3373ed4c942d9fc753851d50adb9a3034ebff8 100644 (file)
@@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (amdgpu_dpm) {
+               int ret;
+               /* init the sysfs and debugfs files late */
+               ret = amdgpu_pm_sysfs_init(adev);
+               if (ret)
+                       return ret;
+
                /* powerdown unused blocks for now */
                cz_dpm_powergate_uvd(adev, true);
                cz_dpm_powergate_vce(adev, true);
@@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle)
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
 
-       ret = amdgpu_pm_sysfs_init(adev);
-       if (ret)
-               goto dpm_init_failed;
-
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
index e4d101b1252a47eaf7a2c7e35c2d8d83f737d762..d4c82b6257273475d15c3274cda7a30404fca231 100644 (file)
@@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Enable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Disable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v10_0_page_flip - pageflip callback.
  *
@@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v10_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v10_0_vga_enable(crtc, false);
-               /* Make sure VBLANK interrupt is still enabled */
+               /* Make sure VBLANK and PFLIP interrupts are still enabled */
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
+               amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v10_0_crtc_load_lut(crtc);
                break;
@@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle)
                dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v10_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle)
                dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v10_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle)
 
        dce_v10_0_hpd_fini(adev);
 
+       dce_v10_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle)
        /* initialize hpd */
        dce_v10_0_hpd_init(adev);
 
+       dce_v10_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
        drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-       amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
        queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
        return 0;
index 6411e824467164831eef8af634051f95b8faba69..7e1cf5e4eebf468dfd7c6fc0d97aeccb805e0fbe 100644 (file)
@@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Enable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Disable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v11_0_page_flip - pageflip callback.
  *
@@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v11_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v11_0_vga_enable(crtc, false);
-               /* Make sure VBLANK interrupt is still enabled */
+               /* Make sure VBLANK and PFLIP interrupts are still enabled */
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
+               amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v11_0_crtc_load_lut(crtc);
                break;
@@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle)
 
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
-               adev->mode_info.num_crtc = 4;
+               adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
@@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle)
                dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v11_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle)
                dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v11_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle)
 
        dce_v11_0_hpd_fini(adev);
 
+       dce_v11_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle)
        /* initialize hpd */
        dce_v11_0_hpd_init(adev);
 
+       dce_v11_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
        drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-       amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
        queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
        return 0;
index c86911c2ea2a896f414473f798d782e9c08518cf..34b9c2a9d8d489c7958af39e0fdbd4e484a572c6 100644 (file)
@@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Enable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Disable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v8_0_page_flip - pageflip callback.
  *
@@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v8_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v8_0_vga_enable(crtc, false);
-               /* Make sure VBLANK interrupt is still enabled */
+               /* Make sure VBLANK and PFLIP interrupts are still enabled */
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
+               amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v8_0_crtc_load_lut(crtc);
                break;
@@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle)
                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v8_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle)
                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }
 
+       dce_v8_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle)
 
        dce_v8_0_hpd_fini(adev);
 
+       dce_v8_0_pageflip_interrupt_fini(adev);
+
        return 0;
 }
 
@@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle)
        /* initialize hpd */
        dce_v8_0_hpd_init(adev);
 
+       dce_v8_0_pageflip_interrupt_init(adev);
+
        return 0;
 }
 
@@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
        drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-       amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
        queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
        return 0;
index 774528ab8704387f00525618b1c48578387b70df..fab5471d25d7e3dc3a3605d22c52fd669e919f7b 100644 (file)
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+       /* reset addr and status */
+       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                status);
        gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
-       /* reset addr and status */
-       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
        return 0;
 }
index 9a07742620d0361ad054930ff3b07c75c9bbcf2c..7bc9e9fcf3d26cbbaa6d7aa76fbef0349964ec6f 100644 (file)
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+       /* reset addr and status */
+       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                status);
        gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
-       /* reset addr and status */
-       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
        return 0;
 }
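
The two gmc hunks above reorder the VM fault handler: the latched registers are acknowledged first, and interrupts that carry no information (address and status both zero) are dropped before anything is printed. A runnable sketch of that ordering, with the register I/O stubbed out and all names illustrative:

#include <stdio.h>
#include <stdint.h>

static uint32_t fault_addr, fault_status;       /* stand-ins for RREG32() */

static void ack_fault(void)
{
        /* models WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1): clear the latch */
        fault_addr = fault_status = 0;
}

static int process_fault(void)
{
        uint32_t addr = fault_addr;
        uint32_t status = fault_status;

        ack_fault();                    /* acknowledge first */

        if (!addr && !status)           /* nothing latched: spurious */
                return 0;

        printf("GPU fault: addr 0x%08x status 0x%08x\n",
               (unsigned)addr, (unsigned)status);
        return 0;
}

int main(void)
{
        process_fault();                        /* silent */
        fault_addr = 0x1000; fault_status = 0x3;
        process_fault();                        /* reported once */
        process_fault();                        /* silent again: already acked */
        return 0;
}
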
index 94ec04a9c4d5c975eeb329dc770d3dd055b74ae5..9745ed3a9aef866443e269022142c52ec3fc3d65 100644 (file)
@@ -2995,6 +2995,12 @@ static int kv_dpm_late_init(void *handle)
 {
        /* powerdown unused blocks for now */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int ret;
+
+       /* init the sysfs and debugfs files late */
+       ret = amdgpu_pm_sysfs_init(adev);
+       if (ret)
+               return ret;
 
        kv_dpm_powergate_acp(adev, true);
        kv_dpm_powergate_samu(adev, true);
@@ -3038,9 +3044,6 @@ static int kv_dpm_sw_init(void *handle)
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
-       ret = amdgpu_pm_sysfs_init(adev);
-       if (ret)
-               goto dpm_failed;
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
index b55ceb14fdcd91e92f7a5924a232490a6419a80a..0bac8702e9348c2ee9c86ed52d6aaf490ef5032f 100644 (file)
@@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
        u32 mask;
        int ret;
 
+       if (pci_is_root_bus(adev->pdev->bus))
+               return;
+
        if (amdgpu_pcie_gen2 == 0)
                return;
 
index 488642f08267902c628c9db541030c6a27cb2def..3b47ae313e36bc7a0385365c082a519207fd7470 100644 (file)
 
 #include "cgs_common.h"
 
-/**
- * cgs_import_gpu_mem() - Import dmabuf handle
- * @cgs_device:  opaque device handle
- * @dmabuf_fd:   DMABuf file descriptor
- * @handle:      memory handle (output)
- *
- * Must be called in the process context that dmabuf_fd belongs to.
- *
- * Return:  0 on success, -errno otherwise
- */
-typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
-                                   cgs_handle_t *handle);
-
 /**
  * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
  * @private_data:  private data provided to cgs_add_irq_source
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
 typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
 
 struct cgs_os_ops {
-       cgs_import_gpu_mem_t import_gpu_mem;
-
        /* IRQ handling */
        cgs_add_irq_source_t add_irq_source;
        cgs_irq_get_t irq_get;
        cgs_irq_put_t irq_put;
 };
 
-#define cgs_import_gpu_mem(dev,dmabuf_fd,handle)               \
-       CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
 #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
        CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler,    \
                    private_data)
index e23df5fd3836b1b70169a1ee125782c1e754b875..5bca390d9ae26022df1be85d008ee7107d8aa9b8 100644 (file)
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_dp_mst_port *port,
                                  int offset, int size, u8 *bytes);
 
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
-                                   struct drm_dp_mst_branch *mstb);
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+                                    struct drm_dp_mst_branch *mstb);
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                           struct drm_dp_mst_branch *mstb,
                                           struct drm_dp_mst_port *port);
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
        struct drm_dp_mst_port *port, *tmp;
        bool wake_tx = false;
 
-       cancel_work_sync(&mstb->mgr->work);
-
        /*
         * destroy all ports - don't need lock
         * as there are no more references to the mst branch
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
 {
        struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+
        if (!port->input) {
                port->vcpi.num_slots = 0;
 
                kfree(port->cached_edid);
 
-               /* we can't destroy the connector here, as
-                  we might be holding the mode_config.mutex
-                  from an EDID retrieval */
+               /*
+                * The only time we don't have a connector
+                * on an output port is if the connector init
+                * fails.
+                */
                if (port->connector) {
+                       /* we can't destroy the connector here, as
+                        * we might be holding the mode_config.mutex
+                        * from an EDID retrieval */
+
                        mutex_lock(&mgr->destroy_connector_lock);
                        list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
                        return;
                }
+               /* no need to clean up vcpi
+                * as if we have no connector we never setup a vcpi */
                drm_dp_port_teardown_pdt(port, port->pdt);
-
-               if (!port->input && port->vcpi.vcpi > 0)
-                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
        }
        kfree(port);
-
-       (*mgr->cbs->hotplug)(mgr);
 }
 
 static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
        }
 }
 
-static void build_mst_prop_path(struct drm_dp_mst_port *port,
-                               struct drm_dp_mst_branch *mstb,
+static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
+                               int pnum,
                                char *proppath,
                                size_t proppath_size)
 {
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
                snprintf(temp, sizeof(temp), "-%d", port_num);
                strlcat(proppath, temp, proppath_size);
        }
-       snprintf(temp, sizeof(temp), "-%d", port->port_num);
+       snprintf(temp, sizeof(temp), "-%d", pnum);
        strlcat(proppath, temp, proppath_size);
 }
 
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
                drm_dp_port_teardown_pdt(port, old_pdt);
 
                ret = drm_dp_port_setup_pdt(port);
-               if (ret == true) {
+               if (ret == true)
                        drm_dp_send_link_address(mstb->mgr, port->mstb);
-                       port->mstb->link_address_sent = true;
-               }
        }
 
        if (created && !port->input) {
                char proppath[255];
-               build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
-               port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
 
-               if (port->port_num >= 8) {
+               build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+               port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+               if (!port->connector) {
+                       /* remove it from the port list */
+                       mutex_lock(&mstb->mgr->lock);
+                       list_del(&port->next);
+                       mutex_unlock(&mstb->mgr->lock);
+                       /* drop port list reference */
+                       drm_dp_put_port(port);
+                       goto out;
+               }
+               if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
                        port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+                       drm_mode_connector_set_tile_property(port->connector);
                }
+               (*mstb->mgr->cbs->register_connector)(port->connector);
        }
 
+out:
        /* put reference to this port */
        drm_dp_put_port(port);
 }
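
The path helper now takes the bare port number instead of the drm_dp_mst_port, so it no longer dereferences a port that may be torn down if connector init fails. A rough standalone rebuild of the "mst:<connector>-<port>-...-<port>" string it produces, assuming one "-<n>" segment per branch level; the rad array and function name are illustrative:

#include <stdio.h>
#include <string.h>

static void build_prop_path(int conn_id, const int *rad, int depth, int pnum,
                            char *buf, size_t len)
{
        char temp[16];
        int i;

        snprintf(buf, len, "mst:%d", conn_id);
        for (i = 0; i < depth; i++) {
                snprintf(temp, sizeof(temp), "-%d", rad[i]);
                strncat(buf, temp, len - strlen(buf) - 1);
        }
        snprintf(temp, sizeof(temp), "-%d", pnum);      /* bare port number */
        strncat(buf, temp, len - strlen(buf) - 1);
}

int main(void)
{
        int rad[] = { 1, 3 };
        char path[64];

        build_prop_path(12, rad, 2, 8, path, sizeof(path));
        puts(path);                             /* mst:12-1-3-8 */
        return 0;
}
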
@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
 {
        struct drm_dp_mst_port *port;
        struct drm_dp_mst_branch *mstb_child;
-       if (!mstb->link_address_sent) {
+       if (!mstb->link_address_sent)
                drm_dp_send_link_address(mgr, mstb);
-               mstb->link_address_sent = true;
-       }
+
        list_for_each_entry(port, &mstb->ports, next) {
                if (port->input)
                        continue;
@@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
        mutex_unlock(&mgr->qlock);
 }
 
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
-                                   struct drm_dp_mst_branch *mstb)
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+                                    struct drm_dp_mst_branch *mstb)
 {
        int len;
        struct drm_dp_sideband_msg_tx *txmsg;
@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg)
-               return -ENOMEM;
+               return;
 
        txmsg->dst = mstb;
        len = build_link_address(txmsg);
 
+       mstb->link_address_sent = true;
        drm_dp_queue_down_tx(mgr, txmsg);
 
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                        }
                        (*mgr->cbs->hotplug)(mgr);
                }
-       } else
+       } else {
+               mstb->link_address_sent = false;
                DRM_DEBUG_KMS("link address failed %d\n", ret);
+       }
 
        kfree(txmsg);
-       return 0;
 }
 
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
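
With the hunks above, drm_dp_send_link_address owns the link_address_sent flag itself: it is set optimistically before the message is queued and cleared again when the transaction fails, so the next topology probe retries. A minimal sketch in which the fake sideband transaction fails once and then succeeds; all names are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct branch { bool link_address_sent; };

static int queue_and_wait(void)
{
        static int attempt;
        return ++attempt > 1 ? 1 : -1;  /* fail the first sideband tx */
}

static void send_link_address(struct branch *b)
{
        b->link_address_sent = true;            /* set before queueing */
        if (queue_and_wait() <= 0) {
                b->link_address_sent = false;   /* failed: allow a retry */
                puts("link address failed");
        }
}

static void check_and_send(struct branch *b)
{
        if (!b->link_address_sent)
                send_link_address(b);
}

int main(void)
{
        struct branch b = { false };

        check_and_send(&b);     /* fails, flag cleared */
        check_and_send(&b);     /* retried, succeeds */
        check_and_send(&b);     /* no-op: already sent */
        return 0;
}
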
@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
        drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
                           DP_MST_EN | DP_UPSTREAM_IS_SRC);
        mutex_unlock(&mgr->lock);
+       flush_work(&mgr->work);
+       flush_work(&mgr->destroy_connector_work);
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
 
@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 
        if (port->cached_edid)
                edid = drm_edid_duplicate(port->cached_edid);
-       else
+       else {
                edid = drm_get_edid(connector, &port->aux.ddc);
-
-       drm_mode_connector_set_tile_property(connector);
+               drm_mode_connector_set_tile_property(connector);
+       }
        drm_dp_put_port(port);
        return edid;
 }
@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
        struct drm_dp_mst_port *port;
-
+       bool send_hotplug = false;
        /*
         * Not a regular list traverse as we have to drop the destroy
         * connector lock before destroying the connector, to avoid AB->BA
@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                if (!port->input && port->vcpi.vcpi > 0)
                        drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
                kfree(port);
+               send_hotplug = true;
        }
+       if (send_hotplug)
+               (*mgr->cbs->hotplug)(mgr);
 }
 
 /**
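
The destroy-connector worker in the previous hunk now raises at most one hotplug event per batch instead of one per destroyed port. The batching pattern as a sketch, with the work list reduced to a counter:

#include <stdbool.h>
#include <stdio.h>

static void hotplug(void)
{
        puts("hotplug event");
}

static void destroy_connector_work(int pending)
{
        bool send_hotplug = false;

        while (pending-- > 0) {
                /* tear down one connector, release its payload id ... */
                send_hotplug = true;
        }
        if (send_hotplug)
                hotplug();              /* one event for the whole batch */
}

int main(void)
{
        destroy_connector_work(3);      /* one "hotplug event" */
        destroy_connector_work(0);      /* none */
        return 0;
}
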
@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
  */
 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
+       flush_work(&mgr->work);
        flush_work(&mgr->destroy_connector_work);
        mutex_lock(&mgr->payload_lock);
        kfree(mgr->payloads);
@@ -2782,12 +2801,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
        if (msgs[num - 1].flags & I2C_M_RD)
                reading = true;
 
-       if (!reading) {
+       if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
                DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
                ret = -EIO;
                goto out;
        }
 
+       memset(&msg, 0, sizeof(msg));
        msg.req_type = DP_REMOTE_I2C_READ;
        msg.u.i2c_read.num_transactions = num - 1;
        msg.u.i2c_read.port_number = port->port_num;
index 418d299f3b129b307f86a970fdf49d3a826af4c8..ca08c472311bd3f6238f7513bc4ac26737228884 100644 (file)
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
                struct drm_crtc *crtc = mode_set->crtc;
                int ret;
 
-               if (crtc->funcs->cursor_set) {
+               if (crtc->funcs->cursor_set2) {
+                       ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+                       if (ret)
+                               error = true;
+               } else if (crtc->funcs->cursor_set) {
                        ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
                        if (ret)
                                error = true;
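
restore_fbdev_mode now prefers the hotspot-aware cursor_set2 hook and only falls back to the older cursor_set. A sketch of that prefer-the-richer-callback pattern; the ops struct and shortened signatures here are illustrative, not DRM's actual prototypes:

#include <stddef.h>
#include <stdio.h>

struct crtc_funcs {
        int (*cursor_set)(void *crtc, void *bo, int w, int h);
        int (*cursor_set2)(void *crtc, void *bo, int w, int h,
                           int hot_x, int hot_y);
};

static int hide_cursor(const struct crtc_funcs *f, void *crtc)
{
        if (f->cursor_set2)                     /* prefer the richer hook */
                return f->cursor_set2(crtc, NULL, 0, 0, 0, 0);
        if (f->cursor_set)
                return f->cursor_set(crtc, NULL, 0, 0);
        return 0;                               /* no cursor support */
}

static int my_cursor_set2(void *c, void *b, int w, int h, int hx, int hy)
{
        puts("cursor_set2 used");
        return 0;
}

int main(void)
{
        struct crtc_funcs funcs = { .cursor_set2 = my_cursor_set2 };

        return hide_cursor(&funcs, NULL);
}
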
index d734780b31c0fdcd67d2d622333b0a78c8098e10..a18164f2f6d28290c09462dd7a755168873a42d3 100644 (file)
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 }
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+/**
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
+ *
+ * This is like drm_kms_helper_poll_enable() however it is to be
+ * called from a context where the mode_config mutex is locked
+ * already.
+ */
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 {
        bool poll = false;
        struct drm_connector *connector;
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
        if (poll)
                schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+
 
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
                                                              uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 
        /* Re-enable polling in case the global poll config changed. */
        if (drm_kms_helper_poll != dev->mode_config.poll_running)
-               __drm_kms_helper_poll_enable(dev);
+               drm_kms_helper_poll_enable_locked(dev);
 
        dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
        mutex_lock(&dev->mode_config.mutex);
-       __drm_kms_helper_poll_enable(dev);
+       drm_kms_helper_poll_enable_locked(dev);
        mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
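
The rename above turns the static helper into an exported _locked variant — the usual kernel split between an entry point that takes the lock and one that asserts the caller already holds it (used by the i915 HPD storm handler later in this series). A userspace sketch with a pthread mutex standing in for mode_config.mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mode_config_mutex = PTHREAD_MUTEX_INITIALIZER;

static void poll_enable_locked(void)
{
        /* caller must already hold mode_config_mutex */
        puts("output polling scheduled");
}

static void poll_enable(void)
{
        pthread_mutex_lock(&mode_config_mutex);
        poll_enable_locked();
        pthread_mutex_unlock(&mode_config_mutex);
}

int main(void)
{
        poll_enable();                  /* public, takes the lock itself */

        pthread_mutex_lock(&mode_config_mutex);
        poll_enable_locked();           /* from an already-locked context */
        pthread_mutex_unlock(&mode_config_mutex);
        return 0;
}
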
index 0f6cd33b531f104f5094513a0992eb99361e65b3..684bd4a138439ef254f7123c66ffde989fede279 100644 (file)
@@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device,
                           char *buf)
 {
        struct drm_connector *connector = to_drm_connector(device);
-       struct drm_device *dev = connector->dev;
-       uint64_t dpms_status;
-       int ret;
+       int dpms;
 
-       ret = drm_object_property_get_value(&connector->base,
-                                           dev->mode_config.dpms_property,
-                                           &dpms_status);
-       if (ret)
-               return 0;
+       dpms = READ_ONCE(connector->dpms);
 
        return snprintf(buf, PAGE_SIZE, "%s\n",
-                       drm_get_dpms_name((int)dpms_status));
+                       drm_get_dpms_name(dpms));
 }
 
 static ssize_t enabled_show(struct device *device,
index cbdb78ef3baca57cd1ddf701425b953f140377f2..e6cbaca821a47b9740f4d35d7cfce00bbd0aa11a 100644 (file)
@@ -37,7 +37,6 @@
  * DECON stands for Display and Enhancement controller.
  */
 
-#define DECON_DEFAULT_FRAMERATE 60
 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128
 
 #define WINDOWS_NR     2
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx,
        return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
-static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
-               const struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
-{
-       if (adjusted_mode->vrefresh == 0)
-               adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
-
-       return true;
-}
-
 static void decon_commit(struct exynos_drm_crtc *crtc)
 {
        struct decon_context *ctx = crtc->ctx;
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
 static const struct exynos_drm_crtc_ops decon_crtc_ops = {
        .enable = decon_enable,
        .disable = decon_disable,
-       .mode_fixup = decon_mode_fixup,
        .commit = decon_commit,
        .enable_vblank = decon_enable_vblank,
        .disable_vblank = decon_disable_vblank,
index d66ade0efac892b84ce7e26c4f9e132870806d33..124fb9a56f02b596b5a6c4cf6cbf3f28151f4470 100644 (file)
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_dp_suspend(struct device *dev)
-{
-       struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-       exynos_dp_disable(&dp->encoder);
-       return 0;
-}
-
-static int exynos_dp_resume(struct device *dev)
-{
-       struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-       exynos_dp_enable(&dp->encoder);
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_dp_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
-};
-
 static const struct of_device_id exynos_dp_match[] = {
        { .compatible = "samsung,exynos5-dp" },
        {},
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = {
        .driver         = {
                .name   = "exynos-dp",
                .owner  = THIS_MODULE,
-               .pm     = &exynos_dp_pm_ops,
                .of_match_table = exynos_dp_match,
        },
 };
index c68a6a2a9b5794558015abdeefb0e58c4958049d..7f55ba6771c6b94e5f45bee6bdec078c27c74f5b 100644 (file)
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
 
 int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 {
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
 
 int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 {
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
 
 int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 {
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
 
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
 {
@@ -111,7 +107,6 @@ err:
        }
        return ret;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
 
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
 {
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
                        subdrv->close(dev, subdrv->dev, file);
        }
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
index 0872aa2f450f273a992bc414081e8501e11bf787..ed28823d3b35ef704a5dded0c1c55c1eff6ef3ed 100644 (file)
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
                exynos_crtc->ops->disable(exynos_crtc);
 }
 
-static bool
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-                           const struct drm_display_mode *mode,
-                           struct drm_display_mode *adjusted_mode)
-{
-       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
-       if (exynos_crtc->ops->mode_fixup)
-               return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
-                                                   adjusted_mode);
-
-       return true;
-}
-
 static void
 exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
        .enable         = exynos_drm_crtc_enable,
        .disable        = exynos_drm_crtc_disable,
-       .mode_fixup     = exynos_drm_crtc_mode_fixup,
        .mode_set_nofb  = exynos_drm_crtc_mode_set_nofb,
        .atomic_begin   = exynos_crtc_atomic_begin,
        .atomic_flush   = exynos_crtc_atomic_flush,
index 831d2e4cacf9d0bb951f5bbd9d7bbfab4b681c91..ae9e6b2d3758a97104ac6be69f1a970e6c0f3bb6 100644 (file)
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
        struct drm_connector *connector;
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev)
 
        return 0;
 }
+#endif
 
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
index b7ba21dfb69641f36410550711f024fde541bb72..6c717ba672dbc8ad41fcc3adedfc82c32c6de5bc 100644 (file)
@@ -82,7 +82,6 @@ struct exynos_drm_plane {
  *
  * @enable: enable the device
  * @disable: disable the device
- * @mode_fixup: fix mode data before applying it
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -103,9 +102,6 @@ struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
        void (*enable)(struct exynos_drm_crtc *crtc);
        void (*disable)(struct exynos_drm_crtc *crtc);
-       bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
-                               const struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode);
        void (*commit)(struct exynos_drm_crtc *crtc);
        int (*enable_vblank)(struct exynos_drm_crtc *crtc);
        void (*disable_vblank)(struct exynos_drm_crtc *crtc);
index 2a652359af644b51f257cde7528d70b6016897da..dd3a5e6d58c8f04c43fb8afd7c7b6243f7312761 100644 (file)
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
        .set_addr = fimc_dst_set_addr,
 };
 
-static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
-{
-       DRM_DEBUG_KMS("enable[%d]\n", enable);
-
-       if (enable) {
-               clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
-               clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
-               ctx->suspended = false;
-       } else {
-               clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
-               clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
-               ctx->suspended = true;
-       }
-
-       return 0;
-}
-
 static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
 {
        struct fimc_context *ctx = dev_id;
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+       if (enable) {
+               clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+               clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
+               ctx->suspended = false;
+       } else {
+               clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+               clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
+               ctx->suspended = true;
+       }
+
+       return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int fimc_suspend(struct device *dev)
 {
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
 static int fimc_runtime_suspend(struct device *dev)
 {
        struct fimc_context *ctx = get_fimc_context(dev);
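
This reshuffle (and the matching one in the rotator driver further down) moves a helper used only by the PM callbacks inside the CONFIG_PM guard, so builds without CONFIG_PM do not warn about a defined-but-unused function. A toy illustration with CONFIG_PM reduced to a local macro; names are illustrative:

#include <stdio.h>

#define CONFIG_PM 1     /* flip to 0: the helper compiles out cleanly */

#if CONFIG_PM
static int clk_ctrl(int enable)
{
        printf("clocks %s\n", enable ? "on" : "off");
        return 0;
}

static int runtime_resume(void)  { return clk_ctrl(1); }
static int runtime_suspend(void) { return clk_ctrl(0); }
#endif

int main(void)
{
#if CONFIG_PM
        runtime_resume();
        runtime_suspend();
#endif
        return 0;
}
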
index 750a9e6b9e8d92c312e3bb685fb524f249ad0488..3d1aba67758baf4a4e25e4c383e300d5a6dd954e 100644 (file)
@@ -41,7 +41,6 @@
  * CPU Interface.
  */
 
-#define FIMD_DEFAULT_FRAMERATE 60
 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128
 
 /* position control register for hardware window 0, 2 ~ 4.*/
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
        return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
-static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
-               const struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
-{
-       if (adjusted_mode->vrefresh == 0)
-               adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
-
-       return true;
-}
-
 static void fimd_commit(struct exynos_drm_crtc *crtc)
 {
        struct fimd_context *ctx = crtc->ctx;
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
                return;
 
        val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
-       writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
+       writel(val, ctx->regs + DP_MIE_CLKCON);
 }
 
 static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
        .enable = fimd_enable,
        .disable = fimd_disable,
-       .mode_fixup = fimd_mode_fixup,
        .commit = fimd_commit,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
index 3734c34aed16a22938509454cf794e5b4069ac35..c17efdb238a6e24f6fcecac58c395b8964b12703 100644 (file)
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
 
 int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
                                 struct drm_file *file)
@@ -1230,7 +1229,6 @@ err:
        g2d_put_cmdlist(g2d, node);
        return ret;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
 
 int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
                          struct drm_file *file)
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 out:
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
 
 static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 {
index f12fbc36b120065902c50253a4e91e9cc8952df5..407afedb60031a00f7f0cc5cb599cf7b3e57a9b8 100644 (file)
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
        nr_pages = obj->size >> PAGE_SHIFT;
 
        if (!is_drm_iommu_supported(dev)) {
-               dma_addr_t start_addr;
-               unsigned int i = 0;
-
                obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
                if (!obj->pages) {
                        DRM_ERROR("failed to allocate pages.\n");
                        return -ENOMEM;
                }
+       }
 
-               obj->cookie = dma_alloc_attrs(dev->dev,
-                                       obj->size,
-                                       &obj->dma_addr, GFP_KERNEL,
-                                       &obj->dma_attrs);
-               if (!obj->cookie) {
-                       DRM_ERROR("failed to allocate buffer.\n");
+       obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
+                                     GFP_KERNEL, &obj->dma_attrs);
+       if (!obj->cookie) {
+               DRM_ERROR("failed to allocate buffer.\n");
+               if (obj->pages)
                        drm_free_large(obj->pages);
-                       return -ENOMEM;
-               }
+               return -ENOMEM;
+       }
+
+       if (obj->pages) {
+               dma_addr_t start_addr;
+               unsigned int i = 0;
 
                start_addr = obj->dma_addr;
                while (i < nr_pages) {
-                       obj->pages[i] = phys_to_page(start_addr);
+                       obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
+                                                              start_addr));
                        start_addr += PAGE_SIZE;
                        i++;
                }
        } else {
-               obj->pages = dma_alloc_attrs(dev->dev, obj->size,
-                                       &obj->dma_addr, GFP_KERNEL,
-                                       &obj->dma_attrs);
-               if (!obj->pages) {
-                       DRM_ERROR("failed to allocate buffer.\n");
-                       return -ENOMEM;
-               }
+               obj->pages = obj->cookie;
        }
 
        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)obj->dma_addr, obj->size);
 
-       if (!is_drm_iommu_supported(dev)) {
-               dma_free_attrs(dev->dev, obj->size, obj->cookie,
-                               (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
-               drm_free_large(obj->pages);
-       } else
-               dma_free_attrs(dev->dev, obj->size, obj->pages,
-                               (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+       dma_free_attrs(dev->dev, obj->size, obj->cookie,
+                       (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
 
-       obj->dma_addr = (dma_addr_t)NULL;
+       if (!is_drm_iommu_supported(dev))
+               drm_free_large(obj->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
         * once dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
-               goto out;
-
-       exynos_drm_free_buf(exynos_gem_obj);
-
-out:
-       drm_gem_free_mmap_offset(obj);
+               drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
+       else
+               exynos_drm_free_buf(exynos_gem_obj);
 
        /* release file pointer to gem object. */
        drm_gem_object_release(obj);
 
        kfree(exynos_gem_obj);
-       exynos_gem_obj = NULL;
 }
 
 unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
        return exynos_gem_obj->size;
 }
 
-
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                return ERR_PTR(ret);
        }
 
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret < 0) {
+               drm_gem_object_release(obj);
+               kfree(exynos_gem_obj);
+               return ERR_PTR(ret);
+       }
+
        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
 
        return exynos_gem_obj;
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
        drm_gem_object_unreference_unlocked(obj);
 }
 
-int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
+static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
                                      struct vm_area_struct *vma)
 {
        struct drm_device *drm_dev = exynos_gem_obj->base.dev;
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
-{      struct exynos_drm_gem_obj *exynos_gem_obj;
+{
+       struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;
 
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_mode_create_dumb *args)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
+       unsigned int flags;
        int ret;
 
        /*
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;
 
-       if (is_drm_iommu_supported(dev)) {
-               exynos_gem_obj = exynos_drm_gem_create(dev,
-                       EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
-                       args->size);
-       } else {
-               exynos_gem_obj = exynos_drm_gem_create(dev,
-                       EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
-                       args->size);
-       }
+       if (is_drm_iommu_supported(dev))
+               flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
+       else
+               flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
 
+       exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
        if (IS_ERR(exynos_gem_obj)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem_obj);
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                goto unlock;
        }
 
-       ret = drm_gem_create_mmap_offset(obj);
-       if (ret)
-               goto out;
-
        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
-out:
        drm_gem_object_unreference(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
 err_close_vm:
        drm_gem_vm_close(vma);
-       drm_gem_free_mmap_offset(obj);
 
        return ret;
 }
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
        if (ret < 0)
                goto err_free_large;
 
+       exynos_gem_obj->sgt = sgt;
+
        if (sgt->nents == 1) {
                /* always physically continuous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
index cd62f8410d1e5d86f6dd221aebf924be1fe71db9..b62d1007c0e05f88e4cfc38970bb7baa9a87fe7a 100644 (file)
@@ -39,6 +39,7 @@
  *     - this address could be physical address without IOMMU and
  *     device address with IOMMU.
  * @pages: Array of backing pages.
+ * @sgt: Imported sg_table.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
  *     user can access the buffer through kms_bo.handle.
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj {
        dma_addr_t              dma_addr;
        struct dma_attrs        dma_attrs;
        struct page             **pages;
+       struct sg_table         *sgt;
 };
 
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 /* destroy a buffer with gem object */
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
 
-/* create a private gem object and initialize it. */
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
-                                                     unsigned long size);
-
 /* create a new buffer with gem object */
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
index 425e7062538812c0613c055b9b4fdeea709c4be0..2f5c118f4c8ef5ea27a68532756ca77549e325f4 100644 (file)
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
 static int rotator_clk_crtl(struct rot_context *rot, bool enable)
 {
        if (enable) {
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
 static int rotator_runtime_suspend(struct device *dev)
 {
        struct rot_context *rot = dev_get_drvdata(dev);
index 3e4be5a3becdddf9fd2a23e6be26f02da90a28f2..6ade068884328680ffe024dd91eabb9ffe6d9013 100644 (file)
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
        drm_mode_connector_set_path_property(connector, pathprop);
+       return connector;
+}
+
+static void intel_dp_register_mst_connector(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_device *dev = connector->dev;
        drm_modeset_lock_all(dev);
        intel_connector_add_to_fbdev(intel_connector);
        drm_modeset_unlock_all(dev);
        drm_connector_register(&intel_connector->base);
-       return connector;
 }
 
 static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 
 static struct drm_dp_mst_topology_cbs mst_cbs = {
        .add_connector = intel_dp_add_mst_connector,
+       .register_connector = intel_dp_register_mst_connector,
        .destroy_connector = intel_dp_destroy_mst_connector,
        .hotplug = intel_dp_mst_hotplug,
 };
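
i915's MST callbacks adopt the add/register split introduced in the topology helper above: the connector is built first and exposed to userspace only once fully initialized. A sketch of the two-phase callback shape; all types and names here are illustrative, not the DRM API:

#include <stdio.h>

struct connector { int ready; };

struct mst_cbs {
        struct connector *(*add_connector)(void);
        void (*register_connector)(struct connector *);
};

static struct connector conn;

static struct connector *my_add(void)
{
        conn.ready = 1;         /* attach properties, cached EDID, ... */
        return &conn;
}

static void my_register(struct connector *c)
{
        printf("exposed to userspace (ready=%d)\n", c->ready);
}

int main(void)
{
        struct mst_cbs cbs = { my_add, my_register };
        struct connector *c = cbs.add_connector();

        if (c)                  /* register only fully-built connectors */
                cbs.register_connector(c);
        return 0;
}
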
index 53c0173a39fe182d5d2e50ac2ffc6637f77526fd..b17785719598c9ca867340dc77aaed858f0c72db 100644 (file)
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 
        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
-               drm_kms_helper_poll_enable(dev);
+               drm_kms_helper_poll_enable_locked(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
index 72e0edd7bbde77d3b12812bead2a3f676589385a..7412caedcf7f98a2a5e494c41e2bad97f34d4e34 100644 (file)
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
        read_pointer = ring->next_context_status_buffer;
-       write_pointer = status_pointer & 0x07;
+       write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
        if (read_pointer > write_pointer)
-               write_pointer += 6;
+               write_pointer += GEN8_CSB_ENTRIES;
 
        spin_lock(&ring->execlist_lock);
 
        while (read_pointer < write_pointer) {
                read_pointer++;
                status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-                               (read_pointer % 6) * 8);
+                               (read_pointer % GEN8_CSB_ENTRIES) * 8);
                status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-                               (read_pointer % 6) * 8 + 4);
+                               (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
 
                if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
                        continue;
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        spin_unlock(&ring->execlist_lock);
 
        WARN(submit_contexts > 2, "More than two context complete events?\n");
-       ring->next_context_status_buffer = write_pointer % 6;
+       ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
        I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-                  _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
+                  _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+                                ((u32)ring->next_context_status_buffer &
+                                 GEN8_CSB_PTR_MASK) << 8));
 }
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u8 next_context_status_buffer_hw;
 
        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
        I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
        POSTING_READ(RING_MODE_GEN7(ring));
-       ring->next_context_status_buffer = 0;
+
+       /*
+        * Instead of resetting the Context Status Buffer (CSB) read pointer to
+        * zero, we need to read the write pointer from hardware and use its
+        * value because "this register is power context save restored".
+        * Effectively, these states have been observed:
+        *
+        *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+        * BDW  | CSB regs not reset       | CSB regs reset       |
+        * CHT  | CSB regs not reset       | CSB regs not reset   |
+        */
+       next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+                                                  & GEN8_CSB_PTR_MASK);
+
+       /*
+        * When the CSB registers are reset (also after power-up / gpu reset),
+        * CSB write pointer is set to all 1's, which is not valid, use '5' in
+        * this special case, so the first element read is CSB[0].
+        */
+       if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+               next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+       ring->next_context_status_buffer = next_context_status_buffer_hw;
        DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
 
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
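
The CSB hunks above replace the magic 6/0x07 literals with GEN8_CSB_ENTRIES/GEN8_CSB_PTR_MASK and restore the read pointer from the hardware write pointer instead of zeroing it. The ring-pointer arithmetic itself, extracted into a runnable sketch:

#include <stdio.h>

#define CSB_ENTRIES  6
#define CSB_PTR_MASK 0x07

int main(void)
{
        unsigned read_ptr = 5;          /* last slot already consumed */
        unsigned status_reg = 0x01;     /* low bits: hw write pointer */
        unsigned write_ptr = status_reg & CSB_PTR_MASK;

        if (read_ptr > write_ptr)
                write_ptr += CSB_ENTRIES;       /* unwrap across the ring */

        while (read_ptr < write_ptr) {
                read_ptr++;
                printf("consume CSB[%u]\n", read_ptr % CSB_ENTRIES);
        }
        printf("next read pointer: %u\n", write_ptr % CSB_ENTRIES);
        return 0;
}

With read_ptr 5 and a hardware write pointer of 1, the unwrap makes write_ptr 7, so entries CSB[0] and CSB[1] are consumed and the next read pointer lands back on 1.
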
index 64f89f9982a20f745cb41ef5950cd637437000ca..3c63bb32ad81c657e418b1b7143ed934d036c66f 100644 (file)
@@ -25,6 +25,8 @@
 #define _INTEL_LRC_H_
 
 #define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
 #define RING_ELSP(ring)                        ((ring)->mmio_base+0x230)
index af7fdb3bd663aef062a5cd41a2cdacbb4492515d..7401cf90b0dbcd1eb335e0c6defc42d22c9bb631 100644 (file)
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
        }
 
        if (power_well->data == SKL_DISP_PW_1) {
-               intel_prepare_ddi(dev);
+               if (!dev_priv->power_domains.initializing)
+                       intel_prepare_ddi(dev);
                gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
        }
 }
index cc6c228e11c83566d1ac1a2c59fcefa959345463..e905c00acf1a37baef92d66a6f38b888372b9834 100644 (file)
@@ -469,9 +469,13 @@ nouveau_display_create(struct drm_device *dev)
        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
-       } else {
+       } else
+       if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) {
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
+       } else {
+               dev->mode_config.max_width = 16384;
+               dev->mode_config.max_height = 16384;
        }
 
        dev->mode_config.preferred_depth = 24;
index 2791701685dc82bf4e2655ce3ea8ea6c3b278e49..59f27e774acb5e9c98c9854bd72195efdbec3a48 100644 (file)
@@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
        return 0;
 }
 
+static int
+nouveau_fbcon_open(struct fb_info *info, int user)
+{
+       struct nouveau_fbdev *fbcon = info->par;
+       struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+       int ret = pm_runtime_get_sync(drm->dev->dev);
+       if (ret < 0 && ret != -EACCES)
+               return ret;
+       return 0;
+}
+
+static int
+nouveau_fbcon_release(struct fb_info *info, int user)
+{
+       struct nouveau_fbdev *fbcon = info->par;
+       struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+       pm_runtime_put(drm->dev->dev);
+       return 0;
+}
+
 static struct fb_ops nouveau_fbcon_ops = {
        .owner = THIS_MODULE,
+       .fb_open = nouveau_fbcon_open,
+       .fb_release = nouveau_fbcon_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = nouveau_fbcon_fillrect,
@@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
 
 static struct fb_ops nouveau_fbcon_sw_ops = {
        .owner = THIS_MODULE,
+       .fb_open = nouveau_fbcon_open,
+       .fb_release = nouveau_fbcon_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
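
The new fb_open/fb_release hooks keep a runtime-PM reference for as long as the fbdev node is open, so the GPU cannot autosuspend underneath fbcon. A sketch with a plain counter standing in for pm_runtime_get_sync()/pm_runtime_put():

#include <stdio.h>

static int pm_usage;

static int fb_open(void)
{
        pm_usage++;             /* pm_runtime_get_sync(): keep the GPU awake */
        return 0;
}

static int fb_release(void)
{
        pm_usage--;             /* pm_runtime_put(): allow autosuspend at 0 */
        return 0;
}

int main(void)
{
        fb_open();
        fb_open();
        fb_release();
        fb_release();
        printf("usage count balanced: %d\n", pm_usage);
        return 0;
}
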
index 65af31441e9c29496647084d927c11f393974653..a7d69ce7abc1ad33b5fd283a5f4b5321feb89f18 100644 (file)
@@ -267,6 +267,12 @@ init_i2c(struct nvbios_init *init, int index)
                index = NVKM_I2C_BUS_PRI;
                if (init->outp && init->outp->i2c_upper_default)
                        index = NVKM_I2C_BUS_SEC;
+       } else
+       if (index == 0x80) {
+               index = NVKM_I2C_BUS_PRI;
+       } else
+       if (index == 0x81) {
+               index = NVKM_I2C_BUS_SEC;
        }
 
        bus = nvkm_i2c_bus_find(i2c, index);
index e0ec2a6b7b795c964e119eae2dfed644d24e4ae2..212800ecdce99e4eb1a3a23ebdab9c207cd860da 100644 (file)
@@ -8,7 +8,10 @@ struct nvbios_source {
        void *(*init)(struct nvkm_bios *, const char *);
        void  (*fini)(void *);
        u32   (*read)(void *, u32 offset, u32 length, struct nvkm_bios *);
+       u32   (*size)(void *);
        bool rw;
+       bool ignore_checksum;
+       bool no_pcir;
 };
 
 int nvbios_extend(struct nvkm_bios *, u32 length);
index 792f017525f689bb1d38b86c0bf2e746f9495c8d..b2557e87afdd6d0e95910b3b4b91e37ce9a3e269 100644 (file)
@@ -45,7 +45,7 @@ shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
                u32 read = mthd->func->read(data, start, limit - start, bios);
                bios->size = start + read;
        }
-       return bios->size >= limit;
+       return bios->size >= upto;
 }
 
 static int
@@ -55,14 +55,22 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
        struct nvbios_image image;
        int score = 1;
 
-       if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
-               nvkm_debug(subdev, "%08x: header fetch failed\n", offset);
-               return 0;
-       }
+       if (mthd->func->no_pcir) {
+               image.base = 0;
+               image.type = 0;
+               image.size = mthd->func->size(mthd->data);
+               image.last = 1;
+       } else {
+               if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
+                       nvkm_debug(subdev, "%08x: header fetch failed\n",
+                                  offset);
+                       return 0;
+               }
 
-       if (!nvbios_image(bios, idx, &image)) {
-               nvkm_debug(subdev, "image %d invalid\n", idx);
-               return 0;
+               if (!nvbios_image(bios, idx, &image)) {
+                       nvkm_debug(subdev, "image %d invalid\n", idx);
+                       return 0;
+               }
        }
        nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
                   image.base, image.type, image.size);
@@ -74,7 +82,8 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
 
        switch (image.type) {
        case 0x00:
-               if (nvbios_checksum(&bios->data[image.base], image.size)) {
+               if (!mthd->func->ignore_checksum &&
+                   nvbios_checksum(&bios->data[image.base], image.size)) {
                        nvkm_debug(subdev, "%08x: checksum failed\n",
                                   image.base);
                        if (mthd->func->rw)
index bd60d7dd09f51a45b70f120597ca38adaf8c102b..4bf486b57101367708bba2b6fe4bdd1d985f1d19 100644 (file)
@@ -21,6 +21,7 @@
  *
  */
 #include "priv.h"
+
 #include <core/pci.h>
 
 #if defined(__powerpc__)
@@ -33,17 +34,26 @@ static u32
 of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
        struct priv *priv = data;
-       if (offset + length <= priv->size) {
+       if (offset < priv->size) {
+               length = min_t(u32, length, priv->size - offset);
                memcpy_fromio(bios->data + offset, priv->data + offset, length);
                return length;
        }
        return 0;
 }
 
+static u32
+of_size(void *data)
+{
+       struct priv *priv = data;
+       return priv->size;
+}
+
 static void *
 of_init(struct nvkm_bios *bios, const char *name)
 {
-       struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev;
+       struct nvkm_device *device = bios->subdev.device;
+       struct pci_dev *pdev = device->func->pci(device)->pdev;
        struct device_node *dn;
        struct priv *priv;
        if (!(dn = pci_device_to_OF_node(pdev)))
@@ -62,7 +72,10 @@ nvbios_of = {
        .init = of_init,
        .fini = (void(*)(void *))kfree,
        .read = of_read,
+       .size = of_size,
        .rw = false,
+       .ignore_checksum = true,
+       .no_pcir = true,
 };
 #else
 const struct nvbios_source
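
of_read() above now clamps a request that crosses the end of the Open Firmware image instead of failing it outright, and the new size/no_pcir/ignore_checksum hooks let such header-less images be shadowed at all. The clamping logic as a standalone sketch, with plain memcpy in place of memcpy_fromio():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t clamped_read(uint8_t *dst, const uint8_t *src, uint32_t size,
                             uint32_t offset, uint32_t length)
{
        if (offset >= size)
                return 0;
        if (length > size - offset)
                length = size - offset;         /* partial read at the tail */
        memcpy(dst, src + offset, length);
        return length;
}

int main(void)
{
        uint8_t img[16] = { 0 }, buf[32];

        printf("%u\n", (unsigned)clamped_read(buf, img, 16, 8, 32));  /* 8 */
        printf("%u\n", (unsigned)clamped_read(buf, img, 16, 16, 4));  /* 0 */
        return 0;
}
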
index 814cb51cc87372bd4c18225b16b1401d10285b60..385a90f91ed6a14e394ba1e8b4743d9c38c06412 100644 (file)
@@ -35,6 +35,8 @@ static const struct nvkm_device_agp_quirk
 nvkm_device_agp_quirks[] = {
        /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
        { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+       /* SiS 761 does not support AGP cards, use PCI mode */
+       { PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 },
        {},
 };
 
@@ -137,8 +139,10 @@ nvkm_agp_ctor(struct nvkm_pci *pci)
        while (quirk->hostbridge_vendor) {
                if (info.device->vendor == quirk->hostbridge_vendor &&
                    info.device->device == quirk->hostbridge_device &&
-                   pci->pdev->vendor == quirk->chip_vendor &&
-                   pci->pdev->device == quirk->chip_device) {
+                   (quirk->chip_vendor == (u16)PCI_ANY_ID ||
+                   pci->pdev->vendor == quirk->chip_vendor) &&
+                   (quirk->chip_device == (u16)PCI_ANY_ID ||
+                   pci->pdev->device == quirk->chip_device)) {
                        nvkm_info(subdev, "forcing default agp mode to %dX, "
                                          "use NvAGP=<mode> to override\n",
                                  quirk->mode);
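
The quirk table gains PCI_ANY_ID wildcards so a single entry — the SiS 761 one above — can match every graphics chip behind a given host bridge. The matching rule in isolation; ANY_ID is an illustrative stand-in for (u16)PCI_ANY_ID:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ANY_ID 0xffff   /* stand-in for (u16)PCI_ANY_ID */

static bool id_match(uint16_t quirk, uint16_t dev)
{
        return quirk == ANY_ID || quirk == dev;
}

int main(void)
{
        /* SiS 761-style entry: any chip vendor/device behind the bridge */
        printf("%d\n", id_match(ANY_ID, 0x0311));       /* 1 */
        printf("%d\n", id_match(0x0691, 0x0311));       /* 0 */
        return 0;
}
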
index dd845f82cc24aaa5b46cf5680ffd607d3a8ebb2b..183aea1abebc4afe5ad28f4694bc92ccc788ee8d 100644 (file)
@@ -242,6 +242,10 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
        bo->is_primary = true;
 
        ret = qxl_bo_reserve(bo, false);
+       if (ret)
+               return ret;
+       ret = qxl_bo_pin(bo, bo->type, NULL);
+       qxl_bo_unreserve(bo);
        if (ret)
                return ret;
 
@@ -257,7 +261,11 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
        }
        drm_vblank_put(dev, qcrtc->index);
 
-       qxl_bo_unreserve(bo);
+       ret = qxl_bo_reserve(bo, false);
+       if (!ret) {
+               qxl_bo_unpin(bo);
+               qxl_bo_unreserve(bo);
+       }
 
        return 0;
 }
@@ -618,7 +626,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
                  adjusted_mode->hdisplay,
                  adjusted_mode->vdisplay);
 
-       if (qcrtc->index == 0)
+       if (bo->is_primary == false)
                recreate_primary = true;
 
        if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
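The page-flip hunks pin the buffer object for the lifetime of the flip: reserve, pin, unreserve before programming the flip, then re-reserve just long enough to unpin afterwards; the mode_set hunk separately keys recreate_primary off the BO's own is_primary state rather than the CRTC index. The pin-for-the-duration shape, reduced to a standalone sketch with stubbed helpers (the buf_* names are hypothetical):

#include <stdio.h>

struct buf { int pinned; };

static int  buf_reserve(struct buf *b)   { (void)b; return 0; }  /* lock stub   */
static void buf_unreserve(struct buf *b) { (void)b; }            /* unlock stub */
static int  buf_pin(struct buf *b)       { b->pinned = 1; return 0; }
static void buf_unpin(struct buf *b)     { b->pinned = 0; }

static int flip_pinned(struct buf *bo)
{
        int ret = buf_reserve(bo);
        if (ret)
                return ret;
        ret = buf_pin(bo);              /* keep the BO resident for the flip */
        buf_unreserve(bo);
        if (ret)
                return ret;

        printf("flip with pinned=%d\n", bo->pinned);  /* hardware flip here */

        ret = buf_reserve(bo);          /* best-effort unpin afterwards */
        if (!ret) {
                buf_unpin(bo);
                buf_unreserve(bo);
        }
        return 0;
}

int main(void) { struct buf b = {0}; return flip_pinned(&b); }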
index 41c422fee31a02dbc932964bc4686921e533fdd3..c4a552637c9353d70cab76083b7d7786dc436d29 100644 (file)
@@ -144,14 +144,17 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
 
        spin_lock_irqsave(&qfbdev->dirty.lock, flags);
 
-       if (qfbdev->dirty.y1 < y)
-               y = qfbdev->dirty.y1;
-       if (qfbdev->dirty.y2 > y2)
-               y2 = qfbdev->dirty.y2;
-       if (qfbdev->dirty.x1 < x)
-               x = qfbdev->dirty.x1;
-       if (qfbdev->dirty.x2 > x2)
-               x2 = qfbdev->dirty.x2;
+       if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
+           (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
+               if (qfbdev->dirty.y1 < y)
+                       y = qfbdev->dirty.y1;
+               if (qfbdev->dirty.y2 > y2)
+                       y2 = qfbdev->dirty.y2;
+               if (qfbdev->dirty.x1 < x)
+                       x = qfbdev->dirty.x1;
+               if (qfbdev->dirty.x2 > x2)
+                       x2 = qfbdev->dirty.x2;
+       }
 
        qfbdev->dirty.x1 = x;
        qfbdev->dirty.x2 = x2;
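The dirty-tracking hunk only folds the stored rectangle into the new damage when the stored rectangle has non-zero area; an empty (cleared) rectangle would otherwise drag the union back toward the origin and grow the repaint region spuriously. The merge in isolation (struct rect and merge_dirty are illustrative):

struct rect { int x1, y1, x2, y2; };

/* Union the accumulated rect into the new damage only when the
 * accumulated rect is non-empty; then store the result. */
static void merge_dirty(struct rect *acc, struct rect add)
{
        if ((acc->y2 - acc->y1) && (acc->x2 - acc->x1)) {
                if (acc->y1 < add.y1) add.y1 = acc->y1;
                if (acc->y2 > add.y2) add.y2 = acc->y2;
                if (acc->x1 < add.x1) add.x1 = acc->x1;
                if (acc->x2 > add.x2) add.x2 = acc->x2;
        }
        *acc = add;
}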
index b66ec331c17cd51f1b81022ebd29d18944258b43..4efa8e261baf59546ca24eb39920bc4159358ab7 100644 (file)
@@ -307,7 +307,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                if (idr_ret < 0)
                        return idr_ret;
-               bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
+               bo = to_qxl_bo(entry->tv.bo);
 
                (*release)->release_offset = create_rel->release_offset + 64;
 
@@ -316,8 +316,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);
-
-               qxl_bo_unref(&bo);
                return 0;
        }
 
index c3872598b85a3856787b1bf0b7113633a468e020..65adb9c723772d9f0011573d320182823b18d37a 100644 (file)
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
                } else
                        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                       args.ucAction = ATOM_LCD_BLON;
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+                       atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
                }
                break;
        case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
                                atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
                }
                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-                       atombios_dig_transmitter_setup(encoder,
-                                                      ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+                       atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
                if (ext_encoder)
                        atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
                break;
index d2e9e9efc159c053b954aed21840ebe7d91f2739..6743174acdbcd22b5d357d0275f1e564dc653d81 100644 (file)
@@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
        radeon_fbdev_init(rdev);
        drm_kms_helper_poll_init(rdev->ddev);
 
-       if (rdev->pm.dpm_enabled) {
-               /* do dpm late init */
-               ret = radeon_pm_late_init(rdev);
-               if (ret) {
-                       rdev->pm.dpm_enabled = false;
-                       DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
-               }
-               /* set the dpm state for PX since there won't be
-                * a modeset to call this.
-                */
-               radeon_pm_compute_clocks(rdev);
-       }
+       /* do pm late init */
+       ret = radeon_pm_late_init(rdev);
 
        return 0;
 }
index 5e09c061847f50c688650d12625a462e8c4737cd..744f5c49c66463c56187dbc2130a77539a264130 100644 (file)
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
 {
        struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
        struct drm_device *dev = master->base.dev;
-       struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector;
        struct drm_connector *connector;
 
@@ -284,14 +283,22 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
        radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
 
        drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+       drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
        drm_mode_connector_set_path_property(connector, pathprop);
 
+       return connector;
+}
+
+static void radeon_dp_register_mst_connector(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
        drm_modeset_lock_all(dev);
        radeon_fb_add_connector(rdev, connector);
        drm_modeset_unlock_all(dev);
 
        drm_connector_register(connector);
-       return connector;
 }
 
 static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -324,6 +331,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 
 struct drm_dp_mst_topology_cbs mst_cbs = {
        .add_connector = radeon_dp_add_mst_connector,
+       .register_connector = radeon_dp_register_mst_connector,
        .destroy_connector = radeon_dp_destroy_mst_connector,
        .hotplug = radeon_dp_mst_hotplug,
 };
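The MST hunk splits connector creation in two phases: add_connector now only allocates the connector and attaches its path/tile properties, while the new register_connector callback exposes it to userspace once the MST core has finished its own setup. The two-phase shape in outline (hypothetical types, not the drm API):

#include <stdlib.h>

struct connector { int has_props, registered; };

/* Phase 1: allocate and decorate, but do not publish yet. */
static struct connector *add_connector(void)
{
        struct connector *c = calloc(1, sizeof(*c));
        if (c)
                c->has_props = 1;       /* path/tile properties attached here */
        return c;
}

/* Phase 2: called by the core once it is safe to expose the connector. */
static void register_connector(struct connector *c)
{
        c->registered = 1;              /* drm_connector_register() equivalent */
}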
index 7214858ffceaa8c20409206533dcc332fd663705..26da2f4d7b4f56fca3948af07bca9c061bb5ddaf 100644 (file)
@@ -48,40 +48,10 @@ struct radeon_fbdev {
        struct radeon_device *rdev;
 };
 
-/**
- * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
- *
- * @info: fbdev info
- *
- * This function hides the cursor on all CRTCs used by fbdev.
- */
-static int radeon_fb_helper_set_par(struct fb_info *info)
-{
-       int ret;
-
-       ret = drm_fb_helper_set_par(info);
-
-       /* XXX: with universal plane support fbdev will automatically disable
-        * all non-primary planes (including the cursor)
-        */
-       if (ret == 0) {
-               struct drm_fb_helper *fb_helper = info->par;
-               int i;
-
-               for (i = 0; i < fb_helper->crtc_count; i++) {
-                       struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
-                       radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
-               }
-       }
-
-       return ret;
-}
-
 static struct fb_ops radeonfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
-       .fb_set_par = radeon_fb_helper_set_par,
+       .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
@@ -427,3 +397,19 @@ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector
 {
        drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
 }
+
+void radeon_fbdev_restore_mode(struct radeon_device *rdev)
+{
+       struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev;
+       struct drm_fb_helper *fb_helper;
+       int ret;
+
+       if (!rfbdev)
+               return;
+
+       fb_helper = &rfbdev->helper;
+
+       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       if (ret)
+               DRM_DEBUG("failed to restore crtc mode\n");
+}
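This file also drops the set_par cursor-hide workaround (universal plane support disables non-primary planes for fbdev automatically, as the removed comment notes) and adds radeon_fbdev_restore_mode(), used from the lastclose path below. The helper's guard-then-delegate shape, as a trivial sketch (restore_fbdev is a stand-in):

struct fb_state { int active_mode; };

/* Restore the fbdev mode if emulation exists; a failed restore is only
 * worth a debug message, as in the helper above. */
static void restore_fbdev(struct fb_state *fb)
{
        if (!fb)
                return;                 /* no fbdev emulation was created */
        fb->active_mode = 0;            /* restore_fbdev_mode call goes here */
}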
index 4a119c255ba9709692b234c51a928d826cc22ec2..0e932bf932c11f95a59a57bb3c9126e01a6baf3d 100644 (file)
@@ -598,7 +598,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * radeon_driver_firstopen_kms - drm callback for last close
+ * radeon_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
@@ -606,6 +606,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  */
 void radeon_driver_lastclose_kms(struct drm_device *dev)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       radeon_fbdev_restore_mode(rdev);
        vga_switcheroo_process_delayed_switch();
 }
 
index aecc3e3dec0ca093441e3871df414627b51e92ec..457b026a0972782fc6d777b1069b683c1fda037f 100644 (file)
@@ -980,6 +980,7 @@ int radeon_fbdev_init(struct radeon_device *rdev);
 void radeon_fbdev_fini(struct radeon_device *rdev);
 void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+void radeon_fbdev_restore_mode(struct radeon_device *rdev);
 
 void radeon_fb_output_poll_changed(struct radeon_device *rdev);
 
index 05751f3f84449d40457b3f989f0d7ab874935bbf..44489cce7458402cf8a48a6176e3ed4625b603d5 100644 (file)
@@ -1326,14 +1326,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
        INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
 
        if (rdev->pm.num_power_states > 1) {
-               /* where's the best place to put these? */
-               ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-               if (ret)
-                       DRM_ERROR("failed to create device file for power profile\n");
-               ret = device_create_file(rdev->dev, &dev_attr_power_method);
-               if (ret)
-                       DRM_ERROR("failed to create device file for power method\n");
-
                if (radeon_debugfs_pm_init(rdev)) {
                        DRM_ERROR("Failed to register debugfs file for PM!\n");
                }
@@ -1391,20 +1383,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
                goto dpm_failed;
        rdev->pm.dpm_enabled = true;
 
-       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
-       if (ret)
-               DRM_ERROR("failed to create device file for dpm state\n");
-       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
-       if (ret)
-               DRM_ERROR("failed to create device file for dpm state\n");
-       /* XXX: these are noops for dpm but are here for backwards compat */
-       ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-       if (ret)
-               DRM_ERROR("failed to create device file for power profile\n");
-       ret = device_create_file(rdev->dev, &dev_attr_power_method);
-       if (ret)
-               DRM_ERROR("failed to create device file for power method\n");
-
        if (radeon_debugfs_pm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for dpm!\n");
        }
@@ -1545,9 +1523,44 @@ int radeon_pm_late_init(struct radeon_device *rdev)
        int ret = 0;
 
        if (rdev->pm.pm_method == PM_METHOD_DPM) {
-               mutex_lock(&rdev->pm.mutex);
-               ret = radeon_dpm_late_enable(rdev);
-               mutex_unlock(&rdev->pm.mutex);
+               if (rdev->pm.dpm_enabled) {
+                       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for dpm state\n");
+                       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for dpm state\n");
+                       /* XXX: these are noops for dpm but are here for backwards compat */
+                       ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power profile\n");
+                       ret = device_create_file(rdev->dev, &dev_attr_power_method);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power method\n");
+
+                       mutex_lock(&rdev->pm.mutex);
+                       ret = radeon_dpm_late_enable(rdev);
+                       mutex_unlock(&rdev->pm.mutex);
+                       if (ret) {
+                               rdev->pm.dpm_enabled = false;
+                               DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+                       } else {
+                               /* set the dpm state for PX since there won't be
+                                * a modeset to call this.
+                                */
+                               radeon_pm_compute_clocks(rdev);
+                       }
+               }
+       } else {
+               if (rdev->pm.num_power_states > 1) {
+                       /* where's the best place to put these? */
+                       ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power profile\n");
+                       ret = device_create_file(rdev->dev, &dev_attr_power_method);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power method\n");
+               }
        }
        return ret;
 }
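Together with the radeon_modeset_init() hunk above, this consolidates both the dpm late enable and the sysfs file creation into radeon_pm_late_init(): the power-control files only appear once the corresponding method is known to be active, and dpm is flagged off again if the late enable fails, so stale controls are never trusted. The gating pattern as a standalone sketch (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct pmdev { bool dpm_enabled; };

static int create_file(const char *name) { printf("sysfs: %s\n", name); return 0; }
static int dpm_late_enable(struct pmdev *d) { (void)d; return 0; }  /* stub */

static int pm_late_init(struct pmdev *d)
{
        int ret;

        if (!d->dpm_enabled)
                return 0;       /* never expose controls for a dead feature */

        create_file("power_dpm_state");
        create_file("power_dpm_force_performance_level");

        ret = dpm_late_enable(d);
        if (ret) {
                d->dpm_enabled = false;   /* stop trusting dpm from here on */
                fprintf(stderr, "late enable failed, disabling dpm\n");
        }
        return ret;
}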
index e9115d3f67b0ca0a34ff68ce564b316895c81939..e72bf46042e0a42f469cbfd8ff285b1ae9abb155 100644 (file)
@@ -2928,6 +2928,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
        { 0, 0, 0, 0 },
 };
 
index db8b49101a8b620f742af39c24154933bf3d1ff7..512263919282328cb55505abf2542987c5f9f9cd 100644 (file)
@@ -34,8 +34,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
 
-       seq_printf(m, "fence %ld %lld\n",
-                  atomic64_read(&vgdev->fence_drv.last_seq),
+       seq_printf(m, "fence %llu %lld\n",
+                  (u64)atomic64_read(&vgdev->fence_drv.last_seq),
                   vgdev->fence_drv.sync_seq);
        return 0;
 }
index 1da632631dac808e8273fe3aa77a5426950f9156..67097c9ce9c143e2d6ac3534c4379d0f024d4887 100644 (file)
@@ -61,7 +61,7 @@ static void virtio_timeline_value_str(struct fence *f, char *str, int size)
 {
        struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
-       snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq));
+       snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
 }
 
 static const struct fence_ops virtio_fence_ops = {
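Both virtio-gpu hunks fix the same portability problem: atomic64_read() does not return the same C type on every architecture, so a hard-coded %ld format is wrong somewhere. Casting the value to a fixed 64-bit type and printing that type's format specifier is the portable form; a standalone illustration:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        long long raw = 42;     /* width of atomic64_read()'s result varies */

        printf("fence %llu\n", (unsigned long long)raw); /* the hunks' fix */
        printf("fence %" PRIu64 "\n", (uint64_t)raw);    /* equivalent form */
        return 0;
}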
index 5ae8f921da2a478bef55b617c3a28f66ed1e2773..8a76821177a6c0c1a3cc9659b4f119943d48b3ce 100644 (file)
@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
                                         0, 0,
                                         DRM_MM_SEARCH_DEFAULT,
                                         DRM_MM_CREATE_DEFAULT);
+       if (ret) {
+               (void) vmw_cmdbuf_man_process(man);
+               ret = drm_mm_insert_node_generic(&man->mm, info->node,
+                                                info->page_size, 0, 0,
+                                                DRM_MM_SEARCH_DEFAULT,
+                                                DRM_MM_CREATE_DEFAULT);
+       }
+
        spin_unlock_bh(&man->lock);
        info->done = !ret;
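The command-buffer hunk retries a failed allocation exactly once after draining completed work, since finished commands may be the only thing holding space in the pool. The retry shape with stubbed helpers (the pool_* names are hypothetical):

struct pool { int free_blocks; };

static int pool_alloc(struct pool *p)
{
        if (p->free_blocks == 0)
                return -1;              /* -ENOMEM stand-in */
        p->free_blocks--;
        return 0;
}

static void pool_reclaim_completed(struct pool *p)
{
        p->free_blocks++;               /* space released by finished work */
}

static int alloc_with_retry(struct pool *p)
{
        int ret = pool_alloc(p);
        if (ret) {
                pool_reclaim_completed(p);
                ret = pool_alloc(p);    /* one retry, then give up */
        }
        return ret;
}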
 
index 64b50409fa0749558844cf561aac983e36197241..03f63c749c02333f412c82184f20def8ce1d8d74 100644 (file)
@@ -657,7 +657,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
        struct vmw_resource *res = &user_srf->srf.res;
 
        *p_base = NULL;
-       ttm_base_object_unref(&user_srf->backup_base);
+       if (user_srf->backup_base)
+               ttm_base_object_unref(&user_srf->backup_base);
        vmw_resource_unreference(&res);
 }
 
index 3dd2de31a2f8d380f71ff61c562a53d8638f9eb5..472b88285c755e5f18d25ba2c935dbdaca449546 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
@@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
 }
 
 #ifdef CONFIG_ACPI
+/*
+ * The HCNT/LCNT information coming from ACPI should be the most accurate
+ * for given platform. However, some systems get it wrong. On such systems
+ * we get better results by calculating those based on the input clock.
+ */
+static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
+       {
+               .ident = "Dell Inspiron 7348",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
+               },
+       },
+       { }
+};
+
 static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
                               u16 *hcnt, u16 *lcnt, u32 *sda_hold)
 {
@@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
        acpi_handle handle = ACPI_HANDLE(&pdev->dev);
        union acpi_object *obj;
 
+       if (dmi_check_system(dw_i2c_no_acpi_params))
+               return;
+
        if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
                return;
 
@@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
        adap->dev.parent = &pdev->dev;
        adap->dev.of_node = pdev->dev.of_node;
 
-       r = i2c_add_numbered_adapter(adap);
-       if (r) {
-               dev_err(&pdev->dev, "failure adding adapter\n");
-               return r;
-       }
-
        if (dev->pm_runtime_disabled) {
                pm_runtime_forbid(&pdev->dev);
        } else {
@@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev)
                pm_runtime_enable(&pdev->dev);
        }
 
+       r = i2c_add_numbered_adapter(adap);
+       if (r) {
+               dev_err(&pdev->dev, "failure adding adapter\n");
+               pm_runtime_disable(&pdev->dev);
+               return r;
+       }
+
        return 0;
 }
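The designware hunks add a DMI denylist that is consulted before trusting ACPI-provided HCNT/LCNT timing values; on listed machines (here the Dell Inspiron 7348) the driver falls back to values computed from the input clock. The probe hunk in the same file also moves i2c_add_numbered_adapter() after runtime-PM setup and disables runtime PM if registration fails. The denylist lookup, reduced to a standalone sketch (dmi_id and dmi_denylisted are stand-ins for the kernel's dmi_system_id machinery):

#include <stdbool.h>
#include <string.h>

struct dmi_id { const char *sys_vendor, *product_name; };

static const struct dmi_id no_acpi_params[] = {
        { "Dell Inc.", "Inspiron 7348" },
        { 0, 0 }
};

/* Return true when the running machine matches a denylist entry, meaning
 * the firmware-provided timing parameters should be ignored. */
static bool dmi_denylisted(const char *vendor, const char *product)
{
        const struct dmi_id *id;

        for (id = no_acpi_params; id->sys_vendor; id++)
                if (!strcmp(id->sys_vendor, vendor) &&
                    !strcmp(id->product_name, product))
                        return true;
        return false;
}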
 
index d8361dada584556baccc2c6bd861eb11028c6d51..d8b5a8fee1e6c85588dd569b80894306b1c76b1a 100644 (file)
@@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
                return ret;
        }
 
+       pm_runtime_enable(dev);
+       platform_set_drvdata(pdev, priv);
+
        ret = i2c_add_numbered_adapter(adap);
        if (ret < 0) {
                dev_err(dev, "reg adap failed: %d\n", ret);
+               pm_runtime_disable(dev);
                return ret;
        }
 
-       pm_runtime_enable(dev);
-       platform_set_drvdata(pdev, priv);
-
        dev_info(dev, "probed\n");
 
        return 0;
index 50bfd8cef5f224aebb189a5b6635b62316f6117c..5df819610d5280cc1fee176344be4d226fc5ea56 100644 (file)
@@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
        i2c->adap.nr = i2c->pdata->bus_num;
        i2c->adap.dev.of_node = pdev->dev.of_node;
 
+       platform_set_drvdata(pdev, i2c);
+
+       pm_runtime_enable(&pdev->dev);
+
        ret = i2c_add_numbered_adapter(&i2c->adap);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+               pm_runtime_disable(&pdev->dev);
                s3c24xx_i2c_deregister_cpufreq(i2c);
                clk_unprepare(i2c->clk);
                return ret;
        }
 
-       platform_set_drvdata(pdev, i2c);
-
-       pm_runtime_enable(&pdev->dev);
        pm_runtime_enable(&i2c->adap.dev);
 
        dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
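The rcar and s3c24xx hunks apply the same ordering fix as the designware probe hunk above: set drvdata and enable runtime PM before registering the adapter, because registration can trigger transfers immediately, and disable runtime PM again if registration fails. The shared pattern in outline (hypothetical helpers):

struct i2c_ctx { int pm_enabled, registered; };

static void pm_enable(struct i2c_ctx *c)   { c->pm_enabled = 1; }
static void pm_disable(struct i2c_ctx *c)  { c->pm_enabled = 0; }
static int  add_adapter(struct i2c_ctx *c) { c->registered = 1; return 0; }

static int probe(struct i2c_ctx *c)
{
        pm_enable(c);                   /* adapter must be usable before ... */

        if (add_adapter(c) < 0) {       /* ... it becomes visible here */
                pm_disable(c);          /* unwind on failure */
                return -1;
        }
        return 0;
}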
index 5f89f1e3c2f24fc562a519eb173d33de8c280f42..a59c3111f7fb98df957e19d1fa93faac16322e20 100644 (file)
@@ -694,12 +694,12 @@ static int i2c_device_probe(struct device *dev)
                goto err_clear_wakeup_irq;
 
        status = dev_pm_domain_attach(&client->dev, true);
-       if (status != -EPROBE_DEFER) {
-               status = driver->probe(client, i2c_match_id(driver->id_table,
-                                       client));
-               if (status)
-                       goto err_detach_pm_domain;
-       }
+       if (status == -EPROBE_DEFER)
+               goto err_clear_wakeup_irq;
+
+       status = driver->probe(client, i2c_match_id(driver->id_table, client));
+       if (status)
+               goto err_detach_pm_domain;
 
        return 0;
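Before this hunk, a -EPROBE_DEFER from dev_pm_domain_attach() fell through to "return 0", so the device looked successfully bound even though its driver's probe never ran. The fix routes the deferral through the error path so the core retries later. In outline (names hypothetical, unwind stands in for the wakeup-irq cleanup):

#define EPROBE_DEFER 517                /* the kernel's errno value */

static int pm_domain_attach(void) { return 0; }  /* stub */
static int do_probe(void)         { return 0; }  /* stub */
static void unwind(void)          { }            /* cleanup stub */

static int device_probe(void)
{
        int status = pm_domain_attach();

        if (status == -EPROBE_DEFER)
                goto err_unwind;   /* retried later, not silently "bound" */

        status = do_probe();
        if (status)
                goto err_unwind;   /* detach pm domain etc. */
        return 0;

err_unwind:
        unwind();
        return status;
}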
 
index b1ab13f3e182bb520cc986512d11e9016ecf1362..59a2dafc8c574df13b5d186f9b0987c06c8e58c6 100644 (file)
@@ -1232,14 +1232,32 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv,
        return true;
 }
 
+static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
+{
+       enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
+       enum rdma_transport_type transport =
+               rdma_node_get_transport(device->node_type);
+
+       return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
+}
+
+static bool cma_protocol_roce(const struct rdma_cm_id *id)
+{
+       struct ib_device *device = id->device;
+       const int port_num = id->port_num ?: rdma_start_port(device);
+
+       return cma_protocol_roce_dev_port(device, port_num);
+}
+
 static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
                              const struct net_device *net_dev)
 {
        const struct rdma_addr *addr = &id_priv->id.route.addr;
 
        if (!net_dev)
-               /* This request is an AF_IB request */
-               return addr->src_addr.ss_family == AF_IB;
+               /* This request is an AF_IB request or a RoCE request */
+               return addr->src_addr.ss_family == AF_IB ||
+                      cma_protocol_roce(&id_priv->id);
 
        return !addr->dev_addr.bound_dev_if ||
               (net_eq(dev_net(net_dev), &init_net) &&
@@ -1294,6 +1312,10 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
                if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
                        /* Assuming the protocol is AF_IB */
                        *net_dev = NULL;
+               } else if (cma_protocol_roce_dev_port(req.device, req.port)) {
+                       /* TODO find the net dev matching the request parameters
+                        * through the RoCE GID table */
+                       *net_dev = NULL;
                } else {
                        return ERR_CAST(*net_dev);
                }
@@ -1593,11 +1615,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                if (ret)
                        goto err;
        } else {
-               /* An AF_IB connection */
-               WARN_ON_ONCE(ss_family != AF_IB);
-
-               cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv),
-                                &rt->addr.dev_addr);
+               if (!cma_protocol_roce(listen_id) &&
+                   cma_any_addr(cma_src_addr(id_priv))) {
+                       rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
+                       rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
+                       ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
+               } else if (!cma_any_addr(cma_src_addr(id_priv))) {
+                       ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
+                       if (ret)
+                               goto err;
+               }
        }
        rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
@@ -1635,13 +1662,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
                if (ret)
                        goto err;
        } else {
-               /* An AF_IB connection */
-               WARN_ON_ONCE(ss_family != AF_IB);
-
-               if (!cma_any_addr(cma_src_addr(id_priv)))
-                       cma_translate_ib((struct sockaddr_ib *)
-                                               cma_src_addr(id_priv),
-                                        &id->route.addr.dev_addr);
+               if (!cma_any_addr(cma_src_addr(id_priv))) {
+                       ret = cma_translate_addr(cma_src_addr(id_priv),
+                                                &id->route.addr.dev_addr);
+                       if (ret)
+                               goto err;
+               }
        }
 
        id_priv->state = RDMA_CM_CONNECT;
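The cma hunks teach the connection-request path to recognise RoCE: a port whose link layer is Ethernet while the node transport is IB. Requests without a resolvable net_dev are then accepted for RoCE as well as AF_IB, and the new-connection paths translate the source address instead of assuming an AF_IB connection. The classification predicate on its own (the enums are illustrative stand-ins for the rdma_* accessors):

#include <stdbool.h>

enum link_layer { LL_INFINIBAND, LL_ETHERNET };
enum transport  { TRANS_IB, TRANS_IWARP };

/* RoCE = IB transport carried over an Ethernet link layer. */
static bool is_roce(enum link_layer ll, enum transport t)
{
        return ll == LL_ETHERNET && t == TRANS_IB;
}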
index 70acda91eb2a934e79e999a1a3297d92a2148ae2..6a0bdfa0ce2e76f4741454a740426e0b891015bc 100644 (file)
@@ -1325,9 +1325,6 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,
                 "%u.%u", nesadapter->firmware_version >> 16,
                 nesadapter->firmware_version & 0x000000ff);
        strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
-       drvinfo->testinfo_len = 0;
-       drvinfo->eedump_len = 0;
-       drvinfo->regdump_len = 0;
 }
 
 
index 5be13d8991bce0e7c4edec27c8034e847d4cba29..f903502d3883256e8044dbbec9b2ef9baa2fbdc4 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 04a66229584e0089ef72e027f986d3d71a574b0d..7fe9502ce8d3df43a57b8e7325ecd0aa262949cd 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 3935672661423ff007f6cfc405197967e56c9792..596e0ed49a8e2a066097b5612da723535fdcccbc 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 9d737ed5e55d905e6452d2c5fb20f040cf765705..b54986de5f0cad3677461af613862351ca3fb3f2 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 5d13860161a4a64af533b3a5aee3c43e25e475d0..5e55b8bc6fe402af423118c1454b5bc67d21d8ba 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 4087d24a88f6d9a1e6080b3a2b91dff473cd0944..98453e91daa6f0a1d91c9eb50d468a2585bfcdbe 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index e3c9bd9d3ba366d7f5cd8ef30f5cd5f43b3fbe74..3c37dd59c04eaec750e5ddbf98383e8e4b64d669 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 93713a2230b36724c5bef33933de4c512ba89784..3a8add9ddf4611f89272ee21a0a17dc70fe5028f 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index e5a9297dd1bd3eac9f2dda97f2954e002a31ca4f..525bf272671e6973afb13472263dd8f730c4eac3 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 34c49b8105feb4b59ae320997622e4145d24a742..0c15bd885035ee5fe5110e1317cafe09e762fe07 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index db3588df3546b3ceaf8dce0ae8e175e6326265a3..85dc3f989ff72aa565c85efb4d07fdc7772d3c93 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index b0aafe8db0c35a9d2930e59207a0a01f530b1792..b1458be1d402d60b417904ec450e3d7a0b5b5020 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 27dc67c1689ff4245dff59931f91e5c3d22b58c4..3412ea06116e2cca6ba802571edd5e48186edc7f 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 0d09b493cd02b4f8fa12a217c8cfc19e39cc5693..3d98e16cfeaf67fa9300d941eb064b96fd40b274 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 7df43827cb29661a039d1794d24a20e6cbf1b094..f8e3211689a3453164c9044f3bef4601053c1dba 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 0bd04efa16f33f514c30b3b5c32cf62f8488c271..414eaa566bd94e5f05a796a5416ef8f76e7939f6 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 75777a66c6848a1d7e946782052382e8f031c0b9..183fcb6a952f4bdaf0714492e1e3cce4a3d15852 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index ddef6f77a78cf4c3fbf68a1b867a23b843fd3124..de318389a301a58058fb166acd85aef0189e89d5 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 7e5dc6d9f462d68ecc3a99a7ef997e74d4e2b626..9a7a2d9755c021928ea38be608abda733129a76d 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index cb2337f0532b5dcbe0d328eb74a196d976b7b6dc..645a5f6e6c88f0a4166bf0647d26d3b7bf082670 100644 (file)
@@ -7,7 +7,7 @@
  * licenses.  You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * BSD license below:
  *
  *     Redistribution and use in source and binary forms, with or
  *     without modification, are permitted provided that the following
index 70440996e8f2cbee289fb137d16dbd7ece78d345..45ca7c1613a7f76a86b8ac81c0d215746490ea04 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 3a4288e0fbace4621df7166e7e64ca7a97a9d283..42b4b4c4e452eae8b712fb0ef295f304a7ab7492 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index d4f752e258fd4812ec81c2f627ba4ff9e222e4cc..c0b0b876ab905a574dd23c3a87fa13d8d9ce4990 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 656b88c39edab15c27214e2e963ce31104fcb2e5..66de93fb8ea934c15051f7b33fa7808306f19f05 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 14d931a8829dc4616571af436e30fd2b03070825..a08423e478af2778f9529f558221658aa62c1f42 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
index 4cd5428a2399a2cc73757c49382842094714d1bc..edc5b8565d6d9eb5ba6e22e1baefd21d53986b5f 100644 (file)
@@ -495,6 +495,7 @@ void ipoib_dev_cleanup(struct net_device *dev);
 void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_carrier_on_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
+void ipoib_mcast_free(struct ipoib_mcast *mc);
 
 void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
index f74316e679d2fc2b7b27212d47fc806e95844f01..babba05d7a0eb707f472d7de3cb06843a0844eff 100644 (file)
@@ -1207,8 +1207,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 
 out_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
-       list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+       list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(dev, mcast);
+               ipoib_mcast_free(mcast);
+       }
 }
 
 static void ipoib_reap_neigh(struct work_struct *work)
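The hunk above pairs ipoib_mcast_leave() with the newly exported ipoib_mcast_free() so reaped multicast groups are actually released instead of leaked. The braces become necessary once the loop body has two statements, and the _safe list walk is what makes freeing mid-iteration legal: it caches the next pointer before the current entry is destroyed. A minimal userspace sketch of that idiom (plain C with an illustrative node type, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

/* Walk and free every node: cache the next pointer before freeing,
 * mirroring what list_for_each_entry_safe() does for kernel lists. */
static void free_all(struct node *head)
{
        struct node *n, *tmp;

        for (n = head; n; n = tmp) {
                tmp = n->next;        /* saved before the free below */
                printf("freeing %d\n", n->val);
                free(n);
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->val = i;
                n->next = head;
                head = n;
        }
        free_all(head);
        return 0;
}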
index 136cbefe00f87aeb79b02d6508d42fdac5741069..d750a86042f3d8da0736c23a52f41737b329c37d 100644 (file)
@@ -106,7 +106,7 @@ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
                queue_delayed_work(priv->wq, &priv->mcast_task, 0);
 }
 
-static void ipoib_mcast_free(struct ipoib_mcast *mcast)
+void ipoib_mcast_free(struct ipoib_mcast *mcast)
 {
        struct net_device *dev = mcast->dev;
        int tx_dropped = 0;
index b76ac580703ce5dc9ef97fac6620adc47ea44273..a8bc2fe170dd83e12ff78706f97f9bc32a72e5cd 100644 (file)
@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data)
                if (w->counter == 24) { /* full frame */
                        walkera0701_parse_frame(w);
                        w->counter = NO_SYNC;
-                       if (abs(pulse_time - SYNC_PULSE) < RESERVE)     /* new frame sync */
+                       if (abs64(pulse_time - SYNC_PULSE) < RESERVE)   /* new frame sync */
                                w->counter = 0;
                } else {
                        if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE)
@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data)
                        } else
                                w->counter = NO_SYNC;
                }
-       } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) <
+       } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) <
                                RESERVE + BIN1_PULSE - BIN0_PULSE)      /* frame sync .. */
                w->counter = 0;
 
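pulse_time is a 64-bit delta here, and plain abs() truncates its argument to int, so a large difference can wrap to a small value and be misread as a frame sync. The userspace counterpart of the kernel's abs64() is llabs(); a minimal sketch of the truncation the fix avoids:

#include <stdio.h>
#include <stdlib.h>   /* abs(), llabs() */

int main(void)
{
        long long delta = 0x100000005LL;  /* wider than 32 bits */

        /* abs() takes int: the high bits are silently dropped. */
        printf("abs(truncated) = %d\n", abs((int)delta));   /* 5 */
        printf("llabs(delta)   = %lld\n", llabs(delta));    /* 4294967301 */
        return 0;
}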
index b052afec9a11f0d323eb735dee3d262eabf116a2..6639b2b8528aa6da9a518d3bd3dc0da3010f4a09 100644 (file)
@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
 
        error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
        if (error)
-               return error;
+               goto err_free_keypad;
 
        res = request_mem_region(res->start, resource_size(res), pdev->name);
        if (!res) {
index 867db8a91372017d2af6a11d24f189a7bc796fc2..e317b75357a0182d99ef3c04223596bd1de6fc80 100644 (file)
@@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb,
        default:
                reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
                break;
-       };
+       }
 
        error = regmap_update_bits(pwrkey->regmap,
                                   pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
index 345df9b03aed7f1eff56cf2f1a743c59c8d5e780..5adbcedcb81cf4391bfdddefe11aae5d3131dd76 100644 (file)
@@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev,
        dev->id.product = user_dev->id.product;
        dev->id.version = user_dev->id.version;
 
-       for_each_set_bit(i, dev->absbit, ABS_CNT) {
+       for (i = 0; i < ABS_CNT; i++) {
                input_abs_set_max(dev, i, user_dev->absmax[i]);
                input_abs_set_min(dev, i, user_dev->absmin[i]);
                input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
index 5f191071d44a033d45d1f433475cd3b7d0b4befd..e4eb048d1bf63f8bc2d7027800a81a58cdf4f855 100644 (file)
@@ -241,14 +241,10 @@ static int cyapa_gen6_read_sys_info(struct cyapa *cyapa)
        memcpy(&cyapa->product_id[13], &resp_data[62], 2);
        cyapa->product_id[15] = '\0';
 
+       /* Get the number of Rx electrodes. */
        rotat_align = resp_data[68];
-       if (rotat_align) {
-               cyapa->electrodes_rx = cyapa->electrodes_y;
-               cyapa->electrodes_rx = cyapa->electrodes_y;
-       } else {
-               cyapa->electrodes_rx = cyapa->electrodes_x;
-               cyapa->electrodes_rx = cyapa->electrodes_y;
-       }
+       cyapa->electrodes_rx =
+               rotat_align ? cyapa->electrodes_y : cyapa->electrodes_x;
        cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u;
 
        if (!cyapa->electrodes_x || !cyapa->electrodes_y ||
index 73670f2aebfd5e189ab794c6ffd4b44759bb5acb..c0ec26118732879f7674f68bd0458b3d27602f61 100644 (file)
@@ -60,7 +60,7 @@ struct elan_transport_ops {
        int (*get_sm_version)(struct i2c_client *client,
                              u8* ic_type, u8 *version);
        int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
-       int (*get_product_id)(struct i2c_client *client, u8 *id);
+       int (*get_product_id)(struct i2c_client *client, u16 *id);
 
        int (*get_max)(struct i2c_client *client,
                       unsigned int *max_x, unsigned int *max_y);
index fa945304b9a576d4303c778eca929a5f3517092a..5e1665bbaa0baca86e2c865ba88162be2f209d2e 100644 (file)
@@ -40,7 +40,7 @@
 #include "elan_i2c.h"
 
 #define DRIVER_NAME            "elan_i2c"
-#define ELAN_DRIVER_VERSION    "1.6.0"
+#define ELAN_DRIVER_VERSION    "1.6.1"
 #define ETP_MAX_PRESSURE       255
 #define ETP_FWIDTH_REDUCE      90
 #define ETP_FINGER_WIDTH       15
@@ -76,7 +76,7 @@ struct elan_tp_data {
        unsigned int            x_res;
        unsigned int            y_res;
 
-       u8                      product_id;
+       u16                     product_id;
        u8                      fw_version;
        u8                      sm_version;
        u8                      iap_version;
@@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
                           u16 *signature_address)
 {
        switch (iap_version) {
+       case 0x00:
+       case 0x06:
        case 0x08:
                *validpage_count = 512;
                break;
+       case 0x03:
+       case 0x07:
        case 0x09:
+       case 0x0A:
+       case 0x0B:
+       case 0x0C:
                *validpage_count = 768;
                break;
        case 0x0D:
                *validpage_count = 896;
                break;
+       case 0x0E:
+               *validpage_count = 640;
+               break;
        default:
                /* unknown ic type clear value */
                *validpage_count = 0;
@@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data)
 
        error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
                                &data->fw_signature_address);
-       if (error) {
-               dev_err(&data->client->dev,
-                       "unknown iap version %d\n", data->iap_version);
-               return error;
-       }
+       if (error)
+               dev_warn(&data->client->dev,
+                        "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
+                        data->iap_version, data->ic_type);
 
        return 0;
 }
@@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev,
        const u8 *fw_signature;
        static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};
 
+       if (data->fw_validpage_count == 0)
+               return -EINVAL;
+
        /* Look for a firmware with the product id appended. */
        fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
        if (!fw_name) {
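Taken together, these hunks trade a hard probe failure for graceful degradation: an unrecognized IAP version now only warns, fw_validpage_count stays 0, and the firmware-update sysfs path refuses to run in that state. A minimal sketch of the pattern, with illustrative names rather than the driver's real API:

#include <stdio.h>
#include <errno.h>

static unsigned int validpage_count;  /* 0 means "fw update unsupported" */

/* Probe-time lookup: unknown versions no longer abort device setup. */
static void lookup_fwinfo(int iap_version)
{
        if (iap_version == 0x08)
                validpage_count = 512;
        else
                fprintf(stderr, "warn: unexpected iap version %#04x, "
                        "firmware update will not work\n", iap_version);
}

/* Update entry point: only the optional feature is refused. */
static int update_fw(void)
{
        if (validpage_count == 0)
                return -EINVAL;
        return 0;
}

int main(void)
{
        lookup_fwinfo(0x42);                        /* device still probes */
        printf("update_fw() = %d\n", update_fw());  /* -22 (-EINVAL) */
        return 0;
}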
index 683c840c9dd73f31d1279b7db88dfacd1f42b7b0..a679e56c44cd49ddea4361aebdb97d4fe7f1e12e 100644 (file)
@@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
        return 0;
 }
 
-static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id)
 {
        int error;
        u8 val[3];
@@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
                return error;
        }
 
-       *id = val[0];
+       *id = le16_to_cpup((__le16 *)val);
        return 0;
 }
 
index ff36a366b2aa1aadbe3c9a7f0e9de3eb687d83c3..cb6aecbc1dc28a20885885c4b361ecf8343bc445 100644 (file)
@@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
        return 0;
 }
 
-static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
 {
        int error;
        u8 val[3];
@@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
                return error;
        }
 
-       *id = val[1];
+       *id = be16_to_cpup((__be16 *)val);
        return 0;
 }
 
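Both transports now report a 16-bit product id, but note the byte order differs: the I2C register read is little-endian (le16_to_cpup over val[0..1]) while the SMBus block read is big-endian (be16_to_cpup). A minimal userspace sketch of the two conversions, using manual shifts in place of the kernel helpers:

#include <stdio.h>
#include <stdint.h>

/* Little-endian: byte 0 is the low byte (what le16_to_cpup() yields). */
static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

/* Big-endian: byte 0 is the high byte (what be16_to_cpup() yields). */
static uint16_t get_be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        uint8_t val[3] = { 0x12, 0x34, 0x00 };

        printf("le16 = 0x%04x\n", get_le16(val));  /* 0x3412 */
        printf("be16 = 0x%04x\n", get_be16(val));  /* 0x1234 */
        return 0;
}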
index 994ae788615698bf3af613f1de765fe0411be995..6025eb430c0a5010c908961ccf8897943fd3c945 100644 (file)
@@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse)
        struct synaptics_data *priv = psmouse->private;
 
        priv->mode = 0;
-
-       if (priv->absolute_mode) {
+       if (priv->absolute_mode)
                priv->mode |= SYN_BIT_ABSOLUTE_MODE;
-               if (SYN_CAP_EXTENDED(priv->capabilities))
-                       priv->mode |= SYN_BIT_W_MODE;
-       }
-
-       if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
+       if (priv->disable_gesture)
                priv->mode |= SYN_BIT_DISABLE_GESTURE;
-
        if (psmouse->rate >= 80)
                priv->mode |= SYN_BIT_HIGH_RATE;
+       if (SYN_CAP_EXTENDED(priv->capabilities))
+               priv->mode |= SYN_BIT_W_MODE;
 
        if (synaptics_mode_cmd(psmouse, priv->mode))
                return -1;
index 75516996db2070b621c6250cdd9710acf473ad42..316f2c8971011dae527d506ee18d49ce96f316e0 100644 (file)
@@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
         * time before the ACK arrives.
         */
        if (ps2_sendbyte(ps2dev, command & 0xff,
-                        command == PS2_CMD_RESET_BAT ? 1000 : 200))
-               goto out;
+                        command == PS2_CMD_RESET_BAT ? 1000 : 200)) {
+               serio_pause_rx(ps2dev->serio);
+               goto out_reset_flags;
+       }
 
-       for (i = 0; i < send; i++)
-               if (ps2_sendbyte(ps2dev, param[i], 200))
-                       goto out;
+       for (i = 0; i < send; i++) {
+               if (ps2_sendbyte(ps2dev, param[i], 200)) {
+                       serio_pause_rx(ps2dev->serio);
+                       goto out_reset_flags;
+               }
+       }
 
        /*
         * The reset command takes a long time to execute.
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
                                   !(ps2dev->flags & PS2_FLAG_CMD), timeout);
        }
 
+       serio_pause_rx(ps2dev->serio);
+
        if (param)
                for (i = 0; i < receive; i++)
                        param[i] = ps2dev->cmdbuf[(receive - 1) - i];
 
        if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1))
-               goto out;
+               goto out_reset_flags;
 
        rc = 0;
 
- out:
-       serio_pause_rx(ps2dev->serio);
+ out_reset_flags:
        ps2dev->flags = 0;
        serio_continue_rx(ps2dev->serio);
 
index 26b45936f9fdf3334c6f083aeaa7c5ee5bb178dd..1e8cd6f1fe9e875005af95b54787890f81116a5f 100644 (file)
@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
        parkbd_port = parkbd_allocate_serio();
        if (!parkbd_port) {
                parport_release(parkbd_dev);
+               parport_unregister_device(parkbd_dev);
                return -ENOMEM;
        }
 
index 0f5f968592bd02afd9c5381a8839b3428c3bcbea..04edc8f7122fa77d9c043694ba8f47d0c83dab0c 100644 (file)
@@ -668,18 +668,22 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val)
 
 static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m)
 {
+       int value;
        struct spi_transfer *t =
                list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
 
        if (ts->model == 7845) {
-               return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3;
+               value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1]));
        } else {
                /*
                 * adjust:  on-wire is a must-ignore bit, a BE12 value, then
                 * padding; built from two 8 bit values written msb-first.
                 */
-               return be16_to_cpup((__be16 *)t->rx_buf) >> 3;
+               value = be16_to_cpup((__be16 *)t->rx_buf);
        }
+
+       /* enforce ADC output is 12 bits width */
+       return (value >> 3) & 0xfff;
 }
 
 static void ads7846_update_value(struct spi_message *m, int val)
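The sample arrives left-justified in a 16-bit big-endian word, so after the 3-bit shift the new mask guarantees that nothing above bit 11, such as the must-ignore leading bit on the 7845, leaks into the reported value. A minimal sketch of the extraction:

#include <stdio.h>
#include <stdint.h>

/* Extract a 12-bit ADC sample from a 16-bit raw word:
 * shift past the 3 padding bits, then clamp to 12 bits. */
static int adc12(uint16_t raw)
{
        return (raw >> 3) & 0xfff;
}

int main(void)
{
        /* A stray high bit (bit 15) must not survive the mask. */
        printf("%#x\n", adc12(0x8008));  /* 0x001, not 0x1001 */
        printf("%#x\n", adc12(0x7ff8));  /* 0xfff (full scale) */
        return 0;
}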
index ff0b75813daa21cff6f8baccfbf453a06ebf383b..8275267eac25441f308e6103e82d48830d1feb71 100644 (file)
@@ -94,7 +94,7 @@ struct imx6ul_tsc {
  * TSC module need ADC to get the measure value. So
  * before config TSC, we should initialize ADC module.
  */
-static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
 {
        int adc_hc = 0;
        int adc_gc;
@@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
 
        timeout = wait_for_completion_timeout
                        (&tsc->completion, ADC_TIMEOUT);
-       if (timeout == 0)
+       if (timeout == 0) {
                dev_err(tsc->dev, "Timeout for adc calibration\n");
+               return -ETIMEDOUT;
+       }
 
        adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
-       if (adc_gs & ADC_CALF)
+       if (adc_gs & ADC_CALF) {
                dev_err(tsc->dev, "ADC calibration failed\n");
+               return -EINVAL;
+       }
 
        /* TSC need the ADC work in hardware trigger */
        adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
        adc_cfg |= ADC_HARDWARE_TRIGGER;
        writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+
+       return 0;
 }
 
 /*
@@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
        writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
 }
 
-static void imx6ul_tsc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
 {
-       imx6ul_adc_init(tsc);
+       int err;
+
+       err = imx6ul_adc_init(tsc);
+       if (err)
+               return err;
        imx6ul_tsc_channel_config(tsc);
        imx6ul_tsc_set(tsc);
+
+       return 0;
 }
 
 static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
@@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev)
                return err;
        }
 
-       imx6ul_tsc_init(tsc);
-
-       return 0;
+       return imx6ul_tsc_init(tsc);
 }
 
 static void imx6ul_tsc_close(struct input_dev *input_dev)
@@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
        int tsc_irq;
        int adc_irq;
 
-       tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL);
+       tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
        if (!tsc)
                return -ENOMEM;
 
@@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
        if (!input_dev)
                return -ENOMEM;
 
-       input_dev->name = "iMX6UL TouchScreen Controller";
+       input_dev->name = "iMX6UL Touchscreen Controller";
        input_dev->id.bustype = BUS_HOST;
 
        input_dev->open = imx6ul_tsc_open;
@@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
        }
 
        adc_irq = platform_get_irq(pdev, 1);
-       if (adc_irq <= 0) {
+       if (adc_irq < 0) {
                dev_err(&pdev->dev, "no adc irq resource?\n");
                return adc_irq;
        }
@@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
                        goto out;
                }
 
-               imx6ul_tsc_init(tsc);
+               retval = imx6ul_tsc_init(tsc);
        }
 
 out:
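The thread running through this file's hunks is converting void init helpers to int-returning ones so a calibration timeout or failure propagates out of open() and resume() instead of being logged and ignored. A minimal sketch of the propagation pattern (0 on success, negative errno on failure; function names are illustrative):

#include <stdio.h>
#include <errno.h>

static int adc_init(int calibration_ok)
{
        if (!calibration_ok)
                return -ETIMEDOUT;   /* the caller now sees the failure */
        return 0;
}

/* Each layer checks and forwards the error instead of swallowing it. */
static int tsc_init(int calibration_ok)
{
        int err = adc_init(calibration_ok);

        if (err)
                return err;
        /* ...channel config and start only run after a good init... */
        return 0;
}

int main(void)
{
        printf("tsc_init(good) = %d\n", tsc_init(1));  /* 0 */
        printf("tsc_init(bad)  = %d\n", tsc_init(0));  /* -ETIMEDOUT */
        return 0;
}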
index 7cce87650fc8da3e401ec9a9aa1807ea1a2d9dfb..1fafc9f57af6c75a7a8a9e9d6b90e016f4e90272 100644 (file)
@@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
        if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
                dev_err(dev, "failed to get x-size property\n");
                return NULL;
-       };
+       }
 
        if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
                dev_err(dev, "failed to get y-size property\n");
                return NULL;
-       };
+       }
 
        of_property_read_u32(np, "contact-threshold",
                                &pdata->contact_threshold);
index d9da766719c863327d4a8563804994c3edfd01c0..cbe6a890a93a0d1448f46e32edbfdc5231ba7098 100644 (file)
@@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
        bool "ARMv7/v8 Long Descriptor Format"
        select IOMMU_IO_PGTABLE
-       # SWIOTLB guarantees a dma_to_phys() implementation
-       depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
+       depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
        help
          Enable support for the ARM long descriptor pagetable format.
          This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
index f82060e778a23bb7a8901ef2356d42b5363d93a6..08d2775887f7add00e44858cc6bf4a5eece6e260 100644 (file)
@@ -2006,6 +2006,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
 {
        struct amd_iommu *iommu;
 
+       /*
+        * First check if the device is still attached. It might already
+        * be detached from its domain because the generic
+        * iommu_detach_group code detached it and we try again here in
+        * our alias handling.
+        */
+       if (!dev_data->domain)
+               return;
+
        iommu = amd_iommu_rlookup_table[dev_data->devid];
 
        /* decrease reference counters */
index 5ef347a13cb5d54789c07869b0527d81cb24365e..1b066e7d144d6fdc0043cfbfa340ce2e7a209c50 100644 (file)
@@ -1256,6 +1256,9 @@ static int iommu_init_pci(struct amd_iommu *iommu)
        if (!iommu->dev)
                return -ENODEV;
 
+       /* Prevent binding other PCI device drivers to IOMMU devices */
+       iommu->dev->match_driver = false;
+
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
index dafaf59dc3b82833fb78d55e8f194ff728999d35..286e890e7d64caa31867044f568e3a3cf6ce2ba9 100644 (file)
@@ -56,6 +56,7 @@
 #define IDR0_TTF_SHIFT                 2
 #define IDR0_TTF_MASK                  0x3
 #define IDR0_TTF_AARCH64               (2 << IDR0_TTF_SHIFT)
+#define IDR0_TTF_AARCH32_64            (3 << IDR0_TTF_SHIFT)
 #define IDR0_S1P                       (1 << 1)
 #define IDR0_S2P                       (1 << 0)
 
 #define CMDQ_TLBI_0_VMID_SHIFT         32
 #define CMDQ_TLBI_0_ASID_SHIFT         48
 #define CMDQ_TLBI_1_LEAF               (1UL << 0)
-#define CMDQ_TLBI_1_ADDR_MASK          ~0xfffUL
+#define CMDQ_TLBI_1_VA_MASK            ~0xfffUL
+#define CMDQ_TLBI_1_IPA_MASK           0xfffffffff000UL
 
 #define CMDQ_PRI_0_SSID_SHIFT          12
 #define CMDQ_PRI_0_SSID_MASK           0xfffffUL
@@ -770,11 +772,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
                break;
        case CMDQ_OP_TLBI_NH_VA:
                cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
-               /* Fallthrough */
+               cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
+               cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
+               break;
        case CMDQ_OP_TLBI_S2_IPA:
                cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
                cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
-               cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK;
+               cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
                break;
        case CMDQ_OP_TLBI_NH_ASID:
                cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
@@ -2460,7 +2464,13 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
        }
 
        /* We only support the AArch64 table format at present */
-       if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) {
+       switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
+       case IDR0_TTF_AARCH32_64:
+               smmu->ias = 40;
+               /* Fallthrough */
+       case IDR0_TTF_AARCH64:
+               break;
+       default:
                dev_err(smmu->dev, "AArch64 table format not supported!\n");
                return -ENXIO;
        }
@@ -2541,8 +2551,7 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
                dev_warn(smmu->dev,
                         "failed to set DMA mask for table walker\n");
 
-       if (!smmu->ias)
-               smmu->ias = smmu->oas;
+       smmu->ias = max(smmu->ias, smmu->oas);
 
        dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
                 smmu->ias, smmu->oas, smmu->features);
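Splitting the VA and IPA cases matters because the two commands mask the address differently: a VA keeps every bit above the 4K page offset, while the IPA field is confined to bits 47:12. A minimal sketch showing the effect of the two masks from the hunk above on a wide address:

#include <stdio.h>
#include <stdint.h>

#define TLBI_VA_MASK   (~0xfffULL)          /* clear page offset only */
#define TLBI_IPA_MASK  0xfffffffff000ULL    /* bits 47:12 of the IPA  */

int main(void)
{
        uint64_t addr = 0xffff123456789abcULL;

        printf("va  = %#llx\n", (unsigned long long)(addr & TLBI_VA_MASK));
        printf("ipa = %#llx\n", (unsigned long long)(addr & TLBI_IPA_MASK));
        return 0;
}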
index 041bc1810a86131deb77152dd6b5a7cd43338a5d..35365f046923db7df50f43f318310711e7fd5e31 100644 (file)
@@ -2301,6 +2301,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
        if (ret) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
+               free_devinfo_mem(info);
                return NULL;
        }
 
index 73c07482f48763c5af3f0d43d73a2f04774bb74d..7df97777662d4d8a9284a8f2cae4cc0e8891210a 100644 (file)
@@ -202,9 +202,9 @@ typedef u64 arm_lpae_iopte;
 
 static bool selftest_running = false;
 
-static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+static dma_addr_t __arm_lpae_dma_addr(void *pages)
 {
-       return phys_to_dma(dev, virt_to_phys(pages));
+       return (dma_addr_t)virt_to_phys(pages);
 }
 
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
@@ -223,10 +223,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
                        goto out_free;
                /*
                 * We depend on the IOMMU being able to work with any physical
-                * address directly, so if the DMA layer suggests it can't by
-                * giving us back some translation, that bodes very badly...
+                * address directly, so if the DMA layer suggests otherwise by
+                * translating or truncating them, that bodes very badly...
                 */
-               if (dma != __arm_lpae_dma_addr(dev, pages))
+               if (dma != virt_to_phys(pages))
                        goto out_unmap;
        }
 
@@ -243,10 +243,8 @@ out_free:
 static void __arm_lpae_free_pages(void *pages, size_t size,
                                  struct io_pgtable_cfg *cfg)
 {
-       struct device *dev = cfg->iommu_dev;
-
        if (!selftest_running)
-               dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+               dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
                                 size, DMA_TO_DEVICE);
        free_pages_exact(pages, size);
 }
@@ -254,12 +252,11 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
 static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
                               struct io_pgtable_cfg *cfg)
 {
-       struct device *dev = cfg->iommu_dev;
-
        *ptep = pte;
 
        if (!selftest_running)
-               dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
+               dma_sync_single_for_device(cfg->iommu_dev,
+                                          __arm_lpae_dma_addr(ptep),
                                           sizeof(pte), DMA_TO_DEVICE);
 }
 
@@ -629,6 +626,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
        if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
                return NULL;
 
+       if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
+               dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
+               return NULL;
+       }
+
        data = kmalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return NULL;
index cf351c6374645e7b0e4fe982a937d49b4904cfe9..a7c8c9ffbafd3503228a6c4e63c6870b5d9c784c 100644 (file)
@@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
 
        dev_alias->dev_id = alias;
        if (pdev != dev_alias->pdev)
-               dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+               dev_alias->count += its_pci_msi_vec_count(pdev);
 
        return 0;
 }
index ac7ae2b3cb83726e336ce310d95e2b890da8224d..25ceae9f7348b3208bdd6dec0048a88d4ce34f73 100644 (file)
@@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
 out:
        spin_unlock(&lpi_lock);
 
+       if (!bitmap)
+               *base = *nr_ids = 0;
+
        return bitmap;
 }
 
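The added check makes the failure contract explicit: if no LPI chunk could be allocated, *base and *nr_ids are zeroed rather than left holding whatever stale values the caller had on its stack. A minimal sketch of sanitizing out-parameters on failure (illustrative names):

#include <stdio.h>
#include <stdlib.h>

/* Returns a buffer or NULL; on NULL the out-params are well-defined
 * zeros, so a careless caller cannot consume garbage. */
static void *alloc_ids(int want, int *base, int *nr)
{
        void *buf = (want > 0) ? malloc((size_t)want) : NULL;

        if (buf) {
                *base = 100;       /* illustrative values */
                *nr = want;
        } else {
                *base = *nr = 0;
        }
        return buf;
}

int main(void)
{
        int base = -1, nr = -1;    /* deliberately stale */

        if (!alloc_ids(0, &base, &nr))
                printf("failed: base=%d nr=%d\n", base, nr);  /* 0 0 */
        return 0;
}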
index 18accb0a79cc51dea2d1851fc9a27891e8d79d6f..c53a53f6efb6a136ec09075996bd89081e50e536 100644 (file)
@@ -1247,7 +1247,7 @@ static void
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
        struct PStack *st = fi->userdata;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *nskb;
        struct Layer2 *l2 = &st->l2;
        u_char header[MAX_HEADER_LEN];
        int i, hdr_space_needed;
@@ -1262,14 +1262,10 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                return;
 
        hdr_space_needed = l2headersize(l2, 0);
-       if (hdr_space_needed > skb_headroom(skb)) {
-               struct sk_buff *orig_skb = skb;
-
-               skb = skb_realloc_headroom(skb, hdr_space_needed);
-               if (!skb) {
-                       dev_kfree_skb(orig_skb);
-                       return;
-               }
+       nskb = skb_realloc_headroom(skb, hdr_space_needed);
+       if (!nskb) {
+               skb_queue_head(&l2->i_queue, skb);
+               return;
        }
        spin_lock_irqsave(&l2->lock, flags);
        if (test_bit(FLG_MOD128, &l2->flag))
@@ -1282,7 +1278,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                       p1);
                dev_kfree_skb(l2->windowar[p1]);
        }
-       l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
+       l2->windowar[p1] = skb;
 
        i = sethdraddr(&st->l2, header, CMD);
 
@@ -1295,8 +1291,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                l2->vs = (l2->vs + 1) % 8;
        }
        spin_unlock_irqrestore(&l2->lock, flags);
-       memcpy(skb_push(skb, i), header, i);
-       st->l2.l2l1(st, PH_PULL | INDICATION, skb);
+       memcpy(skb_push(nskb, i), header, i);
+       st->l2.l2l1(st, PH_PULL | INDICATION, nskb);
        test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
        if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
                FsmDelTimer(&st->l2.t203, 13);
index 8b1a66c6ca8aa0e3c5a647945ba895172d3a4974..e72b4e73cd615f1fa0410b97661d7f07ef9bbaca 100644 (file)
@@ -235,7 +235,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
 
 int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
 {
-       int len, incomplete = 0, found = 0;
+       int incomplete = 0, found = 0;
        char *dup, *tok, *name, *args;
        struct dsp_element_entry *entry, *n;
        struct dsp_pipeline_entry *pipeline_entry;
@@ -247,17 +247,9 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
        if (!list_empty(&pipeline->list))
                _dsp_pipeline_destroy(pipeline);
 
-       if (!cfg)
-               return 0;
-
-       len = strlen(cfg);
-       if (!len)
-               return 0;
-
-       dup = kmalloc(len + 1, GFP_ATOMIC);
+       dup = kstrdup(cfg, GFP_ATOMIC);
        if (!dup)
                return 0;
-       strcpy(dup, cfg);
        while ((tok = strsep(&dup, "|"))) {
                if (!strlen(tok))
                        continue;
index 949cabb88f1c113c9606d26ff72627f6cf9db108..5eb380a2590394ba087e2f160281e1902112910f 100644 (file)
@@ -1476,7 +1476,7 @@ static void
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
        struct layer2   *l2 = fi->userdata;
-       struct sk_buff  *skb, *nskb, *oskb;
+       struct sk_buff  *skb, *nskb;
        u_char          header[MAX_L2HEADER_LEN];
        u_int           i, p1;
 
@@ -1486,48 +1486,34 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
        skb = skb_dequeue(&l2->i_queue);
        if (!skb)
                return;
-
-       if (test_bit(FLG_MOD128, &l2->flag))
-               p1 = (l2->vs - l2->va) % 128;
-       else
-               p1 = (l2->vs - l2->va) % 8;
-       p1 = (p1 + l2->sow) % l2->window;
-       if (l2->windowar[p1]) {
-               printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
-                      mISDNDevName4ch(&l2->ch), p1);
-               dev_kfree_skb(l2->windowar[p1]);
-       }
-       l2->windowar[p1] = skb;
        i = sethdraddr(l2, header, CMD);
        if (test_bit(FLG_MOD128, &l2->flag)) {
                header[i++] = l2->vs << 1;
                header[i++] = l2->vr << 1;
+       } else
+               header[i++] = (l2->vr << 5) | (l2->vs << 1);
+       nskb = skb_realloc_headroom(skb, i);
+       if (!nskb) {
+               printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
+                      mISDNDevName4ch(&l2->ch), i);
+               skb_queue_head(&l2->i_queue, skb);
+               return;
+       }
+       if (test_bit(FLG_MOD128, &l2->flag)) {
+               p1 = (l2->vs - l2->va) % 128;
                l2->vs = (l2->vs + 1) % 128;
        } else {
-               header[i++] = (l2->vr << 5) | (l2->vs << 1);
+               p1 = (l2->vs - l2->va) % 8;
                l2->vs = (l2->vs + 1) % 8;
        }
-
-       nskb = skb_clone(skb, GFP_ATOMIC);
-       p1 = skb_headroom(nskb);
-       if (p1 >= i)
-               memcpy(skb_push(nskb, i), header, i);
-       else {
-               printk(KERN_WARNING
-                      "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
-                      mISDNDevName4ch(&l2->ch), i, p1);
-               oskb = nskb;
-               nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
-               if (!nskb) {
-                       dev_kfree_skb(oskb);
-                       printk(KERN_WARNING "%s: no skb mem in %s\n",
-                              mISDNDevName4ch(&l2->ch), __func__);
-                       return;
-               }
-               memcpy(skb_put(nskb, i), header, i);
-               memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
-               dev_kfree_skb(oskb);
+       p1 = (p1 + l2->sow) % l2->window;
+       if (l2->windowar[p1]) {
+               printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
+                      mISDNDevName4ch(&l2->ch), p1);
+               dev_kfree_skb(l2->windowar[p1]);
        }
+       l2->windowar[p1] = skb;
+       memcpy(skb_push(nskb, i), header, i);
        l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
        test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
        if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
index de36237d7c6b45de10fca57cc1870fde5a0620e5..051645498b53f8931e6f1db9a11aeb65e61ac2fd 100644 (file)
@@ -74,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                ret = -ENOTSUPP;
                dev_err(&pdev->dev,
                        "IO mapped PCI devices are not supported\n");
-               goto out_release;
+               goto out_iounmap;
        }
 
        pci_set_drvdata(pdev, priv);
@@ -89,7 +89,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
        if (ret < 0)
-               goto out_iounmap;
+               goto out_mcb_bus;
        num_cells = ret;
 
        dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
@@ -98,6 +98,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        return 0;
 
+out_mcb_bus:
+       mcb_release_bus(priv->bus);
 out_iounmap:
        iounmap(priv->base);
 out_release:
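The relabelled gotos restore the usual unwind discipline: each error label undoes exactly the steps completed so far, in reverse order of acquisition, so a parse failure now also releases the bus before unmapping. A minimal sketch of the idiom:

#include <stdio.h>
#include <errno.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -EIO; }   /* force a failure */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

int probe(void)
{
        int ret;

        if ((ret = step_a()))
                return ret;
        if ((ret = step_b()))
                goto out_a;
        if ((ret = step_c()))
                goto out_b;        /* unwinds b, then a */
        return 0;

out_b:
        undo_b();
out_a:
        undo_a();
        return ret;
}

int main(void)
{
        printf("probe() = %d\n", probe());
        return 0;
}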
index e51de52eeb94f71c9d6712a61d31e49f8e6f2f60..48b5890c28e35ad70484d67b29e12a70cb9a4b1b 100644 (file)
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
        if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
                ret = bitmap_storage_alloc(&store, chunks,
                                           !bitmap->mddev->bitmap_info.external,
-                                          bitmap->cluster_slot);
+                                          mddev_is_clustered(bitmap->mddev)
+                                          ? bitmap->cluster_slot : 0);
        if (ret)
                goto err;
 
index 240c9f0e85e74e864624f0cb972c5b4394eaf11f..8a096456579bead67b182f27e65956341a7c8d73 100644 (file)
@@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
 static struct dm_cache_policy_type wb_policy_type = {
        .name = "cleaner",
        .version = {1, 0, 0},
-       .hint_size = 0,
+       .hint_size = 4,
        .owner = THIS_MODULE,
        .create = wb_create
 };
index ebaa4f803eec3a08a0cd9fcd9a9a1b933618c50c..192bb8beeb6b59e296d9a2e06c0ef8c0a9be8aeb 100644 (file)
@@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
                return -EINVAL;
        }
 
-       tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL);
+       tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL);
        if (!tmp_store) {
                ti->error = "Exception store allocation failed";
                return -ENOMEM;
@@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
        else if (persistent == 'N')
                type = get_type("N");
        else {
-               ti->error = "Persistent flag is not P or N";
+               ti->error = "Exception store type is not P or N";
                r = -EINVAL;
                goto bad_type;
        }
@@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
        if (r)
                goto bad;
 
-       r = type->ctr(tmp_store, 0, NULL);
+       r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL));
        if (r) {
                ti->error = "Exception store type constructor failed";
                goto bad;
index 0b2536247cf55a3215223b8b0c72ff29a629b87a..fae34e7a0b1e4e4d60b5867eff9422e432fba83b 100644 (file)
@@ -42,8 +42,7 @@ struct dm_exception_store_type {
        const char *name;
        struct module *module;
 
-       int (*ctr) (struct dm_exception_store *store,
-                   unsigned argc, char **argv);
+       int (*ctr) (struct dm_exception_store *store, char *options);
 
        /*
         * Destroys this object when you've finished with it.
@@ -123,6 +122,8 @@ struct dm_exception_store {
        unsigned chunk_shift;
 
        void *context;
+
+       bool userspace_supports_overflow;
 };
 
 /*
index 97e165183e79f2991f8191913e0b44fb91b00310..a0901214aef57de00419a14c573bc128431749c7 100644 (file)
@@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
                 */
                if (min_region_size > (1 << 13)) {
                        /* If not a power of 2, make it the next power of 2 */
-                       if (min_region_size & (min_region_size - 1))
-                               region_size = 1 << fls(region_size);
+                       region_size = roundup_pow_of_two(min_region_size);
                        DMINFO("Choosing default region size of %lu sectors",
                               region_size);
                } else {
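The old code rounded the wrong variable (region_size instead of min_region_size) and only rounded at all when the value was not already a power of two; roundup_pow_of_two() does both jobs correctly in one call. A minimal userspace equivalent of that helper:

#include <stdio.h>

/* Smallest power of two >= n, for n > 0, like the kernel helper. */
static unsigned long long roundup_pow2(unsigned long long n)
{
        unsigned long long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        printf("%llu\n", roundup_pow2(8192));  /* 8192: already a power */
        printf("%llu\n", roundup_pow2(8193));  /* 16384 */
        return 0;
}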
index bf71583296f732b6b78c71ae67dee5222824b2f8..117a05e40090a9b78829ed415446d906c8268701 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "dm-exception-store.h"
 
+#include <linux/ctype.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
@@ -843,10 +844,10 @@ static void persistent_drop_snapshot(struct dm_exception_store *store)
                DMWARN("write header failed");
 }
 
-static int persistent_ctr(struct dm_exception_store *store,
-                         unsigned argc, char **argv)
+static int persistent_ctr(struct dm_exception_store *store, char *options)
 {
        struct pstore *ps;
+       int r;
 
        /* allocate the pstore */
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
@@ -868,14 +869,32 @@ static int persistent_ctr(struct dm_exception_store *store,
 
        ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
        if (!ps->metadata_wq) {
-               kfree(ps);
                DMERR("couldn't start header metadata update thread");
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto err_workqueue;
+       }
+
+       if (options) {
+               char overflow = toupper(options[0]);
+               if (overflow == 'O')
+                       store->userspace_supports_overflow = true;
+               else {
+                       DMERR("Unsupported persistent store option: %s", options);
+                       r = -EINVAL;
+                       goto err_options;
+               }
        }
 
        store->context = ps;
 
        return 0;
+
+err_options:
+       destroy_workqueue(ps->metadata_wq);
+err_workqueue:
+       kfree(ps);
+
+       return r;
 }
 
 static unsigned persistent_status(struct dm_exception_store *store,
@@ -888,7 +907,8 @@ static unsigned persistent_status(struct dm_exception_store *store,
        case STATUSTYPE_INFO:
                break;
        case STATUSTYPE_TABLE:
-               DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
+               DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
+                      (unsigned long long)store->chunk_size);
        }
 
        return sz;
index 1ce9a2586e4134a79ec3289808f8229e9aaa2080..9b7c8c8049d6186f54bdfec114c43cb3ce4d77fa 100644 (file)
@@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store,
        *metadata_sectors = 0;
 }
 
-static int transient_ctr(struct dm_exception_store *store,
-                        unsigned argc, char **argv)
+static int transient_ctr(struct dm_exception_store *store, char *options)
 {
        struct transient_c *tc;
 
index c0bcd6516dfe17f8e7a06ec8c66d1e1d5801f133..c06b74e91cd6aeef00ef4eefae9953d4d8c8f91b 100644 (file)
@@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s)
 }
 
 /*
- * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
+ * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
  */
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
@@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
 
        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
+       snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
        snap_src->store = u.store_swap;
 
        snap_dest->store->snap = snap_dest;
@@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
                        pe = __find_pending_exception(s, pe, chunk);
                        if (!pe) {
-                               s->snapshot_overflowed = 1;
-                               DMERR("Snapshot overflowed: Unable to allocate exception.");
+                               if (s->store->userspace_supports_overflow) {
+                                       s->snapshot_overflowed = 1;
+                                       DMERR("Snapshot overflowed: Unable to allocate exception.");
+                               } else
+                                       __invalidate_snapshot(s, -ENOMEM);
                                r = -EIO;
                                goto out_unlock;
                        }
@@ -2365,7 +2369,7 @@ static struct target_type origin_target = {
 
 static struct target_type snapshot_target = {
        .name    = "snapshot",
-       .version = {1, 14, 0},
+       .version = {1, 15, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
@@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = {
 
 static struct target_type merge_target = {
        .name    = dm_snapshot_merge_target_name,
-       .version = {1, 3, 0},
+       .version = {1, 4, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
index 6fcbfb0633665a7c7b91d036b771cd997560e3de..3897b90bd462d852e0aec27a792be14655efa150 100644 (file)
@@ -3201,7 +3201,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                                                metadata_low_callback,
                                                pool);
        if (r)
-               goto out_free_pt;
+               goto out_flags_changed;
 
        pt->callbacks.congested_fn = pool_is_congested;
        dm_table_add_target_callbacks(ti->table, &pt->callbacks);
index 6264781dc69a6066b88d719537c471b7d1cd7b27..1b5c6047e4f19882fbbe9facbc29aeee54dc8723 100644 (file)
@@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone)
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
+       int error = clone->bi_error;
 
        bio_put(clone);
 
@@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone)
                 * the remainder.
                 */
                return;
-       else if (bio->bi_error) {
+       else if (error) {
                /*
                 * Don't notice the error to the upper layer yet.
                 * The error handling decision is made by the target driver,
                 * when the request is completed.
                 */
-               tio->error = bio->bi_error;
+               tio->error = error;
                return;
        }
 
@@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 
        might_sleep();
 
-       map = dm_get_live_table(md, &srcu_idx);
-
        spin_lock(&_minor_lock);
        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
        set_bit(DMF_FREEING, &md->flags);
@@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
         * do not race with internal suspend.
         */
        mutex_lock(&md->suspend_lock);
+       map = dm_get_live_table(md, &srcu_idx);
        if (!dm_suspended_md(md)) {
                dm_table_presuspend_targets(map);
                dm_table_postsuspend_targets(map);
        }
-       mutex_unlock(&md->suspend_lock);
-
        /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
        dm_put_live_table(md, srcu_idx);
+       mutex_unlock(&md->suspend_lock);
 
        /*
         * Rare, but there may be I/O requests still going to complete,
index 4f5ecbe94ccbf97c562d96930635c6aaff0550d3..c702de18207ae76ab56f1235ed5c98a9095ed050 100644 (file)
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
                 * which will now never happen */
                wake_up_process(mddev->sync_thread->tsk);
 
+       if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+               return -EBUSY;
        mddev_unlock(mddev);
        wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
                                          &mddev->recovery));
+       wait_event(mddev->sb_wait,
+                  !test_bit(MD_CHANGE_PENDING, &mddev->flags));
        mddev_lock_nointr(mddev);
 
        mutex_lock(&mddev->open_mutex);
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev)
                        md_reap_sync_thread(mddev);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
                        goto unlock;
                }
 
index d222522c52e077dcdd012fbf22d929fc6a7cb0ad..d132f06afdd1aa3140922f7965494087cf43eb7a 100644 (file)
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev)
        return 0;
 
 out_free_conf:
-       if (conf->pool)
-               mempool_destroy(conf->pool);
+       mempool_destroy(conf->pool);
        kfree(conf->multipaths);
        kfree(conf);
        mddev->private = NULL;
index 63e619b2f44eb3ce51a90eb74980ed8a1f91c639..f8e5db0cb5aaae3038e67ec9f348f1faa4c64508 100644 (file)
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
                struct md_rdev *rdev;
                bool discard_supported = false;
 
-               rdev_for_each(rdev, mddev) {
-                       disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                         rdev->data_offset << 9);
-                       if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-                               discard_supported = true;
-               }
                blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
                blk_queue_io_opt(mddev->queue,
                                 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
+               rdev_for_each(rdev, mddev) {
+                       disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                         rdev->data_offset << 9);
+                       if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+                               discard_supported = true;
+               }
                if (!discard_supported)
                        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
                else
index 4517f06c41bafe0fb2fbe2a5b454b68f012b2455..ddd8a5f572aa1c023db07b3eab6b72bc0371c1dd 100644 (file)
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
        }
 
        if (bio && bio_data_dir(bio) == WRITE) {
-               if (bio->bi_iter.bi_sector >=
-                   conf->mddev->curr_resync_completed) {
+               if (bio->bi_iter.bi_sector >= conf->next_resync) {
                        if (conf->start_next_window == MaxSector)
                                conf->start_next_window =
                                        conf->next_resync +
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf)
        conf->r1buf_pool = NULL;
 
        spin_lock_irq(&conf->resync_lock);
-       conf->next_resync = 0;
+       conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
        conf->start_next_window = MaxSector;
        conf->current_window_requests +=
                conf->next_window_requests;
@@ -2383,8 +2382,8 @@ static void raid1d(struct md_thread *thread)
                }
                spin_unlock_irqrestore(&conf->device_lock, flags);
                while (!list_empty(&tmp)) {
-                       r1_bio = list_first_entry(&conf->bio_end_io_list,
-                                                 struct r1bio, retry_list);
+                       r1_bio = list_first_entry(&tmp, struct r1bio,
+                                                 retry_list);
                        list_del(&r1_bio->retry_list);
                        raid_end_bio_io(r1_bio);
                }
@@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 
  abort:
        if (conf) {
-               if (conf->r1bio_pool)
-                       mempool_destroy(conf->r1bio_pool);
+               mempool_destroy(conf->r1bio_pool);
                kfree(conf->mirrors);
                safe_put_page(conf->tmppage);
                kfree(conf->poolinfo);
@@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
 {
        struct r1conf *conf = priv;
 
-       if (conf->r1bio_pool)
-               mempool_destroy(conf->r1bio_pool);
+       mempool_destroy(conf->r1bio_pool);
        kfree(conf->mirrors);
        safe_put_page(conf->tmppage);
        kfree(conf->poolinfo);
index 0fc33eb888551292bb37461f08d7e704483f93e6..9f69dc526f8cbf271f45b4d7745a442aa6324329 100644 (file)
@@ -2688,8 +2688,8 @@ static void raid10d(struct md_thread *thread)
                }
                spin_unlock_irqrestore(&conf->device_lock, flags);
                while (!list_empty(&tmp)) {
-                       r10_bio = list_first_entry(&conf->bio_end_io_list,
-                                                 struct r10bio, retry_list);
+                       r10_bio = list_first_entry(&tmp, struct r10bio,
+                                                  retry_list);
                        list_del(&r10_bio->retry_list);
                        raid_end_bio_io(r10_bio);
                }
@@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
                printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
                       mdname(mddev));
        if (conf) {
-               if (conf->r10bio_pool)
-                       mempool_destroy(conf->r10bio_pool);
+               mempool_destroy(conf->r10bio_pool);
                kfree(conf->mirrors);
                safe_put_page(conf->tmppage);
                kfree(conf);
@@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev)
 
 out_free_conf:
        md_unregister_thread(&mddev->thread);
-       if (conf->r10bio_pool)
-               mempool_destroy(conf->r10bio_pool);
+       mempool_destroy(conf->r10bio_pool);
        safe_put_page(conf->tmppage);
        kfree(conf->mirrors);
        kfree(conf);
@@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv)
 {
        struct r10conf *conf = priv;
 
-       if (conf->r10bio_pool)
-               mempool_destroy(conf->r10bio_pool);
+       mempool_destroy(conf->r10bio_pool);
        safe_put_page(conf->tmppage);
        kfree(conf->mirrors);
        kfree(conf->mirrors_old);
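This group of md hunks leans on mempool_destroy() and kmem_cache_destroy() being no-ops on NULL, so callers can drop the "if (ptr)" guard, exactly as free(NULL) is defined to do nothing. A minimal sketch of a NULL-tolerant destroy routine:

#include <stdio.h>
#include <stdlib.h>

struct pool { int dummy; };

/* Accepting NULL lets every error path call destroy unconditionally. */
static void pool_destroy(struct pool *p)
{
        if (!p)
                return;            /* like free(NULL): silently ignore */
        puts("pool released");
        free(p);
}

int main(void)
{
        struct pool *p = malloc(sizeof(*p));

        pool_destroy(p);      /* releases */
        pool_destroy(NULL);   /* safe no-op, no guard at the call site */
        return 0;
}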
index 15ef2c641b2b93e96004d073463fdcfaaaaceab4..49bb8d3ff9be8c7741a5bebc6b210fde38989a09 100644 (file)
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf)
               drop_one_stripe(conf))
                ;
 
-       if (conf->slab_cache)
-               kmem_cache_destroy(conf->slab_cache);
+       kmem_cache_destroy(conf->slab_cache);
        conf->slab_cache = NULL;
 }
 
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        spin_unlock_irq(&sh->stripe_lock);
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                wake_up(&conf->wait_for_overlap);
+                       if (bi)
+                               s->to_read--;
                        while (bi && bi->bi_iter.bi_sector <
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                 */
                clear_bit(R5_LOCKED, &sh->dev[i].flags);
        }
+       s->to_write = 0;
+       s->written = 0;
 
        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
                if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
                 */
                return 0;
 
-       for (i = 0; i < s->failed; i++) {
+       for (i = 0; i < s->failed && i < 2; i++) {
                if (fdev[i]->towrite &&
                    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
                    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
            sh->sector < sh->raid_conf->mddev->recovery_cp)
                /* reconstruct-write isn't being forced */
                return 0;
-       for (i = 0; i < s->failed; i++) {
+       for (i = 0; i < s->failed && i < 2; i++) {
                if (s->failed_num[i] != sh->pd_idx &&
                    s->failed_num[i] != sh->qd_idx &&
                    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
index f28cb28a62f87073c214d47368355be6c29c6f25..2c7f8d7c0595e2d849183dfd0db91d2d945daa10 100644 (file)
@@ -42,6 +42,8 @@ int intel_lpss_resume(struct device *dev);
        .thaw = intel_lpss_resume,              \
        .poweroff = intel_lpss_suspend,         \
        .restore = intel_lpss_resume,
+#else
+#define INTEL_LPSS_SLEEP_PM_OPS
 #endif
 
 #define INTEL_LPSS_RUNTIME_PM_OPS              \
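
The new #else branch supplies an empty definition so that users of INTEL_LPSS_SLEEP_PM_OPS still compile when CONFIG_PM_SLEEP is disabled. A self-contained sketch of the stub-macro idiom (names are illustrative, not the kernel's):

#include <stdio.h>

#ifdef HAVE_SLEEP_OPS
#define SLEEP_PM_OPS	.suspend = 1, .resume = 1,
#else
#define SLEEP_PM_OPS	/* expands to nothing when the feature is off */
#endif

struct dev_pm_ops_stub { int suspend, resume, runtime; };

/* valid whether or not HAVE_SLEEP_OPS is defined */
static const struct dev_pm_ops_stub ops = {
	SLEEP_PM_OPS
	.runtime = 1,
};

int main(void)
{
	printf("suspend=%d resume=%d runtime=%d\n",
	       ops.suspend, ops.resume, ops.runtime);
	return 0;
}
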
index c52162ea3d0ab1daf8bd375220669f3ba53db15e..586098f1b233a6d19da9e74e1d2dd2e396408635 100644 (file)
@@ -80,7 +80,7 @@ static int max77843_chg_init(struct max77693_dev *max77843)
        if (!max77843->i2c_chg) {
                dev_err(&max77843->i2c->dev,
                                "Cannot allocate I2C device for Charger\n");
-               return PTR_ERR(max77843->i2c_chg);
+               return -ENODEV;
        }
        i2c_set_clientdata(max77843->i2c_chg, max77843);
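
The fix above matters because the failing allocator returns NULL rather than an ERR_PTR-encoded pointer: PTR_ERR(NULL) evaluates to 0, so the probe would have reported success on failure. A userspace re-implementation of the ERR_PTR convention that reproduces the pitfall:

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
	void *p = NULL;	/* API signals failure with NULL, not ERR_PTR */

	/* PTR_ERR(NULL) is 0, i.e. "success"; IS_ERR(NULL) is false too */
	printf("PTR_ERR(NULL) = %ld, IS_ERR(NULL) = %d\n", PTR_ERR(p), IS_ERR(p));
	/* correct handling is to return a real errno such as -ENODEV */
	return 0;
}
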
 
index 8af12c884b04eeb870d21ae2bb4bf15e0cfb76c4..103baf0e0c5bfd9aa23537adf12f6035bb427d86 100644 (file)
@@ -105,6 +105,7 @@ EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
 
 void cxl_free_afu_irqs(struct cxl_context *ctx)
 {
+       afu_irq_name_free(ctx);
        cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 }
 EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
index e762f85ee233a4b510390aa0ce4a5a79266b84c3..2faa1270d085b15f92e185f8f389f5790390fbef 100644 (file)
@@ -275,6 +275,9 @@ static void reclaim_ctx(struct rcu_head *rcu)
        if (ctx->kernelapi)
                kfree(ctx->mapping);
 
+       if (ctx->irq_bitmap)
+               kfree(ctx->irq_bitmap);
+
        kfree(ctx);
 }
 
index 1c30ef77073d607cd250ade57c08b110fffd092f..0cfb9c129f273cbdf0a408c6b5d3008bd308596d 100644 (file)
@@ -677,6 +677,7 @@ int cxl_register_serr_irq(struct cxl_afu *afu);
 void cxl_release_serr_irq(struct cxl_afu *afu);
 int afu_register_irqs(struct cxl_context *ctx, u32 count);
 void afu_release_irqs(struct cxl_context *ctx, void *cookie);
+void afu_irq_name_free(struct cxl_context *ctx);
 irqreturn_t cxl_slice_irq_err(int irq, void *data);
 
 int cxl_debugfs_init(void);
index a30bf285b5bdd75c3f2b357d89dbab251bb98c7c..7ccd2998be92b8b3f7cdca2a0acbf3f9586d0f34 100644 (file)
@@ -120,9 +120,16 @@ int afu_release(struct inode *inode, struct file *file)
                 __func__, ctx->pe);
        cxl_context_detach(ctx);
 
-       mutex_lock(&ctx->mapping_lock);
-       ctx->mapping = NULL;
-       mutex_unlock(&ctx->mapping_lock);
+
+       /*
+        * Delete the context's mapping pointer, unless it was created by the
+        * kernel API, in which case leave it so it can be freed by reclaim_ctx()
+        */
+       if (!ctx->kernelapi) {
+               mutex_lock(&ctx->mapping_lock);
+               ctx->mapping = NULL;
+               mutex_unlock(&ctx->mapping_lock);
+       }
 
        put_device(&ctx->afu->dev);
 
index 583b42afeda2355da2e606f4fbc33546445a8df6..09a406058c4650ddf71114c26201889620003b9e 100644 (file)
@@ -414,7 +414,7 @@ void cxl_release_psl_irq(struct cxl_afu *afu)
        kfree(afu->psl_irq_name);
 }
 
-static void afu_irq_name_free(struct cxl_context *ctx)
+void afu_irq_name_free(struct cxl_context *ctx)
 {
        struct cxl_irq_name *irq_name, *tmp;
 
@@ -524,7 +524,5 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
        afu_irq_name_free(ctx);
        cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 
-       kfree(ctx->irq_bitmap);
-       ctx->irq_bitmap = NULL;
        ctx->irq_count = 0;
 }
index b37f2e8004f5bcd58f970ea274ebd29ef4b1eae3..d2e75c88f4d2165762913c27c57e5d4487e431ad 100644 (file)
@@ -457,6 +457,7 @@ static int activate_afu_directed(struct cxl_afu *afu)
 
        dev_info(&afu->dev, "Activating AFU directed mode\n");
 
+       afu->num_procs = afu->max_procs_virtualised;
        if (afu->spa == NULL) {
                if (cxl_alloc_spa(afu))
                        return -ENOMEM;
@@ -468,7 +469,6 @@ static int activate_afu_directed(struct cxl_afu *afu)
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
 
        afu->current_mode = CXL_MODE_DIRECTED;
-       afu->num_procs = afu->max_procs_virtualised;
 
        if ((rc = cxl_chardev_m_afu_add(afu)))
                return rc;
index a5e977192b61f97bfbace09aa93f0b577be67bb7..85761d7eb333173040204a7a5593bf2c7cf06485 100644 (file)
@@ -1035,6 +1035,32 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
        return 0;
 }
 
+/*
+ * Work around a PCIe Host Bridge defect on some cards that can cause
+ * malformed Transaction Layer Packet (TLP) errors to be erroneously
+ * reported. Mask this error in the Uncorrectable Error Mask Register.
+ *
+ * The upper nibble of the PSL revision is used to distinguish between
+ * different cards. The affected ones have it set to 0.
+ */
+static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
+{
+       int aer;
+       u32 data;
+
+       if (adapter->psl_rev & 0xf000)
+               return;
+       if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
+               return;
+       pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
+       if (data & PCI_ERR_UNC_MALF_TLP)
+               if (data & PCI_ERR_UNC_INTN)
+                       return;
+       data |= PCI_ERR_UNC_MALF_TLP;
+       data |= PCI_ERR_UNC_INTN;
+       pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
+}
+
 static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
 {
        if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
@@ -1134,6 +1160,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
        if ((rc = cxl_vsec_looks_ok(adapter, dev)))
                return rc;
 
+       cxl_fixup_malformed_tlp(adapter, dev);
+
        if ((rc = setup_cxl_bars(dev)))
                return rc;
 
index 8eec887c8f701ce732a6278a49b1fbe298ca0635..6d7c188fb65c8ce288817e0ffb5728764e2ac133 100644 (file)
@@ -1209,7 +1209,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
                 * after the host receives the enum_resp
                 * message clients may be added or removed
                 */
-               if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS &&
+               if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS ||
                    dev->hbm_state >= MEI_HBM_STOPPED) {
                        dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
                                dev->dev_state, dev->hbm_state);
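
With &&, the test above could never fire: dev->hbm_state cannot simultaneously be at or below MEI_HBM_ENUM_CLIENTS and at or above MEI_HBM_STOPPED, since the states are ordered. Switching to || makes the check reject every state outside the valid window. A small demonstration with an illustrative state enum (not the driver's real one):

#include <stdio.h>

enum hbm_state { HBM_IDLE, HBM_ENUM_CLIENTS, HBM_STARTED, HBM_STOPPED };

int main(void)
{
	enum hbm_state s;

	for (s = HBM_IDLE; s <= HBM_STOPPED; s++) {
		int broken = (s <= HBM_ENUM_CLIENTS && s >= HBM_STOPPED); /* never true */
		int fixed  = (s <= HBM_ENUM_CLIENTS || s >= HBM_STOPPED); /* true outside valid window */
		printf("state %d: broken=%d fixed=%d\n", (int)s, broken, fixed);
	}
	return 0;
}
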
index 781e4db317671ce6146dea121a56f42f90e7c491..7fb0753abe3041bc1814ebc14c1136d103b254e1 100644 (file)
@@ -182,6 +182,7 @@ struct omap_hsmmc_host {
        struct  clk             *fclk;
        struct  clk             *dbclk;
        struct  regulator       *pbias;
+       bool                    pbias_enabled;
        void    __iomem         *base;
        int                     vqmmc_enabled;
        resource_size_t         mapbase;
@@ -328,20 +329,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
                        return ret;
                }
 
-               if (!regulator_is_enabled(host->pbias)) {
+               if (host->pbias_enabled == 0) {
                        ret = regulator_enable(host->pbias);
                        if (ret) {
                                dev_err(host->dev, "pbias reg enable fail\n");
                                return ret;
                        }
+                       host->pbias_enabled = 1;
                }
        } else {
-               if (regulator_is_enabled(host->pbias)) {
+               if (host->pbias_enabled == 1) {
                        ret = regulator_disable(host->pbias);
                        if (ret) {
                                dev_err(host->dev, "pbias reg disable fail\n");
                                return ret;
                        }
+                       host->pbias_enabled = 0;
                }
        }
 
@@ -475,7 +478,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
        mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
        if (IS_ERR(mmc->supply.vmmc)) {
                ret = PTR_ERR(mmc->supply.vmmc);
-               if (ret != -ENODEV)
+               if ((ret != -ENODEV) && host->dev->of_node)
                        return ret;
                dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
                        PTR_ERR(mmc->supply.vmmc));
@@ -490,7 +493,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
        mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
        if (IS_ERR(mmc->supply.vqmmc)) {
                ret = PTR_ERR(mmc->supply.vqmmc);
-               if (ret != -ENODEV)
+               if ((ret != -ENODEV) && host->dev->of_node)
                        return ret;
                dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
                        PTR_ERR(mmc->supply.vqmmc));
@@ -500,7 +503,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
        host->pbias = devm_regulator_get_optional(host->dev, "pbias");
        if (IS_ERR(host->pbias)) {
                ret = PTR_ERR(host->pbias);
-               if (ret != -ENODEV)
+               if ((ret != -ENODEV) && host->dev->of_node)
                        return ret;
                dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
                        PTR_ERR(host->pbias));
@@ -2053,6 +2056,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
        host->base      = base + pdata->reg_offset;
        host->power_mode = MMC_POWER_OFF;
        host->next_data.cookie = 1;
+       host->pbias_enabled = 0;
        host->vqmmc_enabled = 0;
 
        ret = omap_hsmmc_gpio_init(mmc, host, pdata);
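
The driver now tracks its own enable state in pbias_enabled instead of asking regulator_is_enabled(): the latter reports the hardware state, which may be left on by the bootloader or shared with other consumers, and keying off it risks unbalanced enable/disable calls into the regulator framework. A sketch of the cached-flag pattern with stand-in enable/disable functions:

#include <stdio.h>

struct host {
	int pbias_enabled;	/* this consumer's view, not the hardware's */
};

static int reg_enable(void)  { puts("regulator enable");  return 0; }
static int reg_disable(void) { puts("regulator disable"); return 0; }

static int set_power(struct host *h, int on)
{
	if (on && !h->pbias_enabled) {
		if (reg_enable())
			return -1;
		h->pbias_enabled = 1;
	} else if (!on && h->pbias_enabled) {
		if (reg_disable())
			return -1;
		h->pbias_enabled = 0;
	}
	return 0;
}

int main(void)
{
	struct host h = { .pbias_enabled = 0 };

	set_power(&h, 1);
	set_power(&h, 1);	/* second enable is a no-op: calls stay balanced */
	set_power(&h, 0);
	return 0;
}
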
index d1556643a41d325abc7b7637ac94c694e778fedc..a0f05de5409f7d0c42f3d2457ff4dc1bb0e74f3c 100644 (file)
@@ -43,6 +43,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
 
 static const struct sdhci_pltfm_data soc_data_sama5d2 = {
        .ops = &sdhci_at91_sama5d2_ops,
+       .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
 };
 
 static const struct of_device_id sdhci_at91_dt_match[] = {
index 946d37f94a31b29e8739304ec71cf7b1468eead6..f5edf9d3a18a2088a2b08705d876c413b1d659b5 100644 (file)
@@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev,
        struct sdhci_pxa *pxa = pltfm_host->priv;
        struct resource *res;
 
+       host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
        host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "conf-sdio3");
@@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
                    uhs == MMC_TIMING_UHS_DDR50) {
                        reg_val &= ~SDIO3_CONF_CLK_INV;
                        reg_val |= SDIO3_CONF_SD_FB_CLK;
+               } else if (uhs == MMC_TIMING_MMC_HS) {
+                       reg_val &= ~SDIO3_CONF_CLK_INV;
+                       reg_val &= ~SDIO3_CONF_SD_FB_CLK;
                } else {
                        reg_val |= SDIO3_CONF_CLK_INV;
                        reg_val &= ~SDIO3_CONF_SD_FB_CLK;
@@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
                ret = armada_38x_quirks(pdev, host);
                if (ret < 0)
-                       goto err_clk_get;
+                       goto err_mbus_win;
                ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
                if (ret < 0)
                        goto err_mbus_win;
index 64b7fdbd1a9ccab80034e8a38660ef944daf8bae..fbc7efdddcb5a4cb9c2726b91e2ee29acfa85f08 100644 (file)
@@ -1160,6 +1160,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
        host->mmc->actual_clock = 0;
 
        sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+       if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
+               mdelay(1);
 
        if (clock == 0)
                return;
index 7c02ff46c8ac3ecdaf37e792fd6bcb43c9bd029e..9d4aa31b683ac2d64e16f31f88d8d0893162a225 100644 (file)
@@ -412,6 +412,11 @@ struct sdhci_host {
 #define SDHCI_QUIRK2_ACMD23_BROKEN                     (1<<14)
 /* Broken Clock divider zero in controller */
 #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN             (1<<15)
+/*
+ * When the internal clock is disabled, a delay is needed before modifying
+ * the SD clock frequency or re-enabling the internal clock.
+ */
+#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST      (1<<16)
 
        int irq;                /* Device IRQ */
        void __iomem *ioaddr;   /* Mapped address */
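
quirks2 is a bitmask, so the 1 ms delay added in sdhci_set_clock() only runs on controllers that declare the new bit-16 quirk (here, the Atmel sama5d2 platform data). A trivial sketch of quirk-gated behaviour:

#include <stdio.h>

#define QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST	(1 << 16)

struct host { unsigned int quirks2; };

static void set_clock(struct host *h)
{
	/* ... internal clock just disabled ... */
	if (h->quirks2 & QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
		puts("mdelay(1) before touching the clock divider");
}

int main(void)
{
	struct host quirky = { .quirks2 = QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST };
	struct host plain  = { .quirks2 = 0 };

	set_clock(&quirky);	/* delays */
	set_clock(&plain);	/* does not */
	return 0;
}
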
index 2426db88db36bf95f1f247eeae597ff69238c70d..f04445b992f512c537018b81bf0d685a3ee2f62b 100644 (file)
@@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
                                      oob_chunk_size);
 
                /* the last chunk */
-               memcpy16_toio(&s[oob_chunk_size * sparebuf_size],
+               memcpy16_toio(&s[i * sparebuf_size],
                              &d[i * oob_chunk_size],
                              host->used_oobsize - i * oob_chunk_size);
        }
index f97a58d6aae1bbbacdb29ca86ac30c1f21d19e48..e7d333c162befd274f891b8674b5ca8fd905315e 100644 (file)
 #define NFC_ECC_MODE           GENMASK(15, 12)
 #define NFC_RANDOM_SEED                GENMASK(30, 16)
 
+/* NFC_USER_DATA helper macros */
+#define NFC_BUF_TO_USER_DATA(buf)      ((buf)[0] | ((buf)[1] << 8) | \
+                                       ((buf)[2] << 16) | ((buf)[3] << 24))
+
 #define NFC_DEFAULT_TIMEOUT_MS 1000
 
 #define NFC_SRAM_SIZE          1024
@@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
                offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
 
                /* Fill OOB data in */
-               if (oob_required) {
-                       tmp = 0xffffffff;
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-                                   4);
-               } else {
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
-                                   chip->oob_poi + offset - mtd->writesize,
-                                   4);
-               }
+               writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
+                                           layout->oobfree[i].offset),
+                      nfc->regs + NFC_REG_USER_DATA_BASE);
 
                chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
 
@@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
                offset += ecc->size;
 
                /* Fill OOB data in */
-               if (oob_required) {
-                       tmp = 0xffffffff;
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-                                   4);
-               } else {
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
-                                   4);
-               }
+               writel(NFC_BUF_TO_USER_DATA(oob),
+                      nfc->regs + NFC_REG_USER_DATA_BASE);
 
                tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
                      (1 << 30);
@@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
                                        node);
                nand_release(&chip->mtd);
                sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+               list_del(&chip->node);
        }
 }
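
NFC_BUF_TO_USER_DATA() assembles four OOB bytes into one 32-bit word, LSB first, so the driver can program the user-data register with a single writel() instead of the two memcpy_toio() paths it replaces. A quick check of the packing (values illustrative):

#include <stdio.h>
#include <stdint.h>

#define NFC_BUF_TO_USER_DATA(buf)	((buf)[0] | ((buf)[1] << 8) | \
					 ((buf)[2] << 16) | ((buf)[3] << 24))

int main(void)
{
	const uint8_t oob[4] = { 0x11, 0x22, 0x33, 0x44 };

	/* packs byte 0 into bits 7:0, byte 3 into bits 31:24 */
	printf("user data = 0x%08x\n", (uint32_t)NFC_BUF_TO_USER_DATA(oob));
	return 0;
}

Expected output: user data = 0x44332211.
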
 
index b9ebd0d18a522db0da8603f5cb401c40d5645386..f184fb5bd11046d814f0b03e122bc7419bc7cdf5 100644 (file)
@@ -298,8 +298,10 @@ config NLMON
 
 config NET_VRF
        tristate "Virtual Routing and Forwarding (Lite)"
-       depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES
+       depends on IP_MULTIPLE_TABLES
        depends on NET_L3_MASTER_DEV
+       depends on IPV6 || IPV6=n
+       depends on IPV6_MULTIPLE_TABLES || IPV6=n
        ---help---
          This option enables support for mapping interfaces into VRFs. The
          support enables VRF devices.
index d7fdea11e694c01345f55e955d29e1a2bbe7a8b2..20bfb9ba83ea23d38880676198e279513ed70123 100644 (file)
@@ -237,6 +237,8 @@ struct Outgoing {
                numsegs;        /* number of segments */
 };
 
+#define ARCNET_LED_NAME_SZ (IFNAMSIZ + 6)
+
 struct arcnet_local {
        uint8_t config,         /* current value of CONFIG register */
                timeout,        /* Extended timeout for COM20020 */
@@ -260,6 +262,13 @@ struct arcnet_local {
        /* On preemptive and SMP a lock is needed */
        spinlock_t lock;
 
+       struct led_trigger *tx_led_trig;
+       char tx_led_trig_name[ARCNET_LED_NAME_SZ];
+       struct led_trigger *recon_led_trig;
+       char recon_led_trig_name[ARCNET_LED_NAME_SZ];
+
+       struct timer_list       timer;
+
        /*
         * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
         * which can be used for either sending or receiving.  The new dynamic
@@ -309,6 +318,8 @@ struct arcnet_local {
                int (*reset)(struct net_device *dev, int really_reset);
                void (*open)(struct net_device *dev);
                void (*close)(struct net_device *dev);
+               void (*datatrigger)(struct net_device *dev, int enable);
+               void (*recontrigger)(struct net_device *dev, int enable);
 
                void (*copy_to_card)(struct net_device *dev, int bufnum,
                                     int offset, void *buf, int count);
@@ -319,6 +330,16 @@ struct arcnet_local {
        void __iomem *mem_start;        /* pointer to ioremap'ed MMIO */
 };
 
+enum arcnet_led_event {
+       ARCNET_LED_EVENT_RECON,
+       ARCNET_LED_EVENT_OPEN,
+       ARCNET_LED_EVENT_STOP,
+       ARCNET_LED_EVENT_TX,
+};
+
+void arcnet_led_event(struct net_device *netdev, enum arcnet_led_event event);
+void devm_arcnet_led_init(struct net_device *netdev, int index, int subid);
+
 #if ARCNET_DEBUG_MAX & D_SKB
 void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
 #else
index e41dd36fe832f6892f0cea1a9b8257cd8fa0fe9e..6ea963e3b89a1ab1c78ccb4d63c99a16e1bfbe0b 100644 (file)
@@ -52,6 +52,8 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 
+#include <linux/leds.h>
+
 #include "arcdevice.h"
 #include "com9026.h"
 
@@ -189,6 +191,71 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum,
 
 #endif
 
+/* Trigger an LED event in response to an ARCNET device event */
+void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event)
+{
+       struct arcnet_local *lp = netdev_priv(dev);
+       unsigned long led_delay = 350;
+       unsigned long tx_delay = 50;
+
+       switch (event) {
+       case ARCNET_LED_EVENT_RECON:
+               led_trigger_blink_oneshot(lp->recon_led_trig,
+                                         &led_delay, &led_delay, 0);
+               break;
+       case ARCNET_LED_EVENT_OPEN:
+               led_trigger_event(lp->tx_led_trig, LED_OFF);
+               led_trigger_event(lp->recon_led_trig, LED_OFF);
+               break;
+       case ARCNET_LED_EVENT_STOP:
+               led_trigger_event(lp->tx_led_trig, LED_OFF);
+               led_trigger_event(lp->recon_led_trig, LED_OFF);
+               break;
+       case ARCNET_LED_EVENT_TX:
+               led_trigger_blink_oneshot(lp->tx_led_trig,
+                                         &tx_delay, &tx_delay, 0);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(arcnet_led_event);
+
+static void arcnet_led_release(struct device *gendev, void *res)
+{
+       struct arcnet_local *lp = netdev_priv(to_net_dev(gendev));
+
+       led_trigger_unregister_simple(lp->tx_led_trig);
+       led_trigger_unregister_simple(lp->recon_led_trig);
+}
+
+/* Register ARCNET LED triggers for an arcnet device
+ *
+ * This is normally called from a driver's probe function
+ */
+void devm_arcnet_led_init(struct net_device *netdev, int index, int subid)
+{
+       struct arcnet_local *lp = netdev_priv(netdev);
+       void *res;
+
+       res = devres_alloc(arcnet_led_release, 0, GFP_KERNEL);
+       if (!res) {
+               netdev_err(netdev, "cannot register LED triggers\n");
+               return;
+       }
+
+       snprintf(lp->tx_led_trig_name, sizeof(lp->tx_led_trig_name),
+                "arc%d-%d-tx", index, subid);
+       snprintf(lp->recon_led_trig_name, sizeof(lp->recon_led_trig_name),
+                "arc%d-%d-recon", index, subid);
+
+       led_trigger_register_simple(lp->tx_led_trig_name,
+                                   &lp->tx_led_trig);
+       led_trigger_register_simple(lp->recon_led_trig_name,
+                                   &lp->recon_led_trig);
+
+       devres_add(&netdev->dev, res);
+}
+EXPORT_SYMBOL_GPL(devm_arcnet_led_init);
+
 /* Unregister a protocol driver from the arc_proto_map.  Protocol drivers
  * are responsible for registering themselves, but the unregister routine
  * is pretty generic so we'll do it here.
@@ -314,6 +381,16 @@ static void arcdev_setup(struct net_device *dev)
        dev->flags = IFF_BROADCAST;
 }
 
+static void arcnet_timer(unsigned long data)
+{
+       struct net_device *dev = (struct net_device *)data;
+
+       if (!netif_carrier_ok(dev)) {
+               netif_carrier_on(dev);
+               netdev_info(dev, "link up\n");
+       }
+}
+
 struct net_device *alloc_arcdev(const char *name)
 {
        struct net_device *dev;
@@ -325,6 +402,9 @@ struct net_device *alloc_arcdev(const char *name)
                struct arcnet_local *lp = netdev_priv(dev);
 
                spin_lock_init(&lp->lock);
+               init_timer(&lp->timer);
+               lp->timer.data = (unsigned long) dev;
+               lp->timer.function = arcnet_timer;
        }
 
        return dev;
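
The new timer implements carrier debouncing: arcnet_open() starts with the carrier off and the timer is (re)armed on every reconfiguration interrupt, so arcnet_timer() only declares the link up after a full second without recon events. A minimal sketch of the debounce logic with simulated timestamps (the kernel version uses mod_timer() and netif_carrier_on()):

#include <stdio.h>

#define QUIET_MS 1000	/* link is up after this long with no recons */

int main(void)
{
	long last_recon_ms = 200;	/* simulated last reconfiguration event */
	long now_ms = 1500;		/* simulated current time */
	int carrier = 0;

	if (!carrier && now_ms - last_recon_ms >= QUIET_MS) {
		carrier = 1;
		puts("link up");
	}
	return 0;
}
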
@@ -423,8 +503,11 @@ int arcnet_open(struct net_device *dev)
        lp->hw.intmask(dev, lp->intmask);
        arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 
+       netif_carrier_off(dev);
        netif_start_queue(dev);
+       mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000));
 
+       arcnet_led_event(dev, ARCNET_LED_EVENT_OPEN);
        return 0;
 
  out_module_put:
@@ -438,7 +521,11 @@ int arcnet_close(struct net_device *dev)
 {
        struct arcnet_local *lp = netdev_priv(dev);
 
+       arcnet_led_event(dev, ARCNET_LED_EVENT_STOP);
+       del_timer_sync(&lp->timer);
+
        netif_stop_queue(dev);
+       netif_carrier_off(dev);
 
        /* flush TX and disable RX */
        lp->hw.intmask(dev, 0);
@@ -515,7 +602,7 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
        struct ArcProto *proto;
        int txbuf;
        unsigned long flags;
-       int freeskb, retval;
+       int retval;
 
        arc_printk(D_DURING, dev,
                   "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
@@ -554,15 +641,13 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
                         *  the packet later - forget about it now
                         */
                        dev->stats.tx_bytes += skb->len;
-                       freeskb = 1;
+                       dev_kfree_skb(skb);
                } else {
                        /* do it the 'split' way */
                        lp->outgoing.proto = proto;
                        lp->outgoing.skb = skb;
                        lp->outgoing.pkt = pkt;
 
-                       freeskb = 0;
-
                        if (proto->continue_tx &&
                            proto->continue_tx(dev, txbuf)) {
                                arc_printk(D_NORMAL, dev,
@@ -574,7 +659,6 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
                lp->next_tx = txbuf;
        } else {
                retval = NETDEV_TX_BUSY;
-               freeskb = 0;
        }
 
        arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
@@ -588,10 +672,9 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
        arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
                   __FILE__, __LINE__, __func__, lp->hw.status(dev));
 
-       spin_unlock_irqrestore(&lp->lock, flags);
-       if (freeskb)
-               dev_kfree_skb(skb);
+       arcnet_led_event(dev, ARCNET_LED_EVENT_TX);
 
+       spin_unlock_irqrestore(&lp->lock, flags);
        return retval;          /* no need to try again */
 }
 EXPORT_SYMBOL(arcnet_send_packet);
@@ -843,6 +926,13 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 
                        arc_printk(D_RECON, dev, "Network reconfiguration detected (status=%Xh)\n",
                                   status);
+                       if (netif_carrier_ok(dev)) {
+                               netif_carrier_off(dev);
+                               netdev_info(dev, "link down\n");
+                       }
+                       mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000));
+
+                       arcnet_led_event(dev, ARCNET_LED_EVENT_RECON);
                        /* MYRECON bit is at bit 7 of diagstatus */
                        if (diagstatus & 0x80)
                                arc_printk(D_RECON, dev, "Put out that recon myself\n");
@@ -893,6 +983,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
                        lp->num_recons = lp->network_down = 0;
 
                        arc_printk(D_DURING, dev, "not recon: clearing counters anyway.\n");
+                       netif_carrier_on(dev);
                }
 
                if (didsomething)
index a12bf83be7502dd6e4262907a45dc05efb090b80..239de38fbd6a588bbb0e90e3452ea60ca1e5a161 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/pci.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/leds.h>
 
 #include "arcdevice.h"
 #include "com20020.h"
@@ -62,12 +63,43 @@ module_param(clockp, int, 0);
 module_param(clockm, int, 0);
 MODULE_LICENSE("GPL");
 
+static void led_tx_set(struct led_classdev *led_cdev,
+                            enum led_brightness value)
+{
+       struct com20020_dev *card;
+       struct com20020_priv *priv;
+       struct com20020_pci_card_info *ci;
+
+       card = container_of(led_cdev, struct com20020_dev, tx_led);
+
+       priv = card->pci_priv;
+       ci = priv->ci;
+
+       outb(!!value, priv->misc + ci->leds[card->index].green);
+}
+
+static void led_recon_set(struct led_classdev *led_cdev,
+                            enum led_brightness value)
+{
+       struct com20020_dev *card;
+       struct com20020_priv *priv;
+       struct com20020_pci_card_info *ci;
+
+       card = container_of(led_cdev, struct com20020_dev, recon_led);
+
+       priv = card->pci_priv;
+       ci = priv->ci;
+
+       outb(!!value, priv->misc + ci->leds[card->index].red);
+}
+
 static void com20020pci_remove(struct pci_dev *pdev);
 
 static int com20020pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
 {
        struct com20020_pci_card_info *ci;
+       struct com20020_pci_channel_map *mm;
        struct net_device *dev;
        struct arcnet_local *lp;
        struct com20020_priv *priv;
@@ -84,9 +116,22 @@ static int com20020pci_probe(struct pci_dev *pdev,
 
        ci = (struct com20020_pci_card_info *)id->driver_data;
        priv->ci = ci;
+       mm = &ci->misc_map;
 
        INIT_LIST_HEAD(&priv->list_dev);
 
+       if (mm->size) {
+               ioaddr = pci_resource_start(pdev, mm->bar) + mm->offset;
+               r = devm_request_region(&pdev->dev, ioaddr, mm->size,
+                                       "com20020-pci");
+               if (!r) {
+                       pr_err("IO region %xh-%xh already allocated.\n",
+                              ioaddr, ioaddr + mm->size - 1);
+                       return -EBUSY;
+               }
+               priv->misc = ioaddr;
+       }
+
        for (i = 0; i < ci->devcount; i++) {
                struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i];
                struct com20020_dev *card;
@@ -96,6 +141,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
                        ret = -ENOMEM;
                        goto out_port;
                }
+               dev->dev_port = i;
 
                dev->netdev_ops = &com20020_netdev_ops;
 
@@ -131,6 +177,13 @@ static int com20020pci_probe(struct pci_dev *pdev,
                lp->timeout = timeout;
                lp->hw.owner = THIS_MODULE;
 
+               /* Get the dev_id from the PLX rotary coder */
+               if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+                       dev->dev_id = 0xc;
+               dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4;
+
+               snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+
                if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
                        pr_err("IO address %Xh is empty!\n", ioaddr);
                        ret = -EIO;
@@ -148,14 +201,41 @@ static int com20020pci_probe(struct pci_dev *pdev,
 
                card->index = i;
                card->pci_priv = priv;
+               card->tx_led.brightness_set = led_tx_set;
+               card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+                                               GFP_KERNEL, "arc%d-%d-tx",
+                                               dev->dev_id, i);
+               card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+                                               "pci:green:tx:%d-%d",
+                                               dev->dev_id, i);
+
+               card->tx_led.dev = &dev->dev;
+               card->recon_led.brightness_set = led_recon_set;
+               card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+                                               GFP_KERNEL, "arc%d-%d-recon",
+                                               dev->dev_id, i);
+               card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+                                               "pci:red:recon:%d-%d",
+                                               dev->dev_id, i);
+               card->recon_led.dev = &dev->dev;
                card->dev = dev;
 
+               ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+               if (ret)
+                       goto out_port;
+
+               ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+               if (ret)
+                       goto out_port;
+
                dev_set_drvdata(&dev->dev, card);
 
                ret = com20020_found(dev, IRQF_SHARED);
                if (ret)
                        goto out_port;
 
+               devm_arcnet_led_init(dev, dev->dev_id, i);
+
                list_add(&card->list, &priv->list_dev);
        }
 
@@ -234,6 +314,18 @@ static struct com20020_pci_card_info card_info_eae_arc1 = {
                        .size = 0x08,
                },
        },
+       .misc_map = {
+               .bar = 2,
+               .offset = 0x10,
+               .size = 0x04,
+       },
+       .leds = {
+               {
+                       .green = 0x0,
+                       .red = 0x1,
+               },
+       },
+       .rotary = 0x0,
        .flags = ARC_CAN_10MBIT,
 };
 
@@ -251,6 +343,21 @@ static struct com20020_pci_card_info card_info_eae_ma1 = {
                        .size = 0x08,
                }
        },
+       .misc_map = {
+               .bar = 2,
+               .offset = 0x10,
+               .size = 0x04,
+       },
+       .leds = {
+               {
+                       .green = 0x0,
+                       .red = 0x1,
+               }, {
+                       .green = 0x2,
+                       .red = 0x3,
+               },
+       },
+       .rotary = 0x0,
        .flags = ARC_CAN_10MBIT,
 };
 
index c82f323a8c2b8fecd0f1c6a33d253db176088df5..13d9ad4b3f5c977e99f3ac2f38d3f244de3ae203 100644 (file)
@@ -118,7 +118,7 @@ int com20020_check(struct net_device *dev)
                arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND);
        }
 
-       lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
+       lp->config = (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
        /* set node ID to 0x42 (but transmitter is disabled, so it's okay) */
        arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
        arcnet_outb(0x42, ioaddr, COM20020_REG_W_XREG);
@@ -131,11 +131,6 @@ int com20020_check(struct net_device *dev)
        }
        arc_printk(D_INIT_REASONS, dev, "status after reset: %X\n", status);
 
-       /* Enable TX */
-       lp->config |= TXENcfg;
-       arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
-       arcnet_outb(arcnet_inb(ioaddr, 8), ioaddr, COM20020_REG_W_XREG);
-
        arcnet_outb(CFLAGScmd | RESETclear | CONFIGclear,
                    ioaddr, COM20020_REG_W_COMMAND);
        status = arcnet_inb(ioaddr, COM20020_REG_R_STATUS);
@@ -169,9 +164,33 @@ static int com20020_set_hwaddr(struct net_device *dev, void *addr)
        return 0;
 }
 
+static int com20020_netdev_open(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       struct arcnet_local *lp = netdev_priv(dev);
+
+       lp->config |= TXENcfg;
+       arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+
+       return arcnet_open(dev);
+}
+
+static int com20020_netdev_close(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       struct arcnet_local *lp = netdev_priv(dev);
+
+       arcnet_close(dev);
+
+       /* disable transmitter */
+       lp->config &= ~TXENcfg;
+       arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+       return 0;
+}
+
 const struct net_device_ops com20020_netdev_ops = {
-       .ndo_open       = arcnet_open,
-       .ndo_stop       = arcnet_close,
+       .ndo_open       = com20020_netdev_open,
+       .ndo_stop       = com20020_netdev_close,
        .ndo_start_xmit = arcnet_send_packet,
        .ndo_tx_timeout = arcnet_timeout,
        .ndo_set_mac_address = com20020_set_hwaddr,
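
Enabling the transmitter is moved out of reset/probe and into the ndo_open/ndo_stop pair above, so TXEN follows the administrative interface state: a closed interface can no longer put traffic on the bus. A sketch of gating a config-register bit on open/close (bit position illustrative, not the COM20020's real layout):

#include <stdio.h>

#define TXEN	(1 << 5)	/* illustrative bit position */

static unsigned char config;	/* stand-in for the chip's config register */

static int dev_open(void)
{
	config |= TXEN;		/* transmit allowed only while open */
	printf("open:  config=0x%02x\n", config);
	return 0;
}

static int dev_close(void)
{
	config &= ~TXEN;	/* closed interface cannot transmit */
	printf("close: config=0x%02x\n", config);
	return 0;
}

int main(void)
{
	dev_open();
	dev_close();
	return 0;
}
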
@@ -215,7 +234,7 @@ int com20020_found(struct net_device *dev, int shared)
                arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND);
        }
 
-       lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
+       lp->config = (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
        /* Default 0x38 + register: Node ID */
        arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
        arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG);
@@ -274,7 +293,7 @@ static int com20020_reset(struct net_device *dev, int really_reset)
                   dev->name, arcnet_inb(ioaddr, COM20020_REG_R_STATUS));
 
        arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
-       lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
+       lp->config |= (lp->timeout << 3) | (lp->backplane << 2);
        /* power-up defaults */
        arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
        arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
index 22a460f39fb93854022aacfca639287aa11958b8..0bcc5d0a6903c34e305bab525b4989d30d912f59 100644 (file)
@@ -26,6 +26,7 @@
  */
 #ifndef __COM20020_H
 #define __COM20020_H
+#include <linux/leds.h>
 
 int com20020_check(struct net_device *dev);
 int com20020_found(struct net_device *dev, int shared);
@@ -36,6 +37,11 @@ extern const struct net_device_ops com20020_netdev_ops;
 
 #define PLX_PCI_MAX_CARDS 2
 
+struct ledoffsets {
+       int green;
+       int red;
+};
+
 struct com20020_pci_channel_map {
        u32 bar;
        u32 offset;
@@ -47,6 +53,10 @@ struct com20020_pci_card_info {
        int devcount;
 
        struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
+       struct com20020_pci_channel_map misc_map;
+
+       struct ledoffsets leds[PLX_PCI_MAX_CARDS];
+       int rotary;
 
        unsigned int flags;
 };
@@ -54,12 +64,16 @@ struct com20020_pci_card_info {
 struct com20020_priv {
        struct com20020_pci_card_info *ci;
        struct list_head list_dev;
+       resource_size_t misc;
 };
 
 struct com20020_dev {
        struct list_head list;
        struct net_device *dev;
 
+       struct led_classdev tx_led;
+       struct led_classdev recon_led;
+
        struct com20020_priv *pci_priv;
        int index;
 };
index 90f2615428c017f6616231e110a49ec3b914901a..d0f23cd6e236b0a51d3879ab8010537827d3424c 100644 (file)
@@ -1071,7 +1071,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
                                 NETIF_F_HIGHDMA | NETIF_F_LRO)
 
 #define BOND_ENC_FEATURES      (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
-                                NETIF_F_TSO)
+                                NETIF_F_ALL_TSO)
 
 static void bond_compute_features(struct bonding *bond)
 {
index 945c0955a9675198a8b0945ddc49dd6668380838..8b3275d7792acbab2d0ba9efe9fe3e2a6b283231 100644 (file)
@@ -8,15 +8,6 @@
  * Public License ("GPL") version 2 as distributed in the 'COPYING'
  * file from the main directory of the linux kernel source.
  *
- *
- * Your platform definition file should specify something like:
- *
- * static struct at91_can_data ek_can_data = {
- *     transceiver_switch = sam9263ek_transceiver_switch,
- * };
- *
- * at91_add_device_can(&ek_can_data);
- *
  */
 
 #include <linux/clk.h>
@@ -33,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/platform_data/atmel.h>
 
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
@@ -324,15 +314,6 @@ static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
        return reg_mid;
 }
 
-/*
- * Swtich transceiver on or off
- */
-static void at91_transceiver_switch(const struct at91_priv *priv, int on)
-{
-       if (priv->pdata && priv->pdata->transceiver_switch)
-               priv->pdata->transceiver_switch(on);
-}
-
 static void at91_setup_mailboxes(struct net_device *dev)
 {
        struct at91_priv *priv = netdev_priv(dev);
@@ -416,7 +397,6 @@ static void at91_chip_start(struct net_device *dev)
 
        at91_set_bittiming(dev);
        at91_setup_mailboxes(dev);
-       at91_transceiver_switch(priv, 1);
 
        /* enable chip */
        if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -444,7 +424,6 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
        reg_mr = at91_read(priv, AT91_MR);
        at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
 
-       at91_transceiver_switch(priv, 0);
        priv->can.state = state;
 }
 
index e5fac368068a2320eb06d207934c4b969356237b..131026fbc2d77cbc3ccb5903daa10f8920f8ae17 100644 (file)
@@ -87,6 +87,7 @@ static const struct pci_device_id peak_pci_tbl[] = {
        {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
        {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
        {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+       {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #ifdef CONFIG_CAN_PEAK_PCIEC
        {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
        {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
index 10d8497635e872fc808e8e26489a70f788eae073..d9a42c6467836cdf3aa00adc1d5a4f059b97469e 100644 (file)
@@ -601,7 +601,7 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
                stats->tx_errors++;
                if (likely(skb)) {
                        cf->can_id |= CAN_ERR_LOSTARB;
-                       cf->data[0] = (alc & 0x1f) >> 8;
+                       cf->data[0] = (alc >> 8) & 0x1f;
                }
        }
 
@@ -854,4 +854,4 @@ module_platform_driver(sun4i_can_driver);
 MODULE_AUTHOR("Peter Chen <xingkongcp@gmail.com>");
 MODULE_AUTHOR("Gerhard Bertelsmann <info@gerhard-bertelsmann.de>");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION(DRV_NAME "CAN driver for Allwinner SoCs (A10/A20)");
+MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20)");
index 9d56515f4c4da8ef307ff68ccb438a5e7742ceea..6f946fedbb77c1943770b31665a0f5e1e6e4d889 100644 (file)
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
+#include <linux/of_net.h>
 #include <net/dsa.h>
 #include <linux/ethtool.h>
 #include <linux/if_bridge.h>
 #include <linux/brcmphy.h>
+#include <linux/etherdevice.h>
+#include <net/switchdev.h>
 
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"
@@ -264,6 +267,50 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
        }
 }
 
+static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
+                                           int port)
+{
+       unsigned int off;
+
+       switch (port) {
+       case 7:
+               off = P7_IRQ_OFF;
+               break;
+       case 0:
+               /* Port 0 interrupts are located on the first bank */
+               intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
+               return;
+       default:
+               off = P_IRQ_OFF(port);
+               break;
+       }
+
+       intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
+}
+
+static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
+                                            int port)
+{
+       unsigned int off;
+
+       switch (port) {
+       case 7:
+               off = P7_IRQ_OFF;
+               break;
+       case 0:
+               /* Port 0 interrupts are located on the first bank */
+               intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
+               intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
+               return;
+       default:
+               off = P_IRQ_OFF(port);
+               break;
+       }
+
+       intrl2_1_mask_set(priv, P_IRQ_MASK(off));
+       intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
+}
+
 static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
                              struct phy_device *phy)
 {
@@ -280,7 +327,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
        core_writel(priv, 0, CORE_G_PCTL_PORT(port));
 
        /* Re-enable the GPHY and re-apply workarounds */
-       if (port == 0 && priv->hw_params.num_gphy == 1) {
+       if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
                bcm_sf2_gphy_enable_set(ds, true);
                if (phy) {
                        /* if phy_stop() has been called before, phy
@@ -297,9 +344,9 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
                }
        }
 
-       /* Enable port 7 interrupts to get notified */
-       if (port == 7)
-               intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
+       /* Enable MoCA port interrupts to get notified */
+       if (port == priv->moca_port)
+               bcm_sf2_port_intr_enable(priv, port);
 
        /* Set this port, and only this one to be in the default VLAN,
         * if member of a bridge, restore its membership prior to
@@ -329,12 +376,10 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
        if (priv->wol_ports_mask & (1 << port))
                return;
 
-       if (port == 7) {
-               intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
-               intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
-       }
+       if (port == priv->moca_port)
+               bcm_sf2_port_intr_disable(priv, port);
 
-       if (port == 0 && priv->hw_params.num_gphy == 1)
+       if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
                bcm_sf2_gphy_enable_set(ds, false);
 
        if (dsa_is_cpu_port(ds, port))
@@ -555,6 +600,236 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
        return 0;
 }
 
+/* Address Resolution Logic routines */
+static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
+{
+       unsigned int timeout = 10;
+       u32 reg;
+
+       do {
+               reg = core_readl(priv, CORE_ARLA_RWCTL);
+               if (!(reg & ARL_STRTDN))
+                       return 0;
+
+               usleep_range(1000, 2000);
+       } while (timeout--);
+
+       return -ETIMEDOUT;
+}
+
+static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
+{
+       u32 cmd;
+
+       if (op > ARL_RW)
+               return -EINVAL;
+
+       cmd = core_readl(priv, CORE_ARLA_RWCTL);
+       cmd &= ~IVL_SVL_SELECT;
+       cmd |= ARL_STRTDN;
+       if (op)
+               cmd |= ARL_RW;
+       else
+               cmd &= ~ARL_RW;
+       core_writel(priv, cmd, CORE_ARLA_RWCTL);
+
+       return bcm_sf2_arl_op_wait(priv);
+}
+
+static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
+                           u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
+                           bool is_valid)
+{
+       unsigned int i;
+       int ret;
+
+       ret = bcm_sf2_arl_op_wait(priv);
+       if (ret)
+               return ret;
+
+       /* Read the 4 bins */
+       for (i = 0; i < 4; i++) {
+               u64 mac_vid;
+               u32 fwd_entry;
+
+               mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
+               fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
+               bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
+
+               if (ent->is_valid && is_valid) {
+                       *idx = i;
+                       return 0;
+               }
+
+               /* This is the MAC we just deleted */
+               if (!is_valid && (mac_vid & mac))
+                       return 0;
+       }
+
+       return -ENOENT;
+}
+
+static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
+                         const unsigned char *addr, u16 vid, bool is_valid)
+{
+       struct bcm_sf2_arl_entry ent;
+       u32 fwd_entry;
+       u64 mac, mac_vid = 0;
+       u8 idx = 0;
+       int ret;
+
+       /* Convert the array into a 64-bit MAC */
+       mac = bcm_sf2_mac_to_u64(addr);
+
+       /* Perform a read for the given MAC and VID */
+       core_writeq(priv, mac, CORE_ARLA_MAC);
+       core_writel(priv, vid, CORE_ARLA_VID);
+
+       /* Issue a read operation for this MAC */
+       ret = bcm_sf2_arl_rw_op(priv, 1);
+       if (ret)
+               return ret;
+
+       ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
+       /* If this is a read, just finish now */
+       if (op)
+               return ret;
+
+       /* We could not find a matching MAC, so reset to a new entry */
+       if (ret) {
+               fwd_entry = 0;
+               idx = 0;
+       }
+
+       memset(&ent, 0, sizeof(ent));
+       ent.port = port;
+       ent.is_valid = is_valid;
+       ent.vid = vid;
+       ent.is_static = true;
+       memcpy(ent.mac, addr, ETH_ALEN);
+       bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+
+       core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
+       core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));
+
+       ret = bcm_sf2_arl_rw_op(priv, 0);
+       if (ret)
+               return ret;
+
+       /* Re-read the entry to check */
+       return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
+}
+
+static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
+                                 const struct switchdev_obj_port_fdb *fdb,
+                                 struct switchdev_trans *trans)
+{
+       /* We do not need to do anything specific here yet */
+       return 0;
+}
+
+static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
+                             const struct switchdev_obj_port_fdb *fdb,
+                             struct switchdev_trans *trans)
+{
+       struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+       return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true);
+}
+
+static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
+                             const struct switchdev_obj_port_fdb *fdb)
+{
+       struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+       return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
+}
+
+static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
+{
+       unsigned timeout = 1000;
+       u32 reg;
+
+       do {
+               reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
+               if (!(reg & ARLA_SRCH_STDN))
+                       return 0;
+
+               if (reg & ARLA_SRCH_VLID)
+                       return 0;
+
+               usleep_range(1000, 2000);
+       } while (timeout--);
+
+       return -ETIMEDOUT;
+}
+
+static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
+                                 struct bcm_sf2_arl_entry *ent)
+{
+       u64 mac_vid;
+       u32 fwd_entry;
+
+       mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
+       fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
+       bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
+                              const struct bcm_sf2_arl_entry *ent,
+                              struct switchdev_obj_port_fdb *fdb,
+                              int (*cb)(struct switchdev_obj *obj))
+{
+       if (!ent->is_valid)
+               return 0;
+
+       if (port != ent->port)
+               return 0;
+
+       ether_addr_copy(fdb->addr, ent->mac);
+       fdb->vid = ent->vid;
+       fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
+
+       return cb(&fdb->obj);
+}
+
+static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
+                              struct switchdev_obj_port_fdb *fdb,
+                              int (*cb)(struct switchdev_obj *obj))
+{
+       struct bcm_sf2_priv *priv = ds_to_priv(ds);
+       struct net_device *dev = ds->ports[port];
+       struct bcm_sf2_arl_entry results[2];
+       unsigned int count = 0;
+       int ret;
+
+       /* Start search operation */
+       core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);
+
+       do {
+               ret = bcm_sf2_arl_search_wait(priv);
+               if (ret)
+                       return ret;
+
+               /* Read both entries, then return their values back */
+               bcm_sf2_arl_search_rd(priv, 0, &results[0]);
+               ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
+               if (ret)
+                       return ret;
+
+               bcm_sf2_arl_search_rd(priv, 1, &results[1]);
+               ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
+               if (ret)
+                       return ret;
+
+               if (!results[0].is_valid && !results[1].is_valid)
+                       break;
+
+       } while (count++ < CORE_ARLA_NUM_ENTRIES);
+
+       return 0;
+}
+
 static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
 {
        struct bcm_sf2_priv *priv = dev_id;
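
bcm_sf2_arl_op_wait() and bcm_sf2_arl_search_wait() above follow the standard poll-with-timeout shape: read the status register, return as soon as the busy bit clears, sleep between reads, and give up after a bounded number of iterations. A userspace sketch of the idiom, with a fake register that clears after a few reads:

#include <stdio.h>
#include <unistd.h>
#include <errno.h>

static unsigned int reads;

/* stand-in for core_readl(): bit 0 = operation still running,
 * cleared by the "hardware" on the third read */
static unsigned int read_status(void)
{
	return ++reads < 3 ? 1 : 0;
}

static int op_wait(void)
{
	unsigned int timeout = 10;

	do {
		if (!(read_status() & 1))
			return 0;	/* done */
		usleep(1000);		/* kernel uses usleep_range(1000, 2000) */
	} while (timeout--);

	return -ETIMEDOUT;
}

int main(void)
{
	printf("op_wait() = %d\n", op_wait());
	return 0;
}
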
@@ -615,6 +890,42 @@ static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
        intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 }
 
+static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
+                                  struct device_node *dn)
+{
+       struct device_node *port;
+       const char *phy_mode_str;
+       int mode;
+       unsigned int port_num;
+       int ret;
+
+       priv->moca_port = -1;
+
+       for_each_available_child_of_node(dn, port) {
+               if (of_property_read_u32(port, "reg", &port_num))
+                       continue;
+
+               /* Internal PHYs get assigned a specific 'phy-mode' property
+                * value: "internal" to help flag them before MDIO probing
+                * has completed, since they might be turned off at that
+                * time
+                */
+               mode = of_get_phy_mode(port);
+               if (mode < 0) {
+                       ret = of_property_read_string(port, "phy-mode",
+                                                     &phy_mode_str);
+                       if (ret < 0)
+                               continue;
+
+                       if (!strcasecmp(phy_mode_str, "internal"))
+                               priv->int_phy_mask |= 1 << port_num;
+               }
+
+               if (mode == PHY_INTERFACE_MODE_MOCA)
+                       priv->moca_port = port_num;
+       }
+}
+
 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 {
        const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -633,6 +944,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
         * level
         */
        dn = ds->pd->of_node->parent;
+       bcm_sf2_identify_ports(priv, ds->pd->of_node);
 
        priv->irq0 = irq_of_parse_and_map(dn, 0);
        priv->irq1 = irq_of_parse_and_map(dn, 1);
@@ -913,7 +1225,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
 
        status->link = 0;
 
-       /* Port 7 is special as we do not get link status from CORE_LNKSTS,
+       /* MoCA port is special as we do not get link status from CORE_LNKSTS,
         * which means that we need to force the link at the port override
         * level to get the data to flow. We do use what the interrupt handler
         * did determine before.
@@ -921,7 +1233,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
         * For the other ports, we just force the link status, since this is
         * a fixed PHY device.
         */
-       if (port == 7) {
+       if (port == priv->moca_port) {
                status->link = priv->port_sts[port].link;
                /* For MoCA interfaces, also force a link down notification
                 * since some version of the user-space daemon (mocad) use
@@ -1076,6 +1388,10 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
        .port_join_bridge       = bcm_sf2_sw_br_join,
        .port_leave_bridge      = bcm_sf2_sw_br_leave,
        .port_stp_update        = bcm_sf2_sw_br_set_stp_state,
+       .port_fdb_prepare       = bcm_sf2_sw_fdb_prepare,
+       .port_fdb_add           = bcm_sf2_sw_fdb_add,
+       .port_fdb_del           = bcm_sf2_sw_fdb_del,
+       .port_fdb_dump          = bcm_sf2_sw_fdb_dump,
 };
 
 static int __init bcm_sf2_init(void)
index 789d7b7737da4ada78f06850eb835cd437c1040a..6bba1c98d764cf2b4222c82211677e30c448a4f0 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/mutex.h>
 #include <linux/mii.h>
 #include <linux/ethtool.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
 
 #include <net/dsa.h>
 
@@ -50,6 +52,60 @@ struct bcm_sf2_port_status {
        u32 vlan_ctl_mask;
 };
 
+struct bcm_sf2_arl_entry {
+       u8 port;
+       u8 mac[ETH_ALEN];
+       u16 vid;
+       u8 is_valid:1;
+       u8 is_age:1;
+       u8 is_static:1;
+};
+
+static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst)
+{
+       unsigned int i;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
+}
+
+static inline u64 bcm_sf2_mac_to_u64(const u8 *src)
+{
+       unsigned int i;
+       u64 dst = 0;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
+
+       return dst;
+}
+
+static inline void bcm_sf2_arl_to_entry(struct bcm_sf2_arl_entry *ent,
+                                       u64 mac_vid, u32 fwd_entry)
+{
+       memset(ent, 0, sizeof(*ent));
+       ent->port = fwd_entry & PORTID_MASK;
+       ent->is_valid = !!(fwd_entry & ARL_VALID);
+       ent->is_age = !!(fwd_entry & ARL_AGE);
+       ent->is_static = !!(fwd_entry & ARL_STATIC);
+       bcm_sf2_mac_from_u64(mac_vid, ent->mac);
+       ent->vid = mac_vid >> VID_SHIFT;
+}
+
+static inline void bcm_sf2_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
+                                         const struct bcm_sf2_arl_entry *ent)
+{
+       *mac_vid = bcm_sf2_mac_to_u64(ent->mac);
+       *mac_vid |= (u64)(ent->vid & VID_MASK) << VID_SHIFT;
+       *fwd_entry = ent->port & PORTID_MASK;
+       if (ent->is_valid)
+               *fwd_entry |= ARL_VALID;
+       if (ent->is_static)
+               *fwd_entry |= ARL_STATIC;
+       if (ent->is_age)
+               *fwd_entry |= ARL_AGE;
+}
+
 struct bcm_sf2_priv {
        /* Base registers, keep those in order with BCM_SF2_REGS_NAME */
        void __iomem                    *core;
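
bcm_sf2_mac_to_u64()/bcm_sf2_mac_from_u64() pack the six MAC bytes into the low 48 bits of a u64 with byte 0 in the most significant position, matching the ARL MACVID register layout. A standalone round-trip check of the conversion (MAC value illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

static uint64_t mac_to_u64(const uint8_t *src)
{
	uint64_t dst = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		dst |= (uint64_t)src[ETH_ALEN - 1 - i] << (8 * i);
	return dst;
}

static void mac_from_u64(uint64_t src, uint8_t *dst)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
}

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint8_t out[ETH_ALEN];
	uint64_t v = mac_to_u64(mac);

	mac_from_u64(v, out);
	printf("0x%012llx round-trip ok: %d\n",
	       (unsigned long long)v, !memcmp(mac, out, ETH_ALEN));
	return 0;
}
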
@@ -78,6 +134,12 @@ struct bcm_sf2_priv {
 
        /* Mask of ports enabled for Wake-on-LAN */
        u32                             wol_ports_mask;
+
+       /* MoCA port location */
+       int                             moca_port;
+
+       /* Bitmask of ports having an integrated PHY */
+       unsigned int                    int_phy_mask;
 };
 
 struct bcm_sf2_hw_stats {
index fa4e6e78c9ea75526bec5d6ed7356053753fb6cb..97780d43b5c0b6bc37439cf3f48cb5e8928c1e80 100644 (file)
 #define CORE_BRCM_HDR_RX_DIS           0x0980
 #define CORE_BRCM_HDR_TX_DIS           0x0988
 
+#define CORE_ARLA_NUM_ENTRIES          1024
+
+#define CORE_ARLA_RWCTL                        0x1400
+#define  ARL_RW                                (1 << 0)
+#define  IVL_SVL_SELECT                        (1 << 6)
+#define  ARL_STRTDN                    (1 << 7)
+
+#define CORE_ARLA_MAC                  0x1408
+#define CORE_ARLA_VID                  0x1420
+#define  ARLA_VIDTAB_INDX_MASK         0x1fff
+
+#define CORE_ARLA_MACVID0              0x1440
+#define  MAC_MASK                      0xffffffffffff
+#define  VID_SHIFT                     48
+#define  VID_MASK                      0xfff
+
+#define CORE_ARLA_FWD_ENTRY0           0x1460
+#define  PORTID_MASK                   0x1ff
+#define  ARL_CON_SHIFT                 9
+#define  ARL_CON_MASK                  0x3
+#define  ARL_PRI_SHIFT                 11
+#define  ARL_PRI_MASK                  0x7
+#define  ARL_AGE                       (1 << 14)
+#define  ARL_STATIC                    (1 << 15)
+#define  ARL_VALID                     (1 << 16)
+
+#define CORE_ARLA_MACVID_ENTRY(x)      (CORE_ARLA_MACVID0 + ((x) * 0x40))
+#define CORE_ARLA_FWD_ENTRY(x)         (CORE_ARLA_FWD_ENTRY0 + ((x) * 0x40))
+
+#define CORE_ARLA_SRCH_CTL             0x1540
+#define  ARLA_SRCH_VLID                        (1 << 0)
+#define  IVL_SVL_SELECT                        (1 << 6)
+#define  ARLA_SRCH_STDN                        (1 << 7)
+
+#define CORE_ARLA_SRCH_ADR             0x1544
+#define  ARLA_SRCH_ADR_VALID           (1 << 15)
+
+#define CORE_ARLA_SRCH_RSLT_0_MACVID   0x1580
+#define CORE_ARLA_SRCH_RSLT_0          0x15a0
+
+#define CORE_ARLA_SRCH_RSLT_MACVID(x)  (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40))
+#define CORE_ARLA_SRCH_RSLT(x)         (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40))
+
 #define CORE_MEM_PSM_VDD_CTRL          0x2380
 #define  P_TXQ_PSM_VDD_SHIFT           2
 #define  P_TXQ_PSM_VDD_MASK            0x3
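The MAC/VID and forward-entry bins, like the search-result registers, sit at a 0x40 stride, so the accessor macros reduce to fixed offsets:

    /* Illustrative expansions of the 0x40-stride accessors:
     *   CORE_ARLA_MACVID_ENTRY(0)  == 0x1440
     *   CORE_ARLA_MACVID_ENTRY(1)  == 0x1480
     *   CORE_ARLA_SRCH_RSLT(1)     == 0x15e0   (0x15a0 + 0x40)
     */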
index c29aebe1e62b59801ca7bb2aed36f9c502733fbf..9093577755f69bac2c548518922b1b63215a62a6 100644 (file)
@@ -26,7 +26,7 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg)
        if (bus == NULL)
                return -EINVAL;
 
-       return mdiobus_read(bus, ds->pd->sw_addr + addr, reg);
+       return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg);
 }
 
 #define REG_READ(addr, reg)                                    \
@@ -47,7 +47,7 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
        if (bus == NULL)
                return -EINVAL;
 
-       return mdiobus_write(bus, ds->pd->sw_addr + addr, reg, val);
+       return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val);
 }
 
 #define REG_WRITE(addr, reg, val)                              \
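mdiobus_read_nested() and mdiobus_write_nested() are the core MDIO accessors for buses reached through another MDIO bus, as with DSA pseudo-PHYs sitting behind the master interface's bus; they take the bus mutex with a nested lock class so lockdep does not flag the recursion. A sketch of the read side, modeled on the driver-local helper this series deletes from mv88e6xxx.c below:

    int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
    {
            int ret;

            /* nested class keeps lockdep quiet for MDIO-behind-MDIO */
            mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
            ret = bus->read(bus, addr, regnum);
            mutex_unlock(&bus->mdio_lock);

            return ret;
    }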
index ca3330aec7402759e08eb6162392f387cfc8b609..2c8eb6f76ebe02a54f71539d3799f21f879e01f8 100644 (file)
@@ -113,8 +113,6 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
 #endif
        .get_regs_len           = mv88e6xxx_get_regs_len,
        .get_regs               = mv88e6xxx_get_regs,
-       .port_join_bridge       = mv88e6xxx_join_bridge,
-       .port_leave_bridge      = mv88e6xxx_leave_bridge,
        .port_stp_update        = mv88e6xxx_port_stp_update,
        .port_pvid_get          = mv88e6xxx_port_pvid_get,
        .port_pvid_set          = mv88e6xxx_port_pvid_set,
@@ -124,7 +122,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
        .port_fdb_prepare       = mv88e6xxx_port_fdb_prepare,
        .port_fdb_add           = mv88e6xxx_port_fdb_add,
        .port_fdb_del           = mv88e6xxx_port_fdb_del,
-       .port_fdb_getnext       = mv88e6xxx_port_fdb_getnext,
+       .port_fdb_dump          = mv88e6xxx_port_fdb_dump,
 };
 
 MODULE_ALIAS("platform:mv88e6171");
index 078a358c1b8322c9e67199bcefe45ebfd2935061..cbf4dd8721a6a27e4fdd0ed7963304472486cda8 100644 (file)
@@ -340,8 +340,6 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
        .set_eeprom             = mv88e6352_set_eeprom,
        .get_regs_len           = mv88e6xxx_get_regs_len,
        .get_regs               = mv88e6xxx_get_regs,
-       .port_join_bridge       = mv88e6xxx_join_bridge,
-       .port_leave_bridge      = mv88e6xxx_leave_bridge,
        .port_stp_update        = mv88e6xxx_port_stp_update,
        .port_pvid_get          = mv88e6xxx_port_pvid_get,
        .port_pvid_set          = mv88e6xxx_port_pvid_set,
@@ -351,7 +349,7 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
        .port_fdb_prepare       = mv88e6xxx_port_fdb_prepare,
        .port_fdb_add           = mv88e6xxx_port_fdb_add,
        .port_fdb_del           = mv88e6xxx_port_fdb_del,
-       .port_fdb_getnext       = mv88e6xxx_port_fdb_getnext,
+       .port_fdb_dump          = mv88e6xxx_port_fdb_dump,
 };
 
 MODULE_ALIAS("platform:mv88e6172");
index 87b405e4f9f6432ff6e7bfbc1502449c6e2762e9..b1b14f519d8b195a28e608e49bb94bb751e226e3 100644 (file)
@@ -11,7 +11,6 @@
  * (at your option) any later version.
  */
 
-#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include <linux/seq_file.h>
 #include <net/dsa.h>
 #include <net/switchdev.h>
 #include "mv88e6xxx.h"
 
-/* MDIO bus access can be nested in the case of PHYs connected to the
- * internal MDIO bus of the switch, which is accessed via MDIO bus of
- * the Ethernet interface. Avoid lockdep false positives by using
- * mutex_lock_nested().
- */
-static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
-{
-       int ret;
-
-       mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
-       ret = bus->read(bus, addr, regnum);
-       mutex_unlock(&bus->mdio_lock);
-
-       return ret;
-}
-
-static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
-                                  u16 val)
-{
-       int ret;
-
-       mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
-       ret = bus->write(bus, addr, regnum, val);
-       mutex_unlock(&bus->mdio_lock);
-
-       return ret;
-}
-
 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
  * use all 32 SMI bus addresses on its SMI bus, and all switch registers
  * will be directly accessible on some {device address,register address}
@@ -68,7 +38,7 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
        int i;
 
        for (i = 0; i < 16; i++) {
-               ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
+               ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
                if (ret < 0)
                        return ret;
 
@@ -84,7 +54,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
        int ret;
 
        if (sw_addr == 0)
-               return mv88e6xxx_mdiobus_read(bus, addr, reg);
+               return mdiobus_read_nested(bus, addr, reg);
 
        /* Wait for the bus to become free. */
        ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -92,8 +62,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
                return ret;
 
        /* Transmit the read command. */
-       ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
-                                     SMI_CMD_OP_22_READ | (addr << 5) | reg);
+       ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
+                                  SMI_CMD_OP_22_READ | (addr << 5) | reg);
        if (ret < 0)
                return ret;
 
@@ -103,7 +73,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
                return ret;
 
        /* Read the data. */
-       ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
+       ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
        if (ret < 0)
                return ret;
 
@@ -147,7 +117,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
        int ret;
 
        if (sw_addr == 0)
-               return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
+               return mdiobus_write_nested(bus, addr, reg, val);
 
        /* Wait for the bus to become free. */
        ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -155,13 +125,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
                return ret;
 
        /* Transmit the data to write. */
-       ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
+       ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
        if (ret < 0)
                return ret;
 
        /* Transmit the write command. */
-       ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
-                                     SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
+       ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
+                                  SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
        if (ret < 0)
                return ret;
 
@@ -876,13 +846,6 @@ static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
                               GLOBAL_ATU_OP_BUSY);
 }
 
-/* Must be called with SMI lock held */
-static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
-{
-       return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
-                              GLOBAL2_SCRATCH_BUSY);
-}
-
 /* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
                                        int regnum)
@@ -1046,11 +1009,6 @@ static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
        return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
 }
 
-static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
-{
-       return _mv88e6xxx_atu_flush(ds, fid, false);
-}
-
 static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
                               int to_port, bool static_too)
 {
@@ -1112,130 +1070,21 @@ abort:
        return ret;
 }
 
-/* Must be called with smi lock held */
-static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       u8 fid = ps->fid[port];
-       u16 reg = fid << 12;
-
-       if (dsa_is_cpu_port(ds, port))
-               reg |= ds->phys_port_mask;
-       else
-               reg |= (ps->bridge_mask[fid] |
-                      (1 << dsa_upstream_port(ds))) & ~(1 << port);
-
-       return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
-}
-
-/* Must be called with smi lock held */
-static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int port;
-       u32 mask;
-       int ret;
-
-       mask = ds->phys_port_mask;
-       while (mask) {
-               port = __ffs(mask);
-               mask &= ~(1 << port);
-               if (ps->fid[port] != fid)
-                       continue;
-
-               ret = _mv88e6xxx_update_port_config(ds, port);
-               if (ret)
-                       return ret;
-       }
-
-       return _mv88e6xxx_flush_fid(ds, fid);
-}
-
-/* Bridge handling functions */
-
-int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret = 0;
-       u32 nmask;
-       int fid;
-
-       /* If the bridge group is not empty, join that group.
-        * Otherwise create a new group.
-        */
-       fid = ps->fid[port];
-       nmask = br_port_mask & ~(1 << port);
-       if (nmask)
-               fid = ps->fid[__ffs(nmask)];
-
-       nmask = ps->bridge_mask[fid] | (1 << port);
-       if (nmask != br_port_mask) {
-               netdev_err(ds->ports[port],
-                          "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
-                          fid, br_port_mask, nmask);
-               return -EINVAL;
-       }
-
-       mutex_lock(&ps->smi_mutex);
-
-       ps->bridge_mask[fid] = br_port_mask;
-
-       if (fid != ps->fid[port]) {
-               clear_bit(ps->fid[port], ps->fid_bitmap);
-               ps->fid[port] = fid;
-               ret = _mv88e6xxx_update_bridge_config(ds, fid);
-       }
-
-       mutex_unlock(&ps->smi_mutex);
-
-       return ret;
-}
-
-int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
+static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch *ds, int port,
+                                       u16 output_ports)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       u8 fid, newfid;
-       int ret;
-
-       fid = ps->fid[port];
-
-       if (ps->bridge_mask[fid] != br_port_mask) {
-               netdev_err(ds->ports[port],
-                          "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
-                          fid, br_port_mask, ps->bridge_mask[fid]);
-               return -EINVAL;
-       }
-
-       /* If the port was the last port of a bridge, we are done.
-        * Otherwise assign a new fid to the port, and fix up
-        * the bridge configuration.
-        */
-       if (br_port_mask == (1 << port))
-               return 0;
-
-       mutex_lock(&ps->smi_mutex);
-
-       newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
-       if (unlikely(newfid > ps->num_ports)) {
-               netdev_err(ds->ports[port], "all first %d FIDs are used\n",
-                          ps->num_ports);
-               ret = -ENOSPC;
-               goto unlock;
-       }
-
-       ps->fid[port] = newfid;
-       set_bit(newfid, ps->fid_bitmap);
-       ps->bridge_mask[fid] &= ~(1 << port);
-       ps->bridge_mask[newfid] = 1 << port;
+       const u16 mask = (1 << ps->num_ports) - 1;
+       int reg;
 
-       ret = _mv88e6xxx_update_bridge_config(ds, fid);
-       if (!ret)
-               ret = _mv88e6xxx_update_bridge_config(ds, newfid);
+       reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+       if (reg < 0)
+               return reg;
 
-unlock:
-       mutex_unlock(&ps->smi_mutex);
+       reg &= ~mask;
+       reg |= output_ports & mask;
 
-       return ret;
+       return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
 }
 
 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
@@ -1373,7 +1222,13 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
        return 0;
 }
 
-static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
+{
+       return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+                                   vid & GLOBAL_VTU_VID_MASK);
+}
+
+static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
                                  struct mv88e6xxx_vtu_stu_entry *entry)
 {
        struct mv88e6xxx_vtu_stu_entry next = { 0 };
@@ -1383,11 +1238,6 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
-                                  vid & GLOBAL_VTU_VID_MASK);
-       if (ret < 0)
-               return ret;
-
        ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
        if (ret < 0)
                return ret;
@@ -1547,6 +1397,7 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
        struct mv88e6xxx_vtu_stu_entry vlan = {
                .valid = true,
                .vid = vid,
+               .fid = vid, /* We use one FID per VLAN */
        };
        int i;
 
@@ -1580,22 +1431,10 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
                                return err;
                }
 
-               /* Non-bridged ports and bridge groups use FIDs from 1 to
-                * num_ports; VLANs use FIDs from num_ports+1 to 4095.
-                */
-               vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
-                                             ps->num_ports + 1);
-               if (unlikely(vlan.fid == VLAN_N_VID)) {
-                       pr_err("no more FID available for VLAN %d\n", vid);
-                       return -ENOSPC;
-               }
-
                /* Clear all MAC addresses from the new database */
                err = _mv88e6xxx_atu_flush(ds, vlan.fid, true);
                if (err)
                        return err;
-
-               set_bit(vlan.fid, ps->fid_bitmap);
        }
 
        *entry = vlan;
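Pinning the FID to the VID replaces the removed fid_bitmap allocation with an identity mapping, which is why the FDB code later in this diff can write entry.fid = vid directly. Spelled out as a helper it would be nothing more than:

    /* Hypothetical helper; the patch simply open-codes fid = vid. */
    static inline u16 mv88e6xxx_vid_to_fid(u16 vid)
    {
            return vid;     /* one FID per VLAN */
    }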
@@ -1610,7 +1449,12 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
        int err;
 
        mutex_lock(&ps->smi_mutex);
-       err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+
+       err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+       if (err)
+               goto unlock;
+
+       err = _mv88e6xxx_vtu_getnext(ds, &vlan);
        if (err)
                goto unlock;
 
@@ -1635,12 +1479,15 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct mv88e6xxx_vtu_stu_entry vlan;
-       bool keep = false;
        int i, err;
 
        mutex_lock(&ps->smi_mutex);
 
-       err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+       err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+       if (err)
+               goto unlock;
+
+       err = _mv88e6xxx_vtu_getnext(ds, &vlan);
        if (err)
                goto unlock;
 
@@ -1653,57 +1500,28 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
        vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
 
        /* keep the VLAN unless all ports are excluded */
+       vlan.valid = false;
        for (i = 0; i < ps->num_ports; ++i) {
                if (dsa_is_cpu_port(ds, i))
                        continue;
 
                if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
-                       keep = true;
+                       vlan.valid = true;
                        break;
                }
        }
 
-       vlan.valid = keep;
        err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
        if (err)
                goto unlock;
 
        err = _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
-       if (err)
-               goto unlock;
-
-       if (!keep)
-               clear_bit(vlan.fid, ps->fid_bitmap);
-
 unlock:
        mutex_unlock(&ps->smi_mutex);
 
        return err;
 }
 
-static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
-                                      struct mv88e6xxx_vtu_stu_entry *entry)
-{
-       int err;
-
-       do {
-               if (vid == 4095)
-                       return -ENOENT;
-
-               err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
-               if (err)
-                       return err;
-
-               if (!entry->valid)
-                       return -ENOENT;
-
-               vid = entry->vid;
-       } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
-                entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
-
-       return 0;
-}
-
 int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
                           unsigned long *ports, unsigned long *untagged)
 {
@@ -1716,7 +1534,12 @@ int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
                return -ENOENT;
 
        mutex_lock(&ps->smi_mutex);
-       err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
+       err = _mv88e6xxx_vtu_vid_write(ds, *vid);
+       if (err)
+               goto unlock;
+
+       err = _mv88e6xxx_vtu_getnext(ds, &next);
+unlock:
        mutex_unlock(&ps->smi_mutex);
 
        if (err)
@@ -1801,37 +1624,13 @@ static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
        return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
 }
 
-static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       struct mv88e6xxx_vtu_stu_entry vlan;
-       int err;
-
-       if (vid == 0)
-               return ps->fid[port];
-
-       err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
-       if (err)
-               return err;
-
-       if (vlan.vid == vid)
-               return vlan.fid;
-
-       return -ENOENT;
-}
-
 static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
                                    const unsigned char *addr, u16 vid,
                                    u8 state)
 {
        struct mv88e6xxx_atu_entry entry = { 0 };
-       int ret;
-
-       ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
-       if (ret < 0)
-               return ret;
 
-       entry.fid = ret;
+       entry.fid = vid; /* We use one FID per VLAN */
        entry.state = state;
        ether_addr_copy(entry.mac, addr);
        if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
@@ -1846,6 +1645,10 @@ int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
                               const struct switchdev_obj_port_fdb *fdb,
                               struct switchdev_trans *trans)
 {
+       /* We don't use per-port FDB */
+       if (fdb->vid == 0)
+               return -EOPNOTSUPP;
+
        /* We don't need any dynamic resource from the kernel (yet),
         * so skip the prepare phase.
         */
@@ -1884,7 +1687,6 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
 }
 
 static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
-                                 const unsigned char *addr,
                                  struct mv88e6xxx_atu_entry *entry)
 {
        struct mv88e6xxx_atu_entry next = { 0 };
@@ -1896,10 +1698,6 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_atu_mac_write(ds, addr);
-       if (ret < 0)
-               return ret;
-
        ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
        if (ret < 0)
                return ret;
@@ -1937,51 +1735,69 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
        return 0;
 }
 
-/* get next entry for port */
-int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-                              unsigned char *addr, u16 *vid, bool *is_static)
+int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+                           struct switchdev_obj_port_fdb *fdb,
+                           int (*cb)(struct switchdev_obj *obj))
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       struct mv88e6xxx_atu_entry next;
-       u16 fid;
-       int ret;
+       struct mv88e6xxx_vtu_stu_entry vlan = {
+               .vid = GLOBAL_VTU_VID_MASK, /* all ones */
+       };
+       int err;
 
        mutex_lock(&ps->smi_mutex);
 
-       ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
-       if (ret < 0)
+       err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
+       if (err)
                goto unlock;
-       fid = ret;
 
        do {
-               if (is_broadcast_ether_addr(addr)) {
-                       struct mv88e6xxx_vtu_stu_entry vtu;
+               struct mv88e6xxx_atu_entry addr = {
+                       .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+               };
 
-                       ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
-                       if (ret < 0)
-                               goto unlock;
+               err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+               if (err)
+                       goto unlock;
 
-                       *vid = vtu.vid;
-                       fid = vtu.fid;
-               }
+               if (!vlan.valid)
+                       break;
 
-               ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
-               if (ret < 0)
+               err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+               if (err)
                        goto unlock;
 
-               ether_addr_copy(addr, next.mac);
+               do {
+                       err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr);
+                       if (err)
+                               goto unlock;
 
-               if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
-                       continue;
-       } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
+                       if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+                               break;
+
+                       if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
+                               bool is_static = addr.state ==
+                                       (is_multicast_ether_addr(addr.mac) ?
+                                        GLOBAL_ATU_DATA_STATE_MC_STATIC :
+                                        GLOBAL_ATU_DATA_STATE_UC_STATIC);
+
+                               fdb->vid = vlan.vid;
+                               ether_addr_copy(fdb->addr, addr.mac);
+                               fdb->ndm_state = is_static ? NUD_NOARP :
+                                       NUD_REACHABLE;
+
+                               err = cb(&fdb->obj);
+                               if (err)
+                                       goto unlock;
+                       }
+               } while (!is_broadcast_ether_addr(addr.mac));
+
+       } while (vlan.vid < GLOBAL_VTU_VID_MASK);
 
-       *is_static = next.state == (is_multicast_ether_addr(addr) ?
-                                   GLOBAL_ATU_DATA_STATE_MC_STATIC :
-                                   GLOBAL_ATU_DATA_STATE_UC_STATIC);
 unlock:
        mutex_unlock(&ps->smi_mutex);
 
-       return ret;
+       return err;
 }
 
 static void mv88e6xxx_bridge_work(struct work_struct *work)
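port_fdb_dump iterates every valid VTU entry and, within each, every ATU entry of that FID, invoking the callback once per address seen on the port. A minimal consumer sketch, assuming the switchdev object helpers of this series (the real caller is the switchdev FDB dump path, which serializes each object to netlink):

    /* Hypothetical callback: log each FDB entry found on the port. */
    static int print_fdb_cb(struct switchdev_obj *obj)
    {
            const struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);

            pr_info("fdb: %pM vid %d %s\n", fdb->addr, fdb->vid,
                    fdb->ndm_state == NUD_NOARP ? "static" : "dynamic");
            return 0;
    }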
@@ -2003,7 +1819,7 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret, fid;
+       int ret;
        u16 reg;
 
        mutex_lock(&ps->smi_mutex);
@@ -2129,7 +1945,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                        reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
        }
 
-       reg |= PORT_CONTROL_2_8021Q_FALLBACK;
+       reg |= PORT_CONTROL_2_8021Q_SECURE;
 
        if (reg) {
                ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
@@ -2222,19 +2038,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        if (ret)
                goto abort;
 
-       /* Port based VLAN map: give each port its own address
-        * database, allow the CPU port to talk to each of the 'real'
-        * ports, and allow each of the 'real' ports to only talk to
-        * the upstream port.
+       /* Port based VLAN map: do not give each port its own address
+        * database, and allow every port to egress frames on all other ports.
         */
-       fid = port + 1;
-       ps->fid[port] = fid;
-       set_bit(fid, ps->fid_bitmap);
-
-       if (!dsa_is_cpu_port(ds, port))
-               ps->bridge_mask[fid] = 1 << port;
-
-       ret = _mv88e6xxx_update_port_config(ds, port);
+       reg = BIT(ps->num_ports) - 1; /* all ports */
+       ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~BIT(port));
        if (ret)
                goto abort;
 
@@ -2262,273 +2070,9 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds)
        return 0;
 }
 
-static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
-{
-       struct dsa_switch *ds = s->private;
-
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int reg, port;
-
-       seq_puts(s, "    GLOBAL GLOBAL2 ");
-       for (port = 0 ; port < ps->num_ports; port++)
-               seq_printf(s, " %2d  ", port);
-       seq_puts(s, "\n");
-
-       for (reg = 0; reg < 32; reg++) {
-               seq_printf(s, "%2x: ", reg);
-               seq_printf(s, " %4x    %4x  ",
-                          mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
-                          mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
-
-               for (port = 0 ; port < ps->num_ports; port++)
-                       seq_printf(s, "%4x ",
-                                  mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
-               seq_puts(s, "\n");
-       }
-
-       return 0;
-}
-
-static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, mv88e6xxx_regs_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_regs_fops = {
-       .open   = mv88e6xxx_regs_open,
-       .read   = seq_read,
-       .llseek = no_llseek,
-       .release = single_release,
-       .owner  = THIS_MODULE,
-};
-
-static void mv88e6xxx_atu_show_header(struct seq_file *s)
-{
-       seq_puts(s, "DB   T/P  Vec State Addr\n");
-}
-
-static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
-                                    unsigned char *addr, int data)
-{
-       bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
-       int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
-                      GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
-       int state = data & GLOBAL_ATU_DATA_STATE_MASK;
-
-       seq_printf(s, "%03x %5s %10pb   %x   %pM\n",
-                  dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
-}
-
-static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
-                                int dbnum)
-{
-       unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-       unsigned char addr[6];
-       int ret, data, state;
-
-       ret = _mv88e6xxx_atu_mac_write(ds, bcast);
-       if (ret < 0)
-               return ret;
-
-       do {
-               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
-                                          dbnum);
-               if (ret < 0)
-                       return ret;
-
-               ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
-               if (ret < 0)
-                       return ret;
-
-               data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
-               if (data < 0)
-                       return data;
-
-               state = data & GLOBAL_ATU_DATA_STATE_MASK;
-               if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
-                       break;
-               ret = _mv88e6xxx_atu_mac_read(ds, addr);
-               if (ret < 0)
-                       return ret;
-               mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
-       } while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
-
-       return 0;
-}
-
-static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
-{
-       struct dsa_switch *ds = s->private;
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int dbnum;
-
-       mv88e6xxx_atu_show_header(s);
-
-       for (dbnum = 0; dbnum < 255; dbnum++) {
-               mutex_lock(&ps->smi_mutex);
-               mv88e6xxx_atu_show_db(s, ds, dbnum);
-               mutex_unlock(&ps->smi_mutex);
-       }
-
-       return 0;
-}
-
-static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, mv88e6xxx_atu_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_atu_fops = {
-       .open   = mv88e6xxx_atu_open,
-       .read   = seq_read,
-       .llseek = no_llseek,
-       .release = single_release,
-       .owner  = THIS_MODULE,
-};
-
-static void mv88e6xxx_stats_show_header(struct seq_file *s,
-                                       struct mv88e6xxx_priv_state *ps)
-{
-       int port;
-
-       seq_puts(s, "      Statistic       ");
-       for (port = 0 ; port < ps->num_ports; port++)
-               seq_printf(s, "Port %2d  ", port);
-       seq_puts(s, "\n");
-}
-
-static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
-{
-       struct dsa_switch *ds = s->private;
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
-       int port, stat, max_stats;
-       uint64_t value;
-
-       if (have_sw_in_discards(ds))
-               max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
-       else
-               max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
-
-       mv88e6xxx_stats_show_header(s, ps);
-
-       mutex_lock(&ps->smi_mutex);
-
-       for (stat = 0; stat < max_stats; stat++) {
-               seq_printf(s, "%19s: ", stats[stat].string);
-               for (port = 0 ; port < ps->num_ports; port++) {
-                       _mv88e6xxx_stats_snapshot(ds, port);
-                       value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
-                                                           port);
-                       seq_printf(s, "%8llu ", value);
-               }
-               seq_puts(s, "\n");
-       }
-       mutex_unlock(&ps->smi_mutex);
-
-       return 0;
-}
-
-static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, mv88e6xxx_stats_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_stats_fops = {
-       .open   = mv88e6xxx_stats_open,
-       .read   = seq_read,
-       .llseek = no_llseek,
-       .release = single_release,
-       .owner  = THIS_MODULE,
-};
-
-static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
-{
-       struct dsa_switch *ds = s->private;
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int target, ret;
-
-       seq_puts(s, "Target Port\n");
-
-       mutex_lock(&ps->smi_mutex);
-       for (target = 0; target < 32; target++) {
-               ret = _mv88e6xxx_reg_write(
-                       ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
-                       target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
-               if (ret < 0)
-                       goto out;
-               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
-                                         GLOBAL2_DEVICE_MAPPING);
-               seq_printf(s, "  %2d   %2d\n", target,
-                          ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
-       }
-out:
-       mutex_unlock(&ps->smi_mutex);
-
-       return 0;
-}
-
-static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_device_map_fops = {
-       .open   = mv88e6xxx_device_map_open,
-       .read   = seq_read,
-       .llseek = no_llseek,
-       .release = single_release,
-       .owner  = THIS_MODULE,
-};
-
-static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
-{
-       struct dsa_switch *ds = s->private;
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int reg, ret;
-
-       seq_puts(s, "Register Value\n");
-
-       mutex_lock(&ps->smi_mutex);
-       for (reg = 0; reg < 0x80; reg++) {
-               ret = _mv88e6xxx_reg_write(
-                       ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
-                       reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
-               if (ret < 0)
-                       goto out;
-
-               ret = _mv88e6xxx_scratch_wait(ds);
-               if (ret < 0)
-                       goto out;
-
-               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
-                                         GLOBAL2_SCRATCH_MISC);
-               seq_printf(s, "  %2x   %2x\n", reg,
-                          ret & GLOBAL2_SCRATCH_VALUE_MASK);
-       }
-out:
-       mutex_unlock(&ps->smi_mutex);
-
-       return 0;
-}
-
-static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_scratch_fops = {
-       .open   = mv88e6xxx_scratch_open,
-       .read   = seq_read,
-       .llseek = no_llseek,
-       .release = single_release,
-       .owner  = THIS_MODULE,
-};
-
 int mv88e6xxx_setup_common(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       char *name;
 
        mutex_init(&ps->smi_mutex);
 
@@ -2536,24 +2080,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
 
        INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
 
-       name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
-       ps->dbgfs = debugfs_create_dir(name, NULL);
-       kfree(name);
-
-       debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
-                           &mv88e6xxx_regs_fops);
-
-       debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
-                           &mv88e6xxx_atu_fops);
-
-       debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
-                           &mv88e6xxx_stats_fops);
-
-       debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
-                           &mv88e6xxx_device_map_fops);
-
-       debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
-                           &mv88e6xxx_scratch_fops);
        return 0;
 }
 
index 8325c11b9be27eb8defe2b952a9d9a93cf1d516d..6f9ed5d45012597d732df6c591bbe0686a1a49af 100644 (file)
@@ -402,18 +402,10 @@ struct mv88e6xxx_priv_state {
        int             id; /* switch product id */
        int             num_ports;      /* number of switch ports */
 
-       /* hw bridging */
-
-       DECLARE_BITMAP(fid_bitmap, VLAN_N_VID); /* FIDs 1 to 4095 available */
-       u16 fid[DSA_MAX_PORTS];                 /* per (non-bridged) port FID */
-       u16 bridge_mask[DSA_MAX_PORTS];         /* br groups (indexed by FID) */
-
        unsigned long port_state_update_mask;
        u8 port_state[DSA_MAX_PORTS];
 
        struct work_struct bridge_work;
-
-       struct dentry *dbgfs;
 };
 
 struct mv88e6xxx_hw_stat {
@@ -464,8 +456,6 @@ int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
                      struct phy_device *phydev, struct ethtool_eee *e);
-int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
-int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
 int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid);
 int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 vid);
@@ -482,8 +472,9 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
                           struct switchdev_trans *trans);
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
                           const struct switchdev_obj_port_fdb *fdb);
-int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-                              unsigned char *addr, u16 *vid, bool *is_static);
+int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+                           struct switchdev_obj_port_fdb *fdb,
+                           int (*cb)(struct switchdev_obj *obj));
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
                             int reg, int val);
index 815eb94990f5edb50883bf9d9a4f68a1dc535e65..69fc8409a9733ffe2f6f312f0955bb506657355b 100644 (file)
@@ -147,8 +147,12 @@ static void dummy_setup(struct net_device *dev)
        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
-       dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
+       dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST;
+       dev->features   |= NETIF_F_ALL_TSO | NETIF_F_UFO;
        dev->features   |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+       dev->features   |= NETIF_F_GSO_ENCAP_ALL;
+       dev->hw_features |= dev->features;
+       dev->hw_enc_features |= dev->features;
        eth_hw_addr_random(dev);
 }
 
index ae89de7deb132587e0ea0993f1ade6f01c19228d..20bf55dbd76f074d4d525fd133bfe4cf7f83f103 100644 (file)
@@ -1141,8 +1141,6 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
        strlcpy(info->version, "revision: 1.0", sizeof(info->version));
        strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
-       info->eedump_len = 0;
-       info->regdump_len = sizeof(struct greth_regs);
 }
 
 static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
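This removal, repeated across several drivers below, is safe because the ethtool core now fills these lengths itself from the driver ops. Roughly, paraphrasing net/core/ethtool.c of this era:

    /* Paraphrased core behavior, not part of this patch: */
    if (ops->get_sset_count) {
            int rc = ops->get_sset_count(dev, ETH_SS_STATS);

            if (rc >= 0)
                    info.n_stats = rc;
    }
    if (ops->get_regs_len)
            info.regdump_len = ops->get_regs_len(dev);
    if (ops->get_eeprom_len)
            info.eedump_len = ops->get_eeprom_len(dev);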
index 48ce83e443c2b12bbcbc2e2545c5ac6b1983f3d0..8d50314ac3eb1f308d1cc556270058aba05c7b60 100644 (file)
@@ -847,21 +847,25 @@ static int emac_probe(struct platform_device *pdev)
        if (ndev->irq == -ENXIO) {
                netdev_err(ndev, "No irq resource\n");
                ret = ndev->irq;
-               goto out;
+               goto out_iounmap;
        }
 
        db->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(db->clk)) {
                ret = PTR_ERR(db->clk);
-               goto out;
+               goto out_iounmap;
        }
 
-       clk_prepare_enable(db->clk);
+       ret = clk_prepare_enable(db->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
+               goto out_iounmap;
+       }
 
        ret = sunxi_sram_claim(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
-               goto out;
+               goto out_clk_disable_unprepare;
        }
 
        db->phy_node = of_parse_phandle(np, "phy", 0);
@@ -910,6 +914,10 @@ static int emac_probe(struct platform_device *pdev)
 
 out_release_sram:
        sunxi_sram_release(&pdev->dev);
+out_clk_disable_unprepare:
+       clk_disable_unprepare(db->clk);
+out_iounmap:
+       iounmap(db->membase);
 out:
        dev_err(db->dev, "not found (%d).\n", ret);
 
@@ -921,8 +929,12 @@ out:
 static int emac_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
+       struct emac_board_info *db = netdev_priv(ndev);
 
        unregister_netdev(ndev);
+       sunxi_sram_release(&pdev->dev);
+       clk_disable_unprepare(db->clk);
+       iounmap(db->membase);
        free_netdev(ndev);
 
        dev_dbg(&pdev->dev, "released and freed device\n");
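The probe fix follows the usual inverted-teardown idiom: each acquired resource gets a label, failures jump to the label that releases everything obtained so far in reverse order, and remove() repeats the full ladder. Schematically, with hypothetical names:

    static int example_probe(void)
    {
            int ret;

            ret = acquire_a();              /* e.g. ioremap */
            if (ret)
                    goto out;
            ret = acquire_b();              /* e.g. clk_prepare_enable */
            if (ret)
                    goto out_release_a;
            ret = acquire_c();              /* e.g. sunxi_sram_claim */
            if (ret)
                    goto out_release_b;
            return 0;

    out_release_b:
            release_b();
    out_release_a:
            release_a();
    out:
            return ret;
    }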
index cb367cc59e0b65985069991b30c96d5c43362bd6..5330bcb8a9448a39c35331d38d4d23dbd3581543 100644 (file)
@@ -714,7 +714,6 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
                 aup->mac_id);
-       info->regdump_len = 0;
 }
 
 static void au1000_set_msglevel(struct net_device *dev, u32 value)
index 2c063b60db4b02bc48246887fb5d98f6b3de0394..96f485ab612e679dc7065b1e214cb9d73c690d43 100644 (file)
@@ -327,9 +327,13 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
        pdata->debugfs_xpcs_reg = 0;
 
        buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+       if (!buf)
+               return;
+
        pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
        if (!pdata->xgbe_debugfs) {
                netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+               kfree(buf);
                return;
        }
 
index 45512242baea58caaeedf5bd3732c4f6dd85afeb..112f1bc8bceef908ddab996860e86847e3b610b6 100644 (file)
@@ -1595,7 +1595,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
                                  packet->rdesc_count, 1);
 
        /* Make sure ownership is written to the descriptor */
-       dma_wmb();
+       wmb();
 
        ring->cur = cur_index + 1;
        if (!packet->skb->xmit_more ||
index 14bad8c44c870b5ad2dd51d19886a7b85f95c691..cff8940e169409d567d4f4f9fa696ba446a1d477 100644 (file)
@@ -365,7 +365,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 
                /* Restart the device on a Fatal Bus Error */
                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
-                       queue_work(pdata->dev_workqueue, &pdata->restart_work);
+                       schedule_work(&pdata->restart_work);
 
                /* Clear all interrupt signals */
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
@@ -1537,7 +1537,7 @@ static void xgbe_tx_timeout(struct net_device *netdev)
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
        netdev_warn(netdev, "tx timeout, device restarting\n");
-       queue_work(pdata->dev_workqueue, &pdata->restart_work);
+       schedule_work(&pdata->restart_work);
 }
 
 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
@@ -1811,6 +1811,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        struct netdev_queue *txq;
        int processed = 0;
        unsigned int tx_packets = 0, tx_bytes = 0;
+       unsigned int cur;
 
        DBGPR("-->xgbe_tx_poll\n");
 
@@ -1818,10 +1819,11 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        if (!ring)
                return 0;
 
+       cur = ring->cur;
        txq = netdev_get_tx_queue(netdev, channel->queue_index);
 
        while ((processed < XGBE_TX_DESC_MAX_PROC) &&
-              (ring->dirty != ring->cur)) {
+              (ring->dirty != cur)) {
                rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
                rdesc = rdata->rdesc;
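Caching ring->cur up front gives the reclaim loop a stable bound: descriptors that a concurrent xmit publishes after the snapshot are deliberately left for the next poll, so each invocation does a bounded amount of work. An isolated illustration, not driver code:

    /* Reclaim against a snapshot; `cur` cannot advance mid-loop. */
    static unsigned int reclaim(unsigned int dirty, const unsigned int cur)
    {
            unsigned int processed = 0;

            while (dirty != cur) {
                    dirty++;        /* release one descriptor */
                    processed++;
            }
            return processed;
    }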
 
index 204fb3afb18292925b27f4cecf28c57c54ad3d45..6040293db9c1694083ace197771a09245feea637 100644 (file)
@@ -375,7 +375,6 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
                 XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
                 XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
                 XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
-       drvinfo->n_stats = XGBE_STATS_COUNT;
 }
 
 static u32 xgbe_get_msglevel(struct net_device *netdev)
index 48694c239d5cee024055731d2aa586e57fe529e5..872b7abb01962e2d70d327c6f7137241fedd9008 100644 (file)
@@ -233,10 +233,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = 0;
-       drvinfo->testinfo_len = 0;
-       drvinfo->regdump_len = atl1c_get_regs_len(netdev);
-       drvinfo->eedump_len = atl1c_get_eeprom_len(netdev);
 }
 
 static void atl1c_get_wol(struct net_device *netdev,
index 1be072f4afc2a261b5504302db5dba50b81ad268..8e3dbd4d9f79eab2ac291f903816667d6f13b72b 100644 (file)
@@ -316,10 +316,6 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = 0;
-       drvinfo->testinfo_len = 0;
-       drvinfo->regdump_len = atl1e_get_regs_len(netdev);
-       drvinfo->eedump_len = atl1e_get_eeprom_len(netdev);
 }
 
 static void atl1e_get_wol(struct net_device *netdev,
index eca1d113fee187ccb310fbd6c3ba2c4b535a9114..529bca718334acd15395c020a5a0ca4c86b4154a 100644 (file)
@@ -3388,7 +3388,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->eedump_len = ATL1_EEDUMP_LEN;
 }
 
 static void atl1_get_wol(struct net_device *netdev,
index 46a535318c7af64ea586baf230067f9e2d4ade5b..8f76f4558a88c15b0a14bb6ec3d9fb769fea9d0a 100644 (file)
@@ -2030,10 +2030,6 @@ static void atl2_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = 0;
-       drvinfo->testinfo_len = 0;
-       drvinfo->regdump_len = atl2_get_regs_len(netdev);
-       drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
 }
 
 static void atl2_get_wol(struct net_device *netdev,
index e930aa9a3cfb8ed3c4edad3fc03ae2b3ce6a586f..67a7d520d9f54fcf9eadf4fa81b05b17d76dd55a 100644 (file)
@@ -170,4 +170,23 @@ config SYSTEMPORT
          Broadcom BCM7xxx Set Top Box family chipset using an internal
          Ethernet switch.
 
+config BNXT
+       tristate "Broadcom NetXtreme-C/E support"
+       depends on PCI
+       select FW_LOADER
+       select LIBCRC32C
+       ---help---
+         This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
+         Ethernet cards.  To compile this driver as a module, choose M here:
+         the module will be called bnxt_en.  This is recommended.
+
+config BNXT_SRIOV
+       bool "Broadcom NetXtreme-C/E SR-IOV support"
+       depends on BNXT && PCI_IOV
+       default y
+       ---help---
+         This configuration parameter enables Single Root Input Output
+         Virtualization support in the NetXtreme-C/E products. This
+         allows for virtual function acceleration in virtual environments.
+
 endif # NET_VENDOR_BROADCOM
index e2a958a657e0bb8f816d205e0792d3fdfbfc70a4..00584d78b3e0425aed06b1e66eceb1aa91bde3bc 100644 (file)
@@ -12,3 +12,4 @@ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BGMAC) += bgmac.o
 obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
+obj-$(CONFIG_BNXT) += bnxt/
index a7f2cc3e485eebfae962fe24cfc1142021a74cde..8b1929e9f698c4d8da22573b483795af1bf5bc5b 100644 (file)
@@ -1333,7 +1333,6 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = BCM_ENET_STATS_LEN;
 }
 
 static int bcm_enet_get_sset_count(struct net_device *netdev,
@@ -2049,7 +2048,7 @@ static void swphy_poll_timer(unsigned long data)
 
        for (i = 0; i < priv->num_ports; i++) {
                struct bcm63xx_enetsw_port *port;
-               int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
+               int val, j, up, advertise, lpa, speed, duplex, media;
                int external_phy = bcm_enet_port_is_rgmii(i);
                u8 override;
 
@@ -2092,22 +2091,27 @@ static void swphy_poll_timer(unsigned long data)
                lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
                                           MII_LPA);
 
-               lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
-                                           MII_STAT1000);
-
                /* figure out media and duplex from advertise and LPA values */
                media = mii_nway_result(lpa & advertise);
                duplex = (media & ADVERTISE_FULL) ? 1 : 0;
-               if (lpa2 & LPA_1000FULL)
-                       duplex = 1;
-
-               if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
-                       speed = 1000;
-               else {
-                       if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
-                               speed = 100;
-                       else
-                               speed = 10;
+
+               if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
+                       speed = 100;
+               else
+                       speed = 10;
+
+               if (val & BMSR_ESTATEN) {
+                       advertise = bcmenet_sw_mdio_read(priv, external_phy,
+                                               port->phy_id, MII_CTRL1000);
+
+                       lpa = bcmenet_sw_mdio_read(priv, external_phy,
+                                               port->phy_id, MII_STAT1000);
+
+                       if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+                                       && lpa & (LPA_1000FULL | LPA_1000HALF)) {
+                               speed = 1000;
+                               duplex = (lpa & LPA_1000FULL);
+                       }
                }
 
                dev_info(&priv->pdev->dev,
@@ -2597,7 +2601,6 @@ static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
        strncpy(drvinfo->version, bcm_enet_driver_version, 32);
        strncpy(drvinfo->fw_version, "N/A", 32);
        strncpy(drvinfo->bus_info, "bcm63xx", 32);
-       drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
 }
 
 static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
index f1b5364f352170236269a4dbe2e805e35b97ce16..858106352ce9c5bad35da49ed2d5d41c16cda144 100644 (file)
@@ -287,7 +287,6 @@ static void bcm_sysport_get_drvinfo(struct net_device *dev,
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, "0.1", sizeof(info->version));
        strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
-       info->n_stats = BCM_SYSPORT_STATS_LEN;
 }
 
 static u32 bcm_sysport_get_msglvl(struct net_device *dev)
index aeb7ce64452e14cd3cbe49325f63bae2d99e3ef2..d84efcd34fac3da6ce1b87ad44a9f2ca449c0a02 100644 (file)
@@ -1090,10 +1090,6 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
        bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
 
        strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
-       info->n_stats = BNX2X_NUM_STATS;
-       info->testinfo_len = BNX2X_NUM_TESTS(bp);
-       info->eedump_len = bp->common.flash_size;
-       info->regdump_len = bnx2x_get_regs_len(dev);
 }
 
 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3351,6 +3347,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
                        udp_rss_requested = 0;
                else
                        return -EINVAL;
+
+               if (CHIP_IS_E1x(bp) && udp_rss_requested) {
+                       DP(BNX2X_MSG_ETHTOOL,
+                          "57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
+                       return -EINVAL;
+               }
+
                if ((info->flow_type == UDP_V4_FLOW) &&
                    (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
                        bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
new file mode 100644 (file)
index 0000000..97e78e2
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_BNXT) += bnxt_en.o
+
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
new file mode 100644 (file)
index 0000000..6c2e0c6
--- /dev/null
@@ -0,0 +1,5728 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/cpu_rmap.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#define BNXT_TX_TIMEOUT                (5 * HZ)
+
+static const char version[] =
+       "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
+#define BNXT_RX_COPY_THRESH 256
+
+#define BNXT_TX_PUSH_THRESH 92
+
+enum board_idx {
+       BCM57302,
+       BCM57304,
+       BCM57404,
+       BCM57406,
+       BCM57304_VF,
+       BCM57404_VF,
+};
+
+/* indexed by enum above */
+static const struct {
+       char *name;
+} board_info[] = {
+       { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+       { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+       { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
+       { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
+       { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+};
+
+static const struct pci_device_id bnxt_pci_tbl[] = {
+       { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
+       { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+       { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
+       { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+#ifdef CONFIG_BNXT_SRIOV
+       { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+#endif
+       { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
+
+static const u16 bnxt_vf_req_snif[] = {
+       HWRM_FUNC_CFG,
+       HWRM_PORT_PHY_QCFG,
+       HWRM_CFA_L2_FILTER_ALLOC,
+};
+
+static bool bnxt_vf_pciid(enum board_idx idx)
+{
+       return (idx == BCM57304_VF || idx == BCM57404_VF);
+}
+
+#define DB_CP_REARM_FLAGS      (DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS            (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define DB_CP_IRQ_DIS_FLAGS    (DB_KEY_CP | DB_IRQ_DIS)
+
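+/* Completion ring doorbell writes: REARM acks processed entries and
+ * re-enables the IRQ (no DB_IRQ_DIS), DB acks while leaving the IRQ
+ * disabled, and IRQ_DIS only disables the IRQ.
+ */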
+#define BNXT_CP_DB_REARM(db, raw_cons)                                 \
+               writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB(db, raw_cons)                                       \
+               writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB_IRQ_DIS(db)                                         \
+               writel(DB_CP_IRQ_DIS_FLAGS, db)
+
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+       /* Tell compiler to fetch tx indices from memory. */
+       barrier();
+
+       return bp->tx_ring_size -
+               ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
+
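+/* TX length hint flags for the BD, indexed by packet length in
+ * 512-byte units (length >> 9 in bnxt_start_xmit()).
+ */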
+static const u16 bnxt_lhint_arr[] = {
+       TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+       TX_BD_FLAGS_LHINT_512_TO_1023,
+       TX_BD_FLAGS_LHINT_1024_TO_2047,
+       TX_BD_FLAGS_LHINT_1024_TO_2047,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
+
+static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct tx_bd *txbd;
+       struct tx_bd_ext *txbd1;
+       struct netdev_queue *txq;
+       int i;
+       dma_addr_t mapping;
+       unsigned int length, pad = 0;
+       u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+       u16 prod, last_frag;
+       struct pci_dev *pdev = bp->pdev;
+       struct bnxt_napi *bnapi;
+       struct bnxt_tx_ring_info *txr;
+       struct bnxt_sw_tx_bd *tx_buf;
+
+       i = skb_get_queue_mapping(skb);
+       if (unlikely(i >= bp->tx_nr_rings)) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
+       bnapi = bp->bnapi[i];
+       txr = &bnapi->tx_ring;
+       txq = netdev_get_tx_queue(dev, i);
+       prod = txr->tx_prod;
+
+       free_size = bnxt_tx_avail(bp, txr);
+       if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+               netif_tx_stop_queue(txq);
+               return NETDEV_TX_BUSY;
+       }
+
+       length = skb->len;
+       len = skb_headlen(skb);
+       last_frag = skb_shinfo(skb)->nr_frags;
+
+       txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+       txbd->tx_bd_opaque = prod;
+
+       tx_buf = &txr->tx_buf_ring[prod];
+       tx_buf->skb = skb;
+       tx_buf->nr_frags = last_frag;
+
+       vlan_tag_flags = 0;
+       cfa_action = 0;
+       if (skb_vlan_tag_present(skb)) {
+               vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+                                skb_vlan_tag_get(skb);
+               /* Currently supports 8021Q, 8021AD VLAN offloads;
+                * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
+                */
+               if (skb->vlan_proto == htons(ETH_P_8021Q))
+                       vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+       }
+
+       if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+               struct tx_push_bd *push = txr->tx_push;
+               struct tx_bd *tx_push = &push->txbd1;
+               struct tx_bd_ext *tx_push1 = &push->txbd2;
+               void *pdata = tx_push1 + 1;
+               int j;
+
+               /* Set COAL_NOW to be ready quickly for the next push */
+               tx_push->tx_bd_len_flags_type =
+                       cpu_to_le32((length << TX_BD_LEN_SHIFT) |
+                                       TX_BD_TYPE_LONG_TX_BD |
+                                       TX_BD_FLAGS_LHINT_512_AND_SMALLER |
+                                       TX_BD_FLAGS_COAL_NOW |
+                                       TX_BD_FLAGS_PACKET_END |
+                                       (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+                       tx_push1->tx_bd_hsize_lflags =
+                                       cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+               else
+                       tx_push1->tx_bd_hsize_lflags = 0;
+
+               tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+               tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+
+               skb_copy_from_linear_data(skb, pdata, len);
+               pdata += len;
+               for (j = 0; j < last_frag; j++) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+                       void *fptr;
+
+                       fptr = skb_frag_address_safe(frag);
+                       if (!fptr)
+                               goto normal_tx;
+
+                       memcpy(pdata, fptr, skb_frag_size(frag));
+                       pdata += skb_frag_size(frag);
+               }
+
+               memcpy(txbd, tx_push, sizeof(*txbd));
+               prod = NEXT_TX(prod);
+               txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+               memcpy(txbd, tx_push1, sizeof(*txbd));
+               prod = NEXT_TX(prod);
+               push->doorbell =
+                       cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+               txr->tx_prod = prod;
+
+               netdev_tx_sent_queue(txq, skb->len);
+
+               __iowrite64_copy(txr->tx_doorbell, push,
+                                (length + sizeof(*push) + 8) / 8);
+
+               tx_buf->is_push = 1;
+
+               goto tx_done;
+       }
+
+normal_tx:
+       if (length < BNXT_MIN_PKT_SIZE) {
+               pad = BNXT_MIN_PKT_SIZE - length;
+               if (skb_pad(skb, pad)) {
+                       /* SKB already freed. */
+                       tx_buf->skb = NULL;
+                       return NETDEV_TX_OK;
+               }
+               length = BNXT_MIN_PKT_SIZE;
+       }
+
+       mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+       if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
+               dev_kfree_skb_any(skb);
+               tx_buf->skb = NULL;
+               return NETDEV_TX_OK;
+       }
+
+       dma_unmap_addr_set(tx_buf, mapping, mapping);
+       flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+               ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+
+       txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+       prod = NEXT_TX(prod);
+       txbd1 = (struct tx_bd_ext *)
+               &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+       txbd1->tx_bd_hsize_lflags = 0;
+       if (skb_is_gso(skb)) {
+               u32 hdr_len;
+
+               if (skb->encapsulation)
+                       hdr_len = skb_inner_network_offset(skb) +
+                               skb_inner_network_header_len(skb) +
+                               inner_tcp_hdrlen(skb);
+               else
+                       hdr_len = skb_transport_offset(skb) +
+                               tcp_hdrlen(skb);
+
+               txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+                                       TX_BD_FLAGS_T_IPID |
+                                       (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+               length = skb_shinfo(skb)->gso_size;
+               txbd1->tx_bd_mss = cpu_to_le32(length);
+               length += hdr_len;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               txbd1->tx_bd_hsize_lflags =
+                       cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+               txbd1->tx_bd_mss = 0;
+       }
+
+       length >>= 9;
+       flags |= bnxt_lhint_arr[length];
+       txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+
+       txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+       txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+       for (i = 0; i < last_frag; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               prod = NEXT_TX(prod);
+               txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+               len = skb_frag_size(frag);
+               mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+                                          DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+                       goto tx_dma_error;
+
+               tx_buf = &txr->tx_buf_ring[prod];
+               dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+               txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+               flags = len << TX_BD_LEN_SHIFT;
+               txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+       }
+
+       flags &= ~TX_BD_LEN;
+       txbd->tx_bd_len_flags_type =
+               cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
+                           TX_BD_FLAGS_PACKET_END);
+
+       netdev_tx_sent_queue(txq, skb->len);
+
+       /* Sync BD data before updating doorbell */
+       wmb();
+
+       prod = NEXT_TX(prod);
+       txr->tx_prod = prod;
+
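+       /* Publish the new producer index through the TX doorbell (the
+        * write is issued twice; assumed intentional).
+        */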
+       writel(DB_KEY_TX | prod, txr->tx_doorbell);
+       writel(DB_KEY_TX | prod, txr->tx_doorbell);
+
+tx_done:
+
+       mmiowb();
+
+       if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+               netif_tx_stop_queue(txq);
+
+               /* netif_tx_stop_queue() must be done before checking
+                * tx index in bnxt_tx_avail() below, because in
+                * bnxt_tx_int(), we update tx index before checking for
+                * netif_tx_queue_stopped().
+                */
+               smp_mb();
+               if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
+                       netif_tx_wake_queue(txq);
+       }
+       return NETDEV_TX_OK;
+
+tx_dma_error:
+       last_frag = i;
+
+       /* start back at beginning and unmap skb */
+       prod = txr->tx_prod;
+       tx_buf = &txr->tx_buf_ring[prod];
+       tx_buf->skb = NULL;
+       dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+                        skb_headlen(skb), PCI_DMA_TODEVICE);
+       prod = NEXT_TX(prod);
+
+       /* unmap remaining mapped pages */
+       for (i = 0; i < last_frag; i++) {
+               prod = NEXT_TX(prod);
+               tx_buf = &txr->tx_buf_ring[prod];
+               dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+                              skb_frag_size(&skb_shinfo(skb)->frags[i]),
+                              PCI_DMA_TODEVICE);
+       }
+
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+       struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+       int index = bnapi->index;
+       struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+       u16 cons = txr->tx_cons;
+       struct pci_dev *pdev = bp->pdev;
+       int i;
+       unsigned int tx_bytes = 0;
+
+       for (i = 0; i < nr_pkts; i++) {
+               struct bnxt_sw_tx_bd *tx_buf;
+               struct sk_buff *skb;
+               int j, last;
+
+               tx_buf = &txr->tx_buf_ring[cons];
+               cons = NEXT_TX(cons);
+               skb = tx_buf->skb;
+               tx_buf->skb = NULL;
+
+               if (tx_buf->is_push) {
+                       tx_buf->is_push = 0;
+                       goto next_tx_int;
+               }
+
+               dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+                                skb_headlen(skb), PCI_DMA_TODEVICE);
+               last = tx_buf->nr_frags;
+
+               for (j = 0; j < last; j++) {
+                       cons = NEXT_TX(cons);
+                       tx_buf = &txr->tx_buf_ring[cons];
+                       dma_unmap_page(
+                               &pdev->dev,
+                               dma_unmap_addr(tx_buf, mapping),
+                               skb_frag_size(&skb_shinfo(skb)->frags[j]),
+                               PCI_DMA_TODEVICE);
+               }
+
+next_tx_int:
+               cons = NEXT_TX(cons);
+
+               tx_bytes += skb->len;
+               dev_kfree_skb_any(skb);
+       }
+
+       netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
+       txr->tx_cons = cons;
+
+       /* Need to make the tx_cons update visible to bnxt_start_xmit()
+        * before checking for netif_tx_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that bnxt_start_xmit()
+        * will miss it and cause the queue to be stopped forever.
+        */
+       smp_mb();
+
+       if (unlikely(netif_tx_queue_stopped(txq)) &&
+           (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+               __netif_tx_lock(txq, smp_processor_id());
+               if (netif_tx_queue_stopped(txq) &&
+                   bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+                   txr->dev_state != BNXT_DEV_STATE_CLOSING)
+                       netif_tx_wake_queue(txq);
+               __netif_tx_unlock(txq);
+       }
+}
+
+static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+                                      gfp_t gfp)
+{
+       u8 *data;
+       struct pci_dev *pdev = bp->pdev;
+
+       data = kmalloc(bp->rx_buf_size, gfp);
+       if (!data)
+               return NULL;
+
+       *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
+                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+
+       if (dma_mapping_error(&pdev->dev, *mapping)) {
+               kfree(data);
+               data = NULL;
+       }
+       return data;
+}
+
+static inline int bnxt_alloc_rx_data(struct bnxt *bp,
+                                    struct bnxt_rx_ring_info *rxr,
+                                    u16 prod, gfp_t gfp)
+{
+       struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+       struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+       u8 *data;
+       dma_addr_t mapping;
+
+       data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+       if (!data)
+               return -ENOMEM;
+
+       rx_buf->data = data;
+       dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+       rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+       return 0;
+}
+
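+/* Recycle an unconsumed RX buffer: move it from the consumer slot to
+ * the current producer slot, carrying its DMA mapping along so no
+ * reallocation or remapping is needed.
+ */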
+static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
+                              u8 *data)
+{
+       u16 prod = rxr->rx_prod;
+       struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+       struct rx_bd *cons_bd, *prod_bd;
+
+       prod_rx_buf = &rxr->rx_buf_ring[prod];
+       cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+       prod_rx_buf->data = data;
+
+       dma_unmap_addr_set(prod_rx_buf, mapping,
+                          dma_unmap_addr(cons_rx_buf, mapping));
+
+       prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+       cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+
+       prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
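+/* Find the next free slot in the aggregation ring bitmap, wrapping to
+ * the beginning once the end is reached.
+ */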
+static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+       u16 next, max = rxr->rx_agg_bmap_size;
+
+       next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+       if (next >= max)
+               next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+       return next;
+}
+
+static inline int bnxt_alloc_rx_page(struct bnxt *bp,
+                                    struct bnxt_rx_ring_info *rxr,
+                                    u16 prod, gfp_t gfp)
+{
+       struct rx_bd *rxbd =
+               &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+       struct bnxt_sw_rx_agg_bd *rx_agg_buf;
+       struct pci_dev *pdev = bp->pdev;
+       struct page *page;
+       dma_addr_t mapping;
+       u16 sw_prod = rxr->rx_sw_agg_prod;
+
+       page = alloc_page(gfp);
+       if (!page)
+               return -ENOMEM;
+
+       mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+                              PCI_DMA_FROMDEVICE);
+       if (dma_mapping_error(&pdev->dev, mapping)) {
+               __free_page(page);
+               return -EIO;
+       }
+
+       if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+               sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+       __set_bit(sw_prod, rxr->rx_agg_bmap);
+       rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
+       rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+
+       rx_agg_buf->page = page;
+       rx_agg_buf->mapping = mapping;
+       rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+       rxbd->rx_bd_opaque = sw_prod;
+       return 0;
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+                                  u32 agg_bufs)
+{
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+       u16 prod = rxr->rx_agg_prod;
+       u16 sw_prod = rxr->rx_sw_agg_prod;
+       u32 i;
+
+       for (i = 0; i < agg_bufs; i++) {
+               u16 cons;
+               struct rx_agg_cmp *agg;
+               struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+               struct rx_bd *prod_bd;
+               struct page *page;
+
+               agg = (struct rx_agg_cmp *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+               cons = agg->rx_agg_cmp_opaque;
+               __clear_bit(cons, rxr->rx_agg_bmap);
+
+               if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+                       sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+               __set_bit(sw_prod, rxr->rx_agg_bmap);
+               prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
+               cons_rx_buf = &rxr->rx_agg_ring[cons];
+
+               /* It is possible for sw_prod to be equal to cons, so
+                * set cons_rx_buf->page to NULL first.
+                */
+               page = cons_rx_buf->page;
+               cons_rx_buf->page = NULL;
+               prod_rx_buf->page = page;
+
+               prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+               prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+               prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+               prod_bd->rx_bd_opaque = sw_prod;
+
+               prod = NEXT_RX_AGG(prod);
+               sw_prod = NEXT_RX_AGG(sw_prod);
+               cp_cons = NEXT_CMP(cp_cons);
+       }
+       rxr->rx_agg_prod = prod;
+       rxr->rx_sw_agg_prod = sw_prod;
+}
+
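+/* Zero-copy RX for larger packets: a replacement buffer is allocated
+ * first; only then is the DMA buffer turned into an skb.  On allocation
+ * failure the original buffer is recycled and the packet is dropped.
+ */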
+static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+                                  struct bnxt_rx_ring_info *rxr, u16 cons,
+                                  u16 prod, u8 *data, dma_addr_t dma_addr,
+                                  unsigned int len)
+{
+       int err;
+       struct sk_buff *skb;
+
+       err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+       if (unlikely(err)) {
+               bnxt_reuse_rx_data(rxr, cons, data);
+               return NULL;
+       }
+
+       skb = build_skb(data, 0);
+       dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+                        PCI_DMA_FROMDEVICE);
+       if (!skb) {
+               kfree(data);
+               return NULL;
+       }
+
+       skb_reserve(skb, BNXT_RX_OFFSET);
+       skb_put(skb, len);
+       return skb;
+}
+
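+/* Attach the aggregation pages of a jumbo/TPA packet to the head skb as
+ * page frags.  If a replacement page cannot be allocated, the skb is
+ * freed and the remaining aggregation buffers are recycled.
+ */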
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+                                    struct sk_buff *skb, u16 cp_cons,
+                                    u32 agg_bufs)
+{
+       struct pci_dev *pdev = bp->pdev;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+       u16 prod = rxr->rx_agg_prod;
+       u32 i;
+
+       for (i = 0; i < agg_bufs; i++) {
+               u16 cons, frag_len;
+               struct rx_agg_cmp *agg;
+               struct bnxt_sw_rx_agg_bd *cons_rx_buf;
+               struct page *page;
+               dma_addr_t mapping;
+
+               agg = (struct rx_agg_cmp *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+               cons = agg->rx_agg_cmp_opaque;
+               frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+                           RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+               cons_rx_buf = &rxr->rx_agg_ring[cons];
+               skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+               __clear_bit(cons, rxr->rx_agg_bmap);
+
+               /* It is possible for bnxt_alloc_rx_page() to allocate
+                * a sw_prod index that equals the cons index, so we
+                * need to clear the cons entry now.
+                */
+               mapping = dma_unmap_addr(cons_rx_buf, mapping);
+               page = cons_rx_buf->page;
+               cons_rx_buf->page = NULL;
+
+               if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+                       struct skb_shared_info *shinfo;
+                       unsigned int nr_frags;
+
+                       shinfo = skb_shinfo(skb);
+                       nr_frags = --shinfo->nr_frags;
+                       __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
+
+                       dev_kfree_skb(skb);
+
+                       cons_rx_buf->page = page;
+
+                       /* Update prod since possibly some pages have been
+                        * allocated already.
+                        */
+                       rxr->rx_agg_prod = prod;
+                       bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+                       return NULL;
+               }
+
+               dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+                              PCI_DMA_FROMDEVICE);
+
+               skb->data_len += frag_len;
+               skb->len += frag_len;
+               skb->truesize += PAGE_SIZE;
+
+               prod = NEXT_RX_AGG(prod);
+               cp_cons = NEXT_CMP(cp_cons);
+       }
+       rxr->rx_agg_prod = prod;
+       return skb;
+}
+
+static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+                              u8 agg_bufs, u32 *raw_cons)
+{
+       u16 last;
+       struct rx_agg_cmp *agg;
+
+       *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+       last = RING_CMP(*raw_cons);
+       agg = (struct rx_agg_cmp *)
+               &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
+       return RX_AGG_CMP_VALID(agg, *raw_cons);
+}
+
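+/* Copy a small packet into a freshly allocated skb so the original DMA
+ * buffer can be reused in place; callers use this for packets up to
+ * bp->rx_copy_thresh bytes.
+ */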
+static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+                                           unsigned int len,
+                                           dma_addr_t mapping)
+{
+       struct bnxt *bp = bnapi->bp;
+       struct pci_dev *pdev = bp->pdev;
+       struct sk_buff *skb;
+
+       skb = napi_alloc_skb(&bnapi->napi, len);
+       if (!skb)
+               return NULL;
+
+       dma_sync_single_for_cpu(&pdev->dev, mapping,
+                               bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+
+       memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+
+       dma_sync_single_for_device(&pdev->dev, mapping,
+                                  bp->rx_copy_thresh,
+                                  PCI_DMA_FROMDEVICE);
+
+       skb_put(skb, len);
+       return skb;
+}
+
+static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+                          struct rx_tpa_start_cmp *tpa_start,
+                          struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+       u8 agg_id = TPA_START_AGG_ID(tpa_start);
+       u16 cons, prod;
+       struct bnxt_tpa_info *tpa_info;
+       struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+       struct rx_bd *prod_bd;
+       dma_addr_t mapping;
+
+       cons = tpa_start->rx_tpa_start_cmp_opaque;
+       prod = rxr->rx_prod;
+       cons_rx_buf = &rxr->rx_buf_ring[cons];
+       prod_rx_buf = &rxr->rx_buf_ring[prod];
+       tpa_info = &rxr->rx_tpa[agg_id];
+
+       prod_rx_buf->data = tpa_info->data;
+
+       mapping = tpa_info->mapping;
+       dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+
+       prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+       prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+       tpa_info->data = cons_rx_buf->data;
+       cons_rx_buf->data = NULL;
+       tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+
+       tpa_info->len =
+               le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+                               RX_TPA_START_CMP_LEN_SHIFT;
+       if (likely(TPA_START_HASH_VALID(tpa_start))) {
+               u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
+
+               tpa_info->hash_type = PKT_HASH_TYPE_L4;
+               tpa_info->gso_type = SKB_GSO_TCPV4;
+               /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+               if (hash_type == 3)
+                       tpa_info->gso_type = SKB_GSO_TCPV6;
+               tpa_info->rss_hash =
+                       le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+       } else {
+               tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+               tpa_info->gso_type = 0;
+               if (netif_msg_rx_err(bp))
+                       netdev_warn(bp->dev, "TPA packet without valid hash\n");
+       }
+       tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+       tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+
+       rxr->rx_prod = NEXT_RX(prod);
+       cons = NEXT_RX(cons);
+       cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+       bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+       rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+       cons_rx_buf->data = NULL;
+}
+
+static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
+                          u16 cp_cons, u32 agg_bufs)
+{
+       if (agg_bufs)
+               bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+}
+
+#define BNXT_IPV4_HDR_SIZE     (sizeof(struct iphdr) + sizeof(struct tcphdr))
+#define BNXT_IPV6_HDR_SIZE     (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
+
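+/* Finish GRO bookkeeping for a TPA-coalesced packet: set the segment
+ * count and gso size/type, locate the TCP header and recompute its
+ * pseudo-header checksum, then mark UDP tunnel encapsulation if the
+ * inner headers indicate one.
+ */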
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
+                                          struct rx_tpa_end_cmp *tpa_end,
+                                          struct rx_tpa_end_cmp_ext *tpa_end1,
+                                          struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+       struct tcphdr *th;
+       int payload_off, tcp_opt_len = 0;
+       int len, nw_off;
+
+       NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
+       skb_shinfo(skb)->gso_size =
+               le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+       skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+       payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+                      RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+                     RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+       if (TPA_END_GRO_TS(tpa_end))
+               tcp_opt_len = 12;
+
+       if (tpa_info->gso_type == SKB_GSO_TCPV4) {
+               struct iphdr *iph;
+
+               nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
+                        ETH_HLEN;
+               skb_set_network_header(skb, nw_off);
+               iph = ip_hdr(skb);
+               skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+               len = skb->len - skb_transport_offset(skb);
+               th = tcp_hdr(skb);
+               th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+       } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
+               struct ipv6hdr *iph;
+
+               nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
+                        ETH_HLEN;
+               skb_set_network_header(skb, nw_off);
+               iph = ipv6_hdr(skb);
+               skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+               len = skb->len - skb_transport_offset(skb);
+               th = tcp_hdr(skb);
+               th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+       } else {
+               dev_kfree_skb_any(skb);
+               return NULL;
+       }
+       tcp_gro_complete(skb);
+
+       if (nw_off) { /* tunnel */
+               struct udphdr *uh = NULL;
+
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       struct iphdr *iph = (struct iphdr *)skb->data;
+
+                       if (iph->protocol == IPPROTO_UDP)
+                               uh = (struct udphdr *)(iph + 1);
+               } else {
+                       struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+                       if (iph->nexthdr == IPPROTO_UDP)
+                               uh = (struct udphdr *)(iph + 1);
+               }
+               if (uh) {
+                       if (uh->check)
+                               skb_shinfo(skb)->gso_type |=
+                                       SKB_GSO_UDP_TUNNEL_CSUM;
+                       else
+                               skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+               }
+       }
+#endif
+       return skb;
+}
+
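+/* Handle a TPA_END completion: build the skb for the coalesced packet
+ * (copying small packets, otherwise handing off the buffer and
+ * replenishing it), attach any aggregation pages, then apply RSS hash,
+ * VLAN and checksum metadata before optional GRO fixups.
+ */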
+static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+                                          struct bnxt_napi *bnapi,
+                                          u32 *raw_cons,
+                                          struct rx_tpa_end_cmp *tpa_end,
+                                          struct rx_tpa_end_cmp_ext *tpa_end1,
+                                          bool *agg_event)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+       u8 agg_id = TPA_END_AGG_ID(tpa_end);
+       u8 *data, agg_bufs;
+       u16 cp_cons = RING_CMP(*raw_cons);
+       unsigned int len;
+       struct bnxt_tpa_info *tpa_info;
+       dma_addr_t mapping;
+       struct sk_buff *skb;
+
+       tpa_info = &rxr->rx_tpa[agg_id];
+       data = tpa_info->data;
+       prefetch(data);
+       len = tpa_info->len;
+       mapping = tpa_info->mapping;
+
+       agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+                   RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+
+       if (agg_bufs) {
+               if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+                       return ERR_PTR(-EBUSY);
+
+               *agg_event = true;
+               cp_cons = NEXT_CMP(cp_cons);
+       }
+
+       if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+               bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+               netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+                           agg_bufs, (int)MAX_SKB_FRAGS);
+               return NULL;
+       }
+
+       if (len <= bp->rx_copy_thresh) {
+               skb = bnxt_copy_skb(bnapi, data, len, mapping);
+               if (!skb) {
+                       bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+                       return NULL;
+               }
+       } else {
+               u8 *new_data;
+               dma_addr_t new_mapping;
+
+               new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+               if (!new_data) {
+                       bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+                       return NULL;
+               }
+
+               tpa_info->data = new_data;
+               tpa_info->mapping = new_mapping;
+
+               skb = build_skb(data, 0);
+               dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
+                                PCI_DMA_FROMDEVICE);
+
+               if (!skb) {
+                       kfree(data);
+                       bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+                       return NULL;
+               }
+               skb_reserve(skb, BNXT_RX_OFFSET);
+               skb_put(skb, len);
+       }
+
+       if (agg_bufs) {
+               skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+               if (!skb) {
+                       /* Page reuse already handled by bnxt_rx_pages(). */
+                       return NULL;
+               }
+       }
+       skb->protocol = eth_type_trans(skb, bp->dev);
+
+       if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+               skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+       if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+               netdev_features_t features = skb->dev->features;
+               u16 vlan_proto = tpa_info->metadata >>
+                       RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+               if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+                    vlan_proto == ETH_P_8021Q) ||
+                   ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+                    vlan_proto == ETH_P_8021AD)) {
+                       __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+                                              tpa_info->metadata &
+                                              RX_CMP_FLAGS2_METADATA_VID_MASK);
+               }
+       }
+
+       skb_checksum_none_assert(skb);
+       if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               skb->csum_level =
+                       (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+       }
+
+       if (TPA_END_GRO(tpa_end))
+               skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+
+       return skb;
+}
+
+/* returns the following:
+ * 1       - 1 packet successfully received
+ * 0       - successful TPA_START, packet not completed yet
+ * -EBUSY  - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO    - packet aborted due to hw error indicated in BD
+ */
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+                      bool *agg_event)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+       struct net_device *dev = bp->dev;
+       struct rx_cmp *rxcmp;
+       struct rx_cmp_ext *rxcmp1;
+       u32 tmp_raw_cons = *raw_cons;
+       u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+       struct bnxt_sw_rx_bd *rx_buf;
+       unsigned int len;
+       u8 *data, agg_bufs, cmp_type;
+       dma_addr_t dma_addr;
+       struct sk_buff *skb;
+       int rc = 0;
+
+       rxcmp = (struct rx_cmp *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+       tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+       cp_cons = RING_CMP(tmp_raw_cons);
+       rxcmp1 = (struct rx_cmp_ext *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+       if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+               return -EBUSY;
+
+       cmp_type = RX_CMP_TYPE(rxcmp);
+
+       prod = rxr->rx_prod;
+
+       if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
+               bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+                              (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+               goto next_rx_no_prod;
+
+       } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+               skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+                                  (struct rx_tpa_end_cmp *)rxcmp,
+                                  (struct rx_tpa_end_cmp_ext *)rxcmp1,
+                                  agg_event);
+
+               if (unlikely(IS_ERR(skb)))
+                       return -EBUSY;
+
+               rc = -ENOMEM;
+               if (likely(skb)) {
+                       skb_record_rx_queue(skb, bnapi->index);
+                       skb_mark_napi_id(skb, &bnapi->napi);
+                       if (bnxt_busy_polling(bnapi))
+                               netif_receive_skb(skb);
+                       else
+                               napi_gro_receive(&bnapi->napi, skb);
+                       rc = 1;
+               }
+               goto next_rx_no_prod;
+       }
+
+       cons = rxcmp->rx_cmp_opaque;
+       rx_buf = &rxr->rx_buf_ring[cons];
+       data = rx_buf->data;
+       prefetch(data);
+
+       agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
+                               RX_CMP_AGG_BUFS_SHIFT;
+
+       if (agg_bufs) {
+               if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+                       return -EBUSY;
+
+               cp_cons = NEXT_CMP(cp_cons);
+               *agg_event = true;
+       }
+
+       rx_buf->data = NULL;
+       if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+               bnxt_reuse_rx_data(rxr, cons, data);
+               if (agg_bufs)
+                       bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+               rc = -EIO;
+               goto next_rx;
+       }
+
+       len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+       dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+       if (len <= bp->rx_copy_thresh) {
+               skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+               bnxt_reuse_rx_data(rxr, cons, data);
+               if (!skb) {
+                       rc = -ENOMEM;
+                       goto next_rx;
+               }
+       } else {
+               skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+               if (!skb) {
+                       rc = -ENOMEM;
+                       goto next_rx;
+               }
+       }
+
+       if (agg_bufs) {
+               skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+               if (!skb) {
+                       rc = -ENOMEM;
+                       goto next_rx;
+               }
+       }
+
+       if (RX_CMP_HASH_VALID(rxcmp)) {
+               u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+               enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+
+               /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+               if (hash_type != 1 && hash_type != 3)
+                       type = PKT_HASH_TYPE_L3;
+               skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+       }
+
+       skb->protocol = eth_type_trans(skb, dev);
+
+       if (rxcmp1->rx_cmp_flags2 &
+           cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
+               netdev_features_t features = skb->dev->features;
+               u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+               u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+               if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+                    vlan_proto == ETH_P_8021Q) ||
+                   ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+                    vlan_proto == ETH_P_8021AD))
+                       __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+                                              meta_data &
+                                              RX_CMP_FLAGS2_METADATA_VID_MASK);
+       }
+
+       skb_checksum_none_assert(skb);
+       if (RX_CMP_L4_CS_OK(rxcmp1)) {
+               if (dev->features & NETIF_F_RXCSUM) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+               }
+       } else {
+               if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
+                       cpr->rx_l4_csum_errors++;
+       }
+
+       skb_record_rx_queue(skb, bnapi->index);
+       skb_mark_napi_id(skb, &bnapi->napi);
+       if (bnxt_busy_polling(bnapi))
+               netif_receive_skb(skb);
+       else
+               napi_gro_receive(&bnapi->napi, skb);
+       rc = 1;
+
+next_rx:
+       rxr->rx_prod = NEXT_RX(prod);
+
+next_rx_no_prod:
+       *raw_cons = tmp_raw_cons;
+
+       return rc;
+}
+
+static int bnxt_async_event_process(struct bnxt *bp,
+                                   struct hwrm_async_event_cmpl *cmpl)
+{
+       u16 event_id = le16_to_cpu(cmpl->event_id);
+
+       /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
+       switch (event_id) {
+       case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+               set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+               break;
+       default:
+               netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
+                          event_id);
+               break;
+       }
+       return 0;
+}
+
+static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+{
+       u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
+       struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+       struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
+                               (struct hwrm_fwd_req_cmpl *)txcmp;
+
+       switch (cmpl_type) {
+       case CMPL_BASE_TYPE_HWRM_DONE:
+               seq_id = le16_to_cpu(h_cmpl->sequence_id);
+               if (seq_id == bp->hwrm_intr_seq_id)
+                       bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+               else
+                       netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+               break;
+
+       case CMPL_BASE_TYPE_HWRM_FWD_REQ:
+               vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
+
+               if ((vf_id < bp->pf.first_vf_id) ||
+                   (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
+                       netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
+                                  vf_id);
+                       return -EINVAL;
+               }
+
+               set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
+               set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+               break;
+
+       case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+               bnxt_async_event_process(bp,
+                                        (struct hwrm_async_event_cmpl *)txcmp);
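+               /* fall through */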
+
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+{
+       struct bnxt_napi *bnapi = dev_instance;
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       u32 cons = RING_CMP(cpr->cp_raw_cons);
+
+       prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+       napi_schedule(&bnapi->napi);
+       return IRQ_HANDLED;
+}
+
+static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+       u32 raw_cons = cpr->cp_raw_cons;
+       u16 cons = RING_CMP(raw_cons);
+       struct tx_cmp *txcmp;
+
+       txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+       return TX_CMP_VALID(txcmp, raw_cons);
+}
+
+#define CAG_LEGACY_INT_STATUS  0x2014
+
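+/* INTx interrupt handler.  The line may be shared, so when no work is
+ * pending the CAG status register is consulted to reject interrupts
+ * raised for other devices or rings.
+ */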
+static irqreturn_t bnxt_inta(int irq, void *dev_instance)
+{
+       struct bnxt_napi *bnapi = dev_instance;
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       u32 cons = RING_CMP(cpr->cp_raw_cons);
+       u32 int_status;
+
+       prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+
+       if (!bnxt_has_work(bp, cpr)) {
+               int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS);
+               /* Return if the interrupt was not raised for this ring. */
+               if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
+                       return IRQ_NONE;
+       }
+
+       /* disable ring IRQ */
+       BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+
+       /* Return here if interrupt is shared and is disabled. */
+       if (unlikely(atomic_read(&bp->intr_sem) != 0))
+               return IRQ_HANDLED;
+
+       napi_schedule(&bnapi->napi);
+       return IRQ_HANDLED;
+}
+
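+/* Service one completion ring: reap TX completions and RX packets until
+ * the NAPI budget is exhausted or the ring runs dry, ack the completion
+ * ring, then ring the RX/agg doorbells for any replenished buffers.
+ */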
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       u32 raw_cons = cpr->cp_raw_cons;
+       u32 cons;
+       int tx_pkts = 0;
+       int rx_pkts = 0;
+       bool rx_event = false;
+       bool agg_event = false;
+       struct tx_cmp *txcmp;
+
+       while (1) {
+               int rc;
+
+               cons = RING_CMP(raw_cons);
+               txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+               if (!TX_CMP_VALID(txcmp, raw_cons))
+                       break;
+
+               if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+                       tx_pkts++;
+                       /* return full budget so NAPI will complete. */
+                       if (unlikely(tx_pkts > bp->tx_wake_thresh))
+                               rx_pkts = budget;
+               } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+                       rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+                       if (likely(rc >= 0))
+                               rx_pkts += rc;
+                       else if (rc == -EBUSY)  /* partial completion */
+                               break;
+                       rx_event = true;
+               } else if (unlikely((TX_CMP_TYPE(txcmp) ==
+                                    CMPL_BASE_TYPE_HWRM_DONE) ||
+                                   (TX_CMP_TYPE(txcmp) ==
+                                    CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
+                                   (TX_CMP_TYPE(txcmp) ==
+                                    CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+                       bnxt_hwrm_handler(bp, txcmp);
+               }
+               raw_cons = NEXT_RAW_CMP(raw_cons);
+
+               if (rx_pkts == budget)
+                       break;
+       }
+
+       cpr->cp_raw_cons = raw_cons;
+       /* ACK completion ring before freeing tx ring and producing new
+        * buffers in rx/agg rings to prevent overflowing the completion
+        * ring.
+        */
+       BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+       if (tx_pkts)
+               bnxt_tx_int(bp, bnapi, tx_pkts);
+
+       if (rx_event) {
+               struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+
+               writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+               writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+               if (agg_event) {
+                       writel(DB_KEY_RX | rxr->rx_agg_prod,
+                              rxr->rx_agg_doorbell);
+                       writel(DB_KEY_RX | rxr->rx_agg_prod,
+                              rxr->rx_agg_doorbell);
+               }
+       }
+       return rx_pkts;
+}
+
+static int bnxt_poll(struct napi_struct *napi, int budget)
+{
+       struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       int work_done = 0;
+
+       if (!bnxt_lock_napi(bnapi))
+               return budget;
+
+       while (1) {
+               work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+
+               if (work_done >= budget)
+                       break;
+
+               if (!bnxt_has_work(bp, cpr)) {
+                       napi_complete(napi);
+                       BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+                       break;
+               }
+       }
+       mmiowb();
+       bnxt_unlock_napi(bnapi);
+       return work_done;
+}
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int bnxt_busy_poll(struct napi_struct *napi)
+{
+       struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       int rx_work, budget = 4;
+
+       if (atomic_read(&bp->intr_sem) != 0)
+               return LL_FLUSH_FAILED;
+
+       if (!bnxt_lock_poll(bnapi))
+               return LL_FLUSH_BUSY;
+
+       rx_work = bnxt_poll_work(bp, bnapi, budget);
+
+       BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+       bnxt_unlock_poll(bnapi);
+       return rx_work;
+}
+#endif
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+       int i, max_idx;
+       struct pci_dev *pdev = bp->pdev;
+
+       if (!bp->bnapi)
+               return;
+
+       max_idx = bp->tx_nr_pages * TX_DESC_CNT;
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_tx_ring_info *txr;
+               int j;
+
+               if (!bnapi)
+                       continue;
+
+               txr = &bnapi->tx_ring;
+               for (j = 0; j < max_idx;) {
+                       struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+                       struct sk_buff *skb = tx_buf->skb;
+                       int k, last;
+
+                       if (!skb) {
+                               j++;
+                               continue;
+                       }
+
+                       tx_buf->skb = NULL;
+
+                       if (tx_buf->is_push) {
+                               dev_kfree_skb(skb);
+                               j += 2;
+                               continue;
+                       }
+
+                       dma_unmap_single(&pdev->dev,
+                                        dma_unmap_addr(tx_buf, mapping),
+                                        skb_headlen(skb),
+                                        PCI_DMA_TODEVICE);
+
+                       last = tx_buf->nr_frags;
+                       j += 2;
+                       for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+                               skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+                               tx_buf = &txr->tx_buf_ring[j];
+                               dma_unmap_page(
+                                       &pdev->dev,
+                                       dma_unmap_addr(tx_buf, mapping),
+                                       skb_frag_size(frag), PCI_DMA_TODEVICE);
+                       }
+                       dev_kfree_skb(skb);
+               }
+               netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+       }
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+       int i, max_idx, max_agg_idx;
+       struct pci_dev *pdev = bp->pdev;
+
+       if (!bp->bnapi)
+               return;
+
+       max_idx = bp->rx_nr_pages * RX_DESC_CNT;
+       max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_rx_ring_info *rxr;
+               int j;
+
+               if (!bnapi)
+                       continue;
+
+               rxr = &bnapi->rx_ring;
+
+               if (rxr->rx_tpa) {
+                       for (j = 0; j < MAX_TPA; j++) {
+                               struct bnxt_tpa_info *tpa_info =
+                                                       &rxr->rx_tpa[j];
+                               u8 *data = tpa_info->data;
+
+                               if (!data)
+                                       continue;
+
+                               dma_unmap_single(
+                                       &pdev->dev,
+                                       dma_unmap_addr(tpa_info, mapping),
+                                       bp->rx_buf_use_size,
+                                       PCI_DMA_FROMDEVICE);
+
+                               tpa_info->data = NULL;
+
+                               kfree(data);
+                       }
+               }
+
+               for (j = 0; j < max_idx; j++) {
+                       struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+                       u8 *data = rx_buf->data;
+
+                       if (!data)
+                               continue;
+
+                       dma_unmap_single(&pdev->dev,
+                                        dma_unmap_addr(rx_buf, mapping),
+                                        bp->rx_buf_use_size,
+                                        PCI_DMA_FROMDEVICE);
+
+                       rx_buf->data = NULL;
+
+                       kfree(data);
+               }
+
+               for (j = 0; j < max_agg_idx; j++) {
+                       struct bnxt_sw_rx_agg_bd *rx_agg_buf =
+                               &rxr->rx_agg_ring[j];
+                       struct page *page = rx_agg_buf->page;
+
+                       if (!page)
+                               continue;
+
+                       dma_unmap_page(&pdev->dev,
+                                      dma_unmap_addr(rx_agg_buf, mapping),
+                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+                       rx_agg_buf->page = NULL;
+                       __clear_bit(j, rxr->rx_agg_bmap);
+
+                       __free_page(page);
+               }
+       }
+}
+
+static void bnxt_free_skbs(struct bnxt *bp)
+{
+       bnxt_free_tx_skbs(bp);
+       bnxt_free_rx_skbs(bp);
+}
+
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+       struct pci_dev *pdev = bp->pdev;
+       int i;
+
+       for (i = 0; i < ring->nr_pages; i++) {
+               if (!ring->pg_arr[i])
+                       continue;
+
+               dma_free_coherent(&pdev->dev, ring->page_size,
+                                 ring->pg_arr[i], ring->dma_arr[i]);
+
+               ring->pg_arr[i] = NULL;
+       }
+       if (ring->pg_tbl) {
+               dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
+                                 ring->pg_tbl, ring->pg_tbl_map);
+               ring->pg_tbl = NULL;
+       }
+       if (ring->vmem_size && *ring->vmem) {
+               vfree(*ring->vmem);
+               *ring->vmem = NULL;
+       }
+}
+
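+/* Allocate the coherent DMA pages backing a hardware ring.  Rings that
+ * span multiple pages also get a table of page DMA addresses (pg_tbl),
+ * presumably walked by the chip, plus optional vmalloc'ed software
+ * state (vmem).
+ */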
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+       int i;
+       struct pci_dev *pdev = bp->pdev;
+
+       if (ring->nr_pages > 1) {
+               ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
+                                                 ring->nr_pages * 8,
+                                                 &ring->pg_tbl_map,
+                                                 GFP_KERNEL);
+               if (!ring->pg_tbl)
+                       return -ENOMEM;
+       }
+
+       for (i = 0; i < ring->nr_pages; i++) {
+               ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+                                                    ring->page_size,
+                                                    &ring->dma_arr[i],
+                                                    GFP_KERNEL);
+               if (!ring->pg_arr[i])
+                       return -ENOMEM;
+
+               if (ring->nr_pages > 1)
+                       ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+       }
+
+       if (ring->vmem_size) {
+               *ring->vmem = vzalloc(ring->vmem_size);
+               if (!(*ring->vmem))
+                       return -ENOMEM;
+       }
+       return 0;
+}
+
+static void bnxt_free_rx_rings(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_rx_ring_info *rxr;
+               struct bnxt_ring_struct *ring;
+
+               if (!bnapi)
+                       continue;
+
+               rxr = &bnapi->rx_ring;
+
+               kfree(rxr->rx_tpa);
+               rxr->rx_tpa = NULL;
+
+               kfree(rxr->rx_agg_bmap);
+               rxr->rx_agg_bmap = NULL;
+
+               ring = &rxr->rx_ring_struct;
+               bnxt_free_ring(bp, ring);
+
+               ring = &rxr->rx_agg_ring_struct;
+               bnxt_free_ring(bp, ring);
+       }
+}
+
+static int bnxt_alloc_rx_rings(struct bnxt *bp)
+{
+       int i, rc, agg_rings = 0, tpa_rings = 0;
+
+       if (bp->flags & BNXT_FLAG_AGG_RINGS)
+               agg_rings = 1;
+
+       if (bp->flags & BNXT_FLAG_TPA)
+               tpa_rings = 1;
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_rx_ring_info *rxr;
+               struct bnxt_ring_struct *ring;
+
+               if (!bnapi)
+                       continue;
+
+               rxr = &bnapi->rx_ring;
+               ring = &rxr->rx_ring_struct;
+
+               rc = bnxt_alloc_ring(bp, ring);
+               if (rc)
+                       return rc;
+
+               if (agg_rings) {
+                       u16 mem_size;
+
+                       ring = &rxr->rx_agg_ring_struct;
+                       rc = bnxt_alloc_ring(bp, ring);
+                       if (rc)
+                               return rc;
+
+                       rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+                       mem_size = rxr->rx_agg_bmap_size / 8;
+                       rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+                       if (!rxr->rx_agg_bmap)
+                               return -ENOMEM;
+
+                       if (tpa_rings) {
+                               rxr->rx_tpa = kcalloc(MAX_TPA,
+                                               sizeof(struct bnxt_tpa_info),
+                                               GFP_KERNEL);
+                               if (!rxr->rx_tpa)
+                                       return -ENOMEM;
+                       }
+               }
+       }
+       return 0;
+}
+
+static void bnxt_free_tx_rings(struct bnxt *bp)
+{
+       int i;
+       struct pci_dev *pdev = bp->pdev;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_tx_ring_info *txr;
+               struct bnxt_ring_struct *ring;
+
+               if (!bnapi)
+                       continue;
+
+               txr = &bnapi->tx_ring;
+
+               if (txr->tx_push) {
+                       dma_free_coherent(&pdev->dev, bp->tx_push_size,
+                                         txr->tx_push, txr->tx_push_mapping);
+                       txr->tx_push = NULL;
+               }
+
+               ring = &txr->tx_ring_struct;
+
+               bnxt_free_ring(bp, ring);
+       }
+}
+
+static int bnxt_alloc_tx_rings(struct bnxt *bp)
+{
+       int i, j, rc;
+       struct pci_dev *pdev = bp->pdev;
+
+       bp->tx_push_size = 0;
+       if (bp->tx_push_thresh) {
+               int push_size;
+
+               push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
+                                       bp->tx_push_thresh);
+
+               if (push_size > 128) {
+                       push_size = 0;
+                       bp->tx_push_thresh = 0;
+               }
+
+               bp->tx_push_size = push_size;
+       }
+
+       for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_tx_ring_info *txr;
+               struct bnxt_ring_struct *ring;
+
+               if (!bnapi)
+                       continue;
+
+               txr = &bnapi->tx_ring;
+               ring = &txr->tx_ring_struct;
+
+               rc = bnxt_alloc_ring(bp, ring);
+               if (rc)
+                       return rc;
+
+               if (bp->tx_push_size) {
+                       struct tx_bd *txbd;
+                       dma_addr_t mapping;
+
+                       /* One pre-allocated DMA buffer to back up a
+                        * TX push operation
+                        */
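+                       /* tx_bd_haddr below points just past the push BD
+                        * header inside this buffer, presumably so the NIC
+                        * can fall back to DMA for the same payload when
+                        * the inline push is not consumed.
+                        */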
+                       txr->tx_push = dma_alloc_coherent(&pdev->dev,
+                                               bp->tx_push_size,
+                                               &txr->tx_push_mapping,
+                                               GFP_KERNEL);
+
+                       if (!txr->tx_push)
+                               return -ENOMEM;
+
+                       txbd = &txr->tx_push->txbd1;
+
+                       mapping = txr->tx_push_mapping +
+                               sizeof(struct tx_push_bd);
+                       txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+                       memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
+               }
+               ring->queue_id = bp->q_info[j].queue_id;
+               if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+                       j++;
+       }
+       return 0;
+}
+
+static void bnxt_free_cp_rings(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr;
+               struct bnxt_ring_struct *ring;
+
+               if (!bnapi)
+                       continue;
+
+               cpr = &bnapi->cp_ring;
+               ring = &cpr->cp_ring_struct;
+
+               bnxt_free_ring(bp, ring);
+       }
+}
+
+static int bnxt_alloc_cp_rings(struct bnxt *bp)
+{
+       int i, rc;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr;
+               struct bnxt_ring_struct *ring;
+
+               if (!bnapi)
+                       continue;
+
+               cpr = &bnapi->cp_ring;
+               ring = &cpr->cp_ring_struct;
+
+               rc = bnxt_alloc_ring(bp, ring);
+               if (rc)
+                       return rc;
+       }
+       return 0;
+}
+
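+/* Wire each logical ring to its backing storage: every completion, rx,
+ * rx-agg and tx ring gets its page count, hardware descriptor page size,
+ * descriptor page/DMA-address arrays, and (except for completion rings)
+ * the vmem area that tracks the software buffers.
+ */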
+static void bnxt_init_ring_struct(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr;
+               struct bnxt_rx_ring_info *rxr;
+               struct bnxt_tx_ring_info *txr;
+               struct bnxt_ring_struct *ring;
+
+               if (!bnapi)
+                       continue;
+
+               cpr = &bnapi->cp_ring;
+               ring = &cpr->cp_ring_struct;
+               ring->nr_pages = bp->cp_nr_pages;
+               ring->page_size = HW_CMPD_RING_SIZE;
+               ring->pg_arr = (void **)cpr->cp_desc_ring;
+               ring->dma_arr = cpr->cp_desc_mapping;
+               ring->vmem_size = 0;
+
+               rxr = &bnapi->rx_ring;
+               ring = &rxr->rx_ring_struct;
+               ring->nr_pages = bp->rx_nr_pages;
+               ring->page_size = HW_RXBD_RING_SIZE;
+               ring->pg_arr = (void **)rxr->rx_desc_ring;
+               ring->dma_arr = rxr->rx_desc_mapping;
+               ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+               ring->vmem = (void **)&rxr->rx_buf_ring;
+
+               ring = &rxr->rx_agg_ring_struct;
+               ring->nr_pages = bp->rx_agg_nr_pages;
+               ring->page_size = HW_RXBD_RING_SIZE;
+               ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
+               ring->dma_arr = rxr->rx_agg_desc_mapping;
+               ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+               ring->vmem = (void **)&rxr->rx_agg_ring;
+
+               txr = &bnapi->tx_ring;
+               ring = &txr->tx_ring_struct;
+               ring->nr_pages = bp->tx_nr_pages;
+               ring->page_size = HW_RXBD_RING_SIZE;
+               ring->pg_arr = (void **)txr->tx_desc_ring;
+               ring->dma_arr = txr->tx_desc_mapping;
+               ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+               ring->vmem = (void **)&txr->tx_buf_ring;
+       }
+}
+
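+/* Stamp every rx BD in the ring with the given length/flags/type word and
+ * a monotonically increasing opaque value; the opaque is presumably echoed
+ * back in rx completions so the driver can locate the buffer.
+ */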
+static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
+{
+       int i;
+       u32 prod;
+       struct rx_bd **rx_buf_ring;
+
+       rx_buf_ring = (struct rx_bd **)ring->pg_arr;
+       for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+               int j;
+               struct rx_bd *rxbd;
+
+               rxbd = rx_buf_ring[i];
+               if (!rxbd)
+                       continue;
+
+               for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+                       rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+                       rxbd->rx_bd_opaque = prod;
+               }
+       }
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+       struct net_device *dev = bp->dev;
+       struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
+       struct bnxt_rx_ring_info *rxr;
+       struct bnxt_ring_struct *ring;
+       u32 prod, type;
+       int i;
+
+       if (!bnapi)
+               return -EINVAL;
+
+       type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+               RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+       if (NET_IP_ALIGN == 2)
+               type |= RX_BD_FLAGS_SOP;
+
+       rxr = &bnapi->rx_ring;
+       ring = &rxr->rx_ring_struct;
+       bnxt_init_rxbd_pages(ring, type);
+
+       prod = rxr->rx_prod;
+       for (i = 0; i < bp->rx_ring_size; i++) {
+               if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+                       netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+                                   ring_nr, i, bp->rx_ring_size);
+                       break;
+               }
+               prod = NEXT_RX(prod);
+       }
+       rxr->rx_prod = prod;
+       ring->fw_ring_id = INVALID_HW_RING_ID;
+
+       if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+               return 0;
+
+       ring = &rxr->rx_agg_ring_struct;
+
+       type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+               RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+       bnxt_init_rxbd_pages(ring, type);
+
+       prod = rxr->rx_agg_prod;
+       for (i = 0; i < bp->rx_agg_ring_size; i++) {
+               if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+                       netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+                                   ring_nr, i, bp->rx_agg_ring_size);
+                       break;
+               }
+               prod = NEXT_RX_AGG(prod);
+       }
+       rxr->rx_agg_prod = prod;
+       ring->fw_ring_id = INVALID_HW_RING_ID;
+
+       if (bp->flags & BNXT_FLAG_TPA) {
+               if (rxr->rx_tpa) {
+                       u8 *data;
+                       dma_addr_t mapping;
+
+                       for (i = 0; i < MAX_TPA; i++) {
+                               data = __bnxt_alloc_rx_data(bp, &mapping,
+                                                           GFP_KERNEL);
+                               if (!data)
+                                       return -ENOMEM;
+
+                               rxr->rx_tpa[i].data = data;
+                               rxr->rx_tpa[i].mapping = mapping;
+                       }
+               } else {
+                       netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+static int bnxt_init_rx_rings(struct bnxt *bp)
+{
+       int i, rc = 0;
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               rc = bnxt_init_one_rx_ring(bp, i);
+               if (rc)
+                       break;
+       }
+
+       return rc;
+}
+
+static int bnxt_init_tx_rings(struct bnxt *bp)
+{
+       u16 i;
+
+       bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+                                  MAX_SKB_FRAGS + 1);
+
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+               struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+
+               ring->fw_ring_id = INVALID_HW_RING_ID;
+       }
+
+       return 0;
+}
+
+static void bnxt_free_ring_grps(struct bnxt *bp)
+{
+       kfree(bp->grp_info);
+       bp->grp_info = NULL;
+}
+
+static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
+{
+       int i;
+
+       if (irq_re_init) {
+               bp->grp_info = kcalloc(bp->cp_nr_rings,
+                                      sizeof(struct bnxt_ring_grp_info),
+                                      GFP_KERNEL);
+               if (!bp->grp_info)
+                       return -ENOMEM;
+       }
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               if (irq_re_init)
+                       bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+               bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+               bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+               bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+               bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+       }
+       return 0;
+}
+
+static void bnxt_free_vnics(struct bnxt *bp)
+{
+       kfree(bp->vnic_info);
+       bp->vnic_info = NULL;
+       bp->nr_vnics = 0;
+}
+
+static int bnxt_alloc_vnics(struct bnxt *bp)
+{
+       int num_vnics = 1;
+
+#ifdef CONFIG_RFS_ACCEL
+       if (bp->flags & BNXT_FLAG_RFS)
+               num_vnics += bp->rx_nr_rings;
+#endif
+
+       bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
+                               GFP_KERNEL);
+       if (!bp->vnic_info)
+               return -ENOMEM;
+
+       bp->nr_vnics = num_vnics;
+       return 0;
+}
+
+static void bnxt_init_vnics(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->nr_vnics; i++) {
+               struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+               vnic->fw_vnic_id = INVALID_HW_RING_ID;
+               vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+               vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
+
+               if (bp->vnic_info[i].rss_hash_key) {
+                       if (i == 0)
+                               prandom_bytes(vnic->rss_hash_key,
+                                             HW_HASH_KEY_SIZE);
+                       else
+                               memcpy(vnic->rss_hash_key,
+                                      bp->vnic_info[0].rss_hash_key,
+                                      HW_HASH_KEY_SIZE);
+               }
+       }
+}
+
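+/* Round a ring size up to a whole number of descriptor pages, then round
+ * the page count up to a power of two so that (pages * desc_per_pg - 1)
+ * works as a ring index mask.  E.g., assuming desc_per_pg == 64:
+ * ring_size 200 -> 3 full pages, +1 -> 4 (already a power of two);
+ * ring_size 1024 -> 16, +1 -> 17 -> rounded up to 32.
+ */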
+static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
+{
+       int pages;
+
+       pages = ring_size / desc_per_pg;
+
+       if (!pages)
+               return 1;
+
+       pages++;
+
+       while (pages & (pages - 1))
+               pages++;
+
+       return pages;
+}
+
+static void bnxt_set_tpa_flags(struct bnxt *bp)
+{
+       bp->flags &= ~BNXT_FLAG_TPA;
+       if (bp->dev->features & NETIF_F_LRO)
+               bp->flags |= BNXT_FLAG_LRO;
+       if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+               bp->flags |= BNXT_FLAG_GRO;
+}
+
+/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
+ * be set on entry.
+ */
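+/* Worked example, assuming a 1500 byte MTU, 64-byte cache lines and 4 KB
+ * pages: rx_size = SKB_DATA_ALIGN(1500 + 14 + 2 + 8) = 1536 bytes, and
+ * rx_space adds NET_SKB_PAD plus the cache-aligned skb_shared_info on
+ * top, which still fits within one page, so BNXT_FLAG_JUMBO stays clear.
+ */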
+void bnxt_set_ring_params(struct bnxt *bp)
+{
+       u32 ring_size, rx_size, rx_space;
+       u32 agg_factor = 0, agg_ring_size = 0;
+
+       /* 8 for CRC and VLAN */
+       rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+       rx_space = rx_size + NET_SKB_PAD +
+               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
+       ring_size = bp->rx_ring_size;
+       bp->rx_agg_ring_size = 0;
+       bp->rx_agg_nr_pages = 0;
+
+       if (bp->flags & BNXT_FLAG_TPA)
+               agg_factor = 4;
+
+       bp->flags &= ~BNXT_FLAG_JUMBO;
+       if (rx_space > PAGE_SIZE) {
+               u32 jumbo_factor;
+
+               bp->flags |= BNXT_FLAG_JUMBO;
+               jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+               if (jumbo_factor > agg_factor)
+                       agg_factor = jumbo_factor;
+       }
+       agg_ring_size = ring_size * agg_factor;
+
+       if (agg_ring_size) {
+               bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
+                                                       RX_DESC_CNT);
+               if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+                       u32 tmp = agg_ring_size;
+
+                       bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+                       agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+                       netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
+                                   tmp, agg_ring_size);
+               }
+               bp->rx_agg_ring_size = agg_ring_size;
+               bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+               rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+               rx_space = rx_size + NET_SKB_PAD +
+                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       }
+
+       bp->rx_buf_use_size = rx_size;
+       bp->rx_buf_size = rx_space;
+
+       bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
+       bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
+
+       ring_size = bp->tx_ring_size;
+       bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
+       bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
+
+       ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+       bp->cp_ring_size = ring_size;
+
+       bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
+       if (bp->cp_nr_pages > MAX_CP_PAGES) {
+               bp->cp_nr_pages = MAX_CP_PAGES;
+               bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+               netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
+                           ring_size, bp->cp_ring_size);
+       }
+       bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
+       bp->cp_ring_mask = bp->cp_bit - 1;
+}
+
+static void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+       int i;
+       struct bnxt_vnic_info *vnic;
+       struct pci_dev *pdev = bp->pdev;
+
+       if (!bp->vnic_info)
+               return;
+
+       for (i = 0; i < bp->nr_vnics; i++) {
+               vnic = &bp->vnic_info[i];
+
+               kfree(vnic->fw_grp_ids);
+               vnic->fw_grp_ids = NULL;
+
+               kfree(vnic->uc_list);
+               vnic->uc_list = NULL;
+
+               if (vnic->mc_list) {
+                       dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+                                         vnic->mc_list, vnic->mc_list_mapping);
+                       vnic->mc_list = NULL;
+               }
+
+               if (vnic->rss_table) {
+                       dma_free_coherent(&pdev->dev, PAGE_SIZE,
+                                         vnic->rss_table,
+                                         vnic->rss_table_dma_addr);
+                       vnic->rss_table = NULL;
+               }
+
+               vnic->rss_hash_key = NULL;
+               vnic->flags = 0;
+       }
+}
+
+static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+       int i, rc = 0, size;
+       struct bnxt_vnic_info *vnic;
+       struct pci_dev *pdev = bp->pdev;
+       int max_rings;
+
+       for (i = 0; i < bp->nr_vnics; i++) {
+               vnic = &bp->vnic_info[i];
+
+               if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
+                       int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+                       if (mem_size > 0) {
+                               vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+                               if (!vnic->uc_list) {
+                                       rc = -ENOMEM;
+                                       goto out;
+                               }
+                       }
+               }
+
+               if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
+                       vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
+                       vnic->mc_list =
+                               dma_alloc_coherent(&pdev->dev,
+                                                  vnic->mc_list_size,
+                                                  &vnic->mc_list_mapping,
+                                                  GFP_KERNEL);
+                       if (!vnic->mc_list) {
+                               rc = -ENOMEM;
+                               goto out;
+                       }
+               }
+
+               if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+                       max_rings = bp->rx_nr_rings;
+               else
+                       max_rings = 1;
+
+               vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
+               if (!vnic->fw_grp_ids) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               /* Allocate rss table and hash key */
+               vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+                                                    &vnic->rss_table_dma_addr,
+                                                    GFP_KERNEL);
+               if (!vnic->rss_table) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+
+               vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+               vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+       }
+       return 0;
+
+out:
+       return rc;
+}
+
+static void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+
+       dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+                         bp->hwrm_cmd_resp_dma_addr);
+
+       bp->hwrm_cmd_resp_addr = NULL;
+       if (bp->hwrm_dbg_resp_addr) {
+               dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
+                                 bp->hwrm_dbg_resp_addr,
+                                 bp->hwrm_dbg_resp_dma_addr);
+
+               bp->hwrm_dbg_resp_addr = NULL;
+       }
+}
+
+static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+
+       bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+                                                  &bp->hwrm_cmd_resp_dma_addr,
+                                                  GFP_KERNEL);
+       if (!bp->hwrm_cmd_resp_addr)
+               return -ENOMEM;
+       bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
+                                                   HWRM_DBG_REG_BUF_SIZE,
+                                                   &bp->hwrm_dbg_resp_dma_addr,
+                                                   GFP_KERNEL);
+       if (!bp->hwrm_dbg_resp_addr)
+               netdev_warn(bp->dev, "failed to alloc debug register dma mem\n");
+
+       return 0;
+}
+
+static void bnxt_free_stats(struct bnxt *bp)
+{
+       u32 size, i;
+       struct pci_dev *pdev = bp->pdev;
+
+       if (!bp->bnapi)
+               return;
+
+       size = sizeof(struct ctx_hw_stats);
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               if (cpr->hw_stats) {
+                       dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
+                                         cpr->hw_stats_map);
+                       cpr->hw_stats = NULL;
+               }
+       }
+}
+
+static int bnxt_alloc_stats(struct bnxt *bp)
+{
+       u32 size, i;
+       struct pci_dev *pdev = bp->pdev;
+
+       size = sizeof(struct ctx_hw_stats);
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
+                                                  &cpr->hw_stats_map,
+                                                  GFP_KERNEL);
+               if (!cpr->hw_stats)
+                       return -ENOMEM;
+
+               cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+       }
+       return 0;
+}
+
+static void bnxt_clear_ring_indices(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr;
+               struct bnxt_rx_ring_info *rxr;
+               struct bnxt_tx_ring_info *txr;
+
+               if (!bnapi)
+                       continue;
+
+               cpr = &bnapi->cp_ring;
+               cpr->cp_raw_cons = 0;
+
+               txr = &bnapi->tx_ring;
+               txr->tx_prod = 0;
+               txr->tx_cons = 0;
+
+               rxr = &bnapi->rx_ring;
+               rxr->rx_prod = 0;
+               rxr->rx_agg_prod = 0;
+               rxr->rx_sw_agg_prod = 0;
+       }
+}
+
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+{
+#ifdef CONFIG_RFS_ACCEL
+       int i;
+
+       /* We are under rtnl_lock and all our NAPIs have been disabled,
+        * so it is safe to delete the hash table.
+        */
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+               struct hlist_head *head;
+               struct hlist_node *tmp;
+               struct bnxt_ntuple_filter *fltr;
+
+               head = &bp->ntp_fltr_hash_tbl[i];
+               hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+                       hlist_del(&fltr->hash);
+                       kfree(fltr);
+               }
+       }
+       if (irq_reinit) {
+               kfree(bp->ntp_fltr_bmap);
+               bp->ntp_fltr_bmap = NULL;
+       }
+       bp->ntp_fltr_count = 0;
+#endif
+}
+
+static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+       int i, rc = 0;
+
+       if (!(bp->flags & BNXT_FLAG_RFS))
+               return 0;
+
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
+               INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+
+       bp->ntp_fltr_count = 0;
+       bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+                                   sizeof(long), GFP_KERNEL);
+
+       if (!bp->ntp_fltr_bmap)
+               rc = -ENOMEM;
+
+       return rc;
+#else
+       return 0;
+#endif
+}
+
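+/* Teardown mirrors bnxt_alloc_mem(): vnic attributes, rings and ntuple
+ * filters always go; the stats, ring groups, vnics and the bnapi array
+ * itself are freed only when interrupts are being re-initialized too.
+ */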
+static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+{
+       bnxt_free_vnic_attributes(bp);
+       bnxt_free_tx_rings(bp);
+       bnxt_free_rx_rings(bp);
+       bnxt_free_cp_rings(bp);
+       bnxt_free_ntp_fltrs(bp, irq_re_init);
+       if (irq_re_init) {
+               bnxt_free_stats(bp);
+               bnxt_free_ring_grps(bp);
+               bnxt_free_vnics(bp);
+               kfree(bp->bnapi);
+               bp->bnapi = NULL;
+       } else {
+               bnxt_clear_ring_indices(bp);
+       }
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
+{
+       int i, rc, size, arr_size;
+       void *bnapi;
+
+       if (irq_re_init) {
+               /* Allocate the bnapi pointer array and the per-ring
+                * bnxt_napi structs for all queues in one block
+                */
+               arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
+                               bp->cp_nr_rings);
+               size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
+               bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
+               if (!bnapi)
+                       return -ENOMEM;
+
+               bp->bnapi = bnapi;
+               bnapi += arr_size;
+               for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
+                       bp->bnapi[i] = bnapi;
+                       bp->bnapi[i]->index = i;
+                       bp->bnapi[i]->bp = bp;
+               }
+
+               rc = bnxt_alloc_stats(bp);
+               if (rc)
+                       goto alloc_mem_err;
+
+               rc = bnxt_alloc_ntp_fltrs(bp);
+               if (rc)
+                       goto alloc_mem_err;
+
+               rc = bnxt_alloc_vnics(bp);
+               if (rc)
+                       goto alloc_mem_err;
+       }
+
+       bnxt_init_ring_struct(bp);
+
+       rc = bnxt_alloc_rx_rings(bp);
+       if (rc)
+               goto alloc_mem_err;
+
+       rc = bnxt_alloc_tx_rings(bp);
+       if (rc)
+               goto alloc_mem_err;
+
+       rc = bnxt_alloc_cp_rings(bp);
+       if (rc)
+               goto alloc_mem_err;
+
+       bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
+                                 BNXT_VNIC_UCAST_FLAG;
+       rc = bnxt_alloc_vnic_attributes(bp);
+       if (rc)
+               goto alloc_mem_err;
+       return 0;
+
+alloc_mem_err:
+       bnxt_free_mem(bp, true);
+       return rc;
+}
+
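+/* Fill in the common HWRM request header: the request type and target
+ * completion ring share one 32-bit word, the target function id and the
+ * sequence id share another (the sequence is patched in later by
+ * _hwrm_send_message()), and resp_addr is the DMA address of the response
+ * page set up in bnxt_alloc_hwrm_resources().
+ */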
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
+                           u16 cmpl_ring, u16 target_id)
+{
+       struct hwrm_cmd_req_hdr *req = request;
+
+       req->cmpl_ring_req_type =
+               cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
+       req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+       req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+}
+
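+/* Send one HWRM message and wait for its response: the request is copied
+ * into BAR0 and the channel doorbell at BAR0 + 0x100 is rung.  Completion
+ * is detected either through the response completion interrupt (whose
+ * handler is expected to set hwrm_intr_seq_id to HWRM_SEQ_ID_INVALID) or,
+ * when no completion ring is given, by polling the response length and
+ * then the valid bit in the last word of the response.  Each poll
+ * iteration sleeps 600-800 us, so the timeout argument is roughly in
+ * those units.  Callers must hold hwrm_cmd_lock; hwrm_send_message()
+ * below is the locked wrapper.
+ */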
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+       int i, intr_process, rc;
+       struct hwrm_cmd_req_hdr *req = msg;
+       u32 *data = msg;
+       __le32 *resp_len, *valid;
+       u16 cp_ring_id, len = 0;
+       struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+       req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+       memset(resp, 0, PAGE_SIZE);
+       cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
+                     HWRM_CMPL_RING_MASK) >>
+                    HWRM_CMPL_RING_SFT;
+       intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+       /* Write request msg to hwrm channel */
+       __iowrite32_copy(bp->bar0, data, msg_len / 4);
+
+       /* currently supports only one outstanding message */
+       if (intr_process)
+               bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
+                                      HWRM_SEQ_ID_MASK;
+
+       /* Ring channel doorbell */
+       writel(1, bp->bar0 + 0x100);
+
+       i = 0;
+       if (intr_process) {
+               /* Wait until hwrm response cmpl interrupt is processed */
+               while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+                      i++ < timeout) {
+                       usleep_range(600, 800);
+               }
+
+               if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+                       netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+                                  req->cmpl_ring_req_type);
+                       return -1;
+               }
+       } else {
+               /* Check if response len is updated */
+               resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+               for (i = 0; i < timeout; i++) {
+                       len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+                             HWRM_RESP_LEN_SFT;
+                       if (len)
+                               break;
+                       usleep_range(600, 800);
+               }
+
+               if (i >= timeout) {
+                       netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+                                  timeout, req->cmpl_ring_req_type,
+                                  req->target_id_seq_id, *resp_len);
+                       return -1;
+               }
+
+               /* Last word of resp contains valid bit */
+               valid = bp->hwrm_cmd_resp_addr + len - 4;
+               for (i = 0; i < timeout; i++) {
+                       if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+                               break;
+                       usleep_range(600, 800);
+               }
+
+               if (i >= timeout) {
+                       netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+                                  timeout, req->cmpl_ring_req_type,
+                                  req->target_id_seq_id, len, *valid);
+                       return -1;
+               }
+       }
+
+       rc = le16_to_cpu(resp->error_code);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+                          le16_to_cpu(resp->req_type),
+                          le16_to_cpu(resp->seq_id), rc);
+               return rc;
+       }
+       return 0;
+}
+
+int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+       int rc;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, msg, msg_len, timeout);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+       struct hwrm_func_drv_rgtr_input req = {0};
+       int i;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+       req.enables =
+               cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+                           FUNC_DRV_RGTR_REQ_ENABLES_VER |
+                           FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+       /* TODO: the async event fwd bits are not defined yet; the firmware
+        * only checks that this field is non-zero to enable async event
+        * forwarding
+        */
+       req.async_event_fwd[0] |= cpu_to_le32(1);
+       req.os_type = cpu_to_le16(1);
+       req.ver_maj = DRV_VER_MAJ;
+       req.ver_min = DRV_VER_MIN;
+       req.ver_upd = DRV_VER_UPD;
+
+       if (BNXT_PF(bp)) {
+               unsigned long vf_req_snif_bmap[4];
+               u32 *data = (u32 *)vf_req_snif_bmap;
+
+               memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
+               for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
+                       __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
+
+               for (i = 0; i < 8; i++) {
+                       req.vf_req_fwd[i] = cpu_to_le32(*data);
+                       data++;
+               }
+               req.enables |=
+                       cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
+       }
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
+{
+       u32 rc = 0;
+       struct hwrm_tunnel_dst_port_free_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
+       req.tunnel_type = tunnel_type;
+
+       switch (tunnel_type) {
+       case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
+               req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+               break;
+       case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
+               req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+               break;
+       default:
+               break;
+       }
+
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
+                          rc);
+       return rc;
+}
+
+static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
+                                          u8 tunnel_type)
+{
+       u32 rc = 0;
+       struct hwrm_tunnel_dst_port_alloc_input req = {0};
+       struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+
+       req.tunnel_type = tunnel_type;
+       req.tunnel_dst_port_val = port;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
+                          rc);
+               goto err_out;
+       }
+
+       if (tunnel_type == TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
+               bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+       else if (tunnel_type == TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
+               bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+err_out:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
+{
+       struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
+       req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+       req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+       req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+       req.mask = cpu_to_le32(vnic->rx_mask);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+                                           struct bnxt_ntuple_filter *fltr)
+{
+       struct hwrm_cfa_ntuple_filter_free_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
+       req.ntuple_filter_id = fltr->filter_id;
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#define BNXT_NTP_FLTR_FLAGS                                    \
+       (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
+
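+/* Program one exact-match ntuple (aRFS) filter: the full IPv4 5-tuple
+ * plus the source MAC, anchored on the default L2 filter, steering
+ * matching packets to the vnic of the chosen rx queue (vnic 0 is the
+ * default vnic, hence the fltr->rxq + 1 lookup).
+ */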
+static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+                                            struct bnxt_ntuple_filter *fltr)
+{
+       int rc = 0;
+       struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+       struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+               bp->hwrm_cmd_resp_addr;
+       struct flow_keys *keys = &fltr->fkeys;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
+       req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
+
+       req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+
+       req.ethertype = htons(ETH_P_IP);
+       memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+       req.ipaddr_type = 4;
+       req.ip_protocol = keys->basic.ip_proto;
+
+       req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+       req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+       req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+       req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+
+       req.src_port = keys->ports.src;
+       req.src_port_mask = cpu_to_be16(0xffff);
+       req.dst_port = keys->ports.dst;
+       req.dst_port_mask = cpu_to_be16(0xffff);
+
+       req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               fltr->filter_id = resp->ntuple_filter_id;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+#endif
+
+static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
+                                    u8 *mac_addr)
+{
+       u32 rc = 0;
+       struct hwrm_cfa_l2_filter_alloc_input req = {0};
+       struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+       req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
+                               CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+       req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+       req.enables =
+               cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+                           CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
+                           CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+       memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+       req.l2_addr_mask[0] = 0xff;
+       req.l2_addr_mask[1] = 0xff;
+       req.l2_addr_mask[2] = 0xff;
+       req.l2_addr_mask[3] = 0xff;
+       req.l2_addr_mask[4] = 0xff;
+       req.l2_addr_mask[5] = 0xff;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
+                                                       resp->l2_filter_id;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+{
+       u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
+       int rc = 0;
+
+       /* Any associated ntuple filters will also be cleared by firmware. */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < num_of_vnics; i++) {
+               struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+               for (j = 0; j < vnic->uc_filter_count; j++) {
+                       struct hwrm_cfa_l2_filter_free_input req = {0};
+
+                       bnxt_hwrm_cmd_hdr_init(bp, &req,
+                                              HWRM_CFA_L2_FILTER_FREE, -1, -1);
+
+                       req.l2_filter_id = vnic->fw_l2_filter_id[j];
+
+                       rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                               HWRM_CMD_TIMEOUT);
+               }
+               vnic->uc_filter_count = 0;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       return rc;
+}
+
+static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+{
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       struct hwrm_vnic_tpa_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+
+       if (tpa_flags) {
+               u16 mss = bp->dev->mtu - 40;
+               u32 nsegs, n, segs = 0, flags;
+
+               flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+                       VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+                       VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+                       VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+                       VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+               if (tpa_flags & BNXT_FLAG_GRO)
+                       flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+               req.flags = cpu_to_le32(flags);
+
+               req.enables =
+                       cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+                                   VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+
+               /* The number of aggregation segments is in log2 units, and
+                * the first packet is not counted in these units.
+                */
+               if (mss <= PAGE_SIZE) {
+                       n = PAGE_SIZE / mss;
+                       nsegs = (MAX_SKB_FRAGS - 1) * n;
+               } else {
+                       n = mss / PAGE_SIZE;
+                       if (mss & (PAGE_SIZE - 1))
+                               n++;
+                       nsegs = (MAX_SKB_FRAGS - n) / n;
+               }
+
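+               /* E.g., assuming 4 KB pages, an MSS of 1460 and
+                * MAX_SKB_FRAGS == 17: n = 4096 / 1460 = 2,
+                * nsegs = (17 - 1) * 2 = 32, and segs = ilog2(32) = 5.
+                */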
+               segs = ilog2(nsegs);
+               req.max_agg_segs = cpu_to_le16(segs);
+               req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+       }
+       req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+       u32 i, j, max_rings;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       struct hwrm_vnic_rss_cfg_input req = {0};
+
+       if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+       if (set_rss) {
+               vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
+                                BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
+                                BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
+                                BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+
+               req.hash_type = cpu_to_le32(vnic->hash_type);
+
+               if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+                       max_rings = bp->rx_nr_rings;
+               else
+                       max_rings = 1;
+
+               /* Fill the RSS indirection table with ring group ids */
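+               /* E.g., with max_rings == 4 the table holds fw_grp_ids[0],
+                * [1], [2], [3], [0], [1], ... across all
+                * HW_HASH_INDEX_SIZE entries.
+                */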
+               for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
+                       if (j == max_rings)
+                               j = 0;
+                       vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+               }
+
+               req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+               req.hash_key_tbl_addr =
+                       cpu_to_le64(vnic->rss_hash_key_dma_addr);
+       }
+       req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+{
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       struct hwrm_vnic_plcmodes_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
+       req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+                               VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+                               VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+       req.enables =
+               cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+                           VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+       /* thresholds not implemented in firmware yet */
+       req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+       req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+       req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+{
+       struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
+       req.rss_cos_lb_ctx_id =
+               cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+
+       hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->nr_vnics; i++) {
+               struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+               if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
+                       bnxt_hwrm_vnic_ctx_free_one(bp, i);
+       }
+       bp->rsscos_nr_ctxs = 0;
+}
+
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+{
+       int rc;
+       struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
+       struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+                                               bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
+                              -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+                       le16_to_cpu(resp->rss_cos_lb_ctx_id);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       return rc;
+}
+
+static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+{
+       int grp_idx = 0;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       struct hwrm_vnic_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+       /* Only RSS is supported for now; TBD: COS & LB */
+       req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
+                                 VNIC_CFG_REQ_ENABLES_RSS_RULE);
+       req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+       req.cos_rule = cpu_to_le16(0xffff);
+       if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+               grp_idx = 0;
+       else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
+               grp_idx = vnic_id - 1;
+
+       req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+       req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+
+       req.lb_rule = cpu_to_le16(0xffff);
+       req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
+                             VLAN_HLEN);
+
+       if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+               req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+{
+       u32 rc = 0;
+
+       if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+               struct hwrm_vnic_free_input req = {0};
+
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
+               req.vnic_id =
+                       cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+
+               rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+               if (rc)
+                       return rc;
+               bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+       }
+       return rc;
+}
+
+static void bnxt_hwrm_vnic_free(struct bnxt *bp)
+{
+       u16 i;
+
+       for (i = 0; i < bp->nr_vnics; i++)
+               bnxt_hwrm_vnic_free_one(bp, i);
+}
+
+static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
+                               u16 end_grp_id)
+{
+       u32 rc = 0, i, j;
+       struct hwrm_vnic_alloc_input req = {0};
+       struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+       /* map ring groups to this vnic */
+       for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
+               if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
+                       netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
+                                  j, (end_grp_id - start_grp_id));
+                       break;
+               }
+               bp->vnic_info[vnic_id].fw_grp_ids[j] =
+                                       bp->grp_info[i].fw_grp_id;
+       }
+
+       bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+       if (vnic_id == 0)
+               req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
+{
+       u16 i;
+       u32 rc = 0;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct hwrm_ring_grp_alloc_input req = {0};
+               struct hwrm_ring_grp_alloc_output *resp =
+                                       bp->hwrm_cmd_resp_addr;
+
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+
+               req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+               req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
+               req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
+               req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
+
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+
+               bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+{
+       u16 i;
+       u32 rc = 0;
+       struct hwrm_ring_grp_free_input req = {0};
+
+       if (!bp->grp_info)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
+                       continue;
+               req.ring_group_id =
+                       cpu_to_le32(bp->grp_info[i].fw_grp_id);
+
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+               bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+                                   struct bnxt_ring_struct *ring,
+                                   u32 ring_type, u32 map_index,
+                                   u32 stats_ctx_id)
+{
+       int rc = 0, err = 0;
+       struct hwrm_ring_alloc_input req = {0};
+       struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 ring_id;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+
+       req.enables = 0;
+       if (ring->nr_pages > 1) {
+               req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+               /* Page size is in log2 units */
+               req.page_size = BNXT_PAGE_SHIFT;
+               req.page_tbl_depth = 1;
+       } else {
+               req.page_tbl_addr =  cpu_to_le64(ring->dma_arr[0]);
+       }
+       req.fbo = 0;
+       /* Association of ring index with doorbell index and MSIX number */
+       req.logical_id = cpu_to_le16(map_index);
+
+       switch (ring_type) {
+       case HWRM_RING_ALLOC_TX:
+               req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+               /* Association of transmit ring with completion ring */
+               req.cmpl_ring_id =
+                       cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+               req.length = cpu_to_le32(bp->tx_ring_mask + 1);
+               req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+               req.queue_id = cpu_to_le16(ring->queue_id);
+               break;
+       case HWRM_RING_ALLOC_RX:
+               req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+               req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+               break;
+       case HWRM_RING_ALLOC_AGG:
+               req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+               req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+               break;
+       case HWRM_RING_ALLOC_CMPL:
+               req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+               req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+               if (bp->flags & BNXT_FLAG_USING_MSIX)
+                       req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+               break;
+       default:
+               netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
+                          ring_type);
+               return -1;
+       }
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       err = le16_to_cpu(resp->error_code);
+       ring_id = le16_to_cpu(resp->ring_id);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       if (rc || err) {
+               /* ring_type here is the driver's HWRM_RING_ALLOC_* value,
+                * not the firmware RING_FREE_REQ_RING_TYPE_* enum, so the
+                * error path must switch on the same constants used above.
+                */
+               switch (ring_type) {
+               case HWRM_RING_ALLOC_CMPL:
+                       netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
+                                  rc, err);
+                       return -1;
+
+               case HWRM_RING_ALLOC_RX:
+               case HWRM_RING_ALLOC_AGG:
+                       netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
+                                  rc, err);
+                       return -1;
+
+               case HWRM_RING_ALLOC_TX:
+                       netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
+                                  rc, err);
+                       return -1;
+
+               default:
+                       netdev_err(bp->dev, "Invalid ring\n");
+                       return -1;
+               }
+       }
+       ring->fw_ring_id = ring_id;
+       return rc;
+}
+
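+/* Allocate all rings via HWRM.  Completion rings are allocated first
+ * so that TX rings can reference their completion ring IDs.
+ */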
+static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+{
+       int i, rc = 0;
+
+       if (bp->cp_nr_rings) {
+               for (i = 0; i < bp->cp_nr_rings; i++) {
+                       struct bnxt_napi *bnapi = bp->bnapi[i];
+                       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+                       struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+                       rc = hwrm_ring_alloc_send_msg(bp, ring,
+                                                     HWRM_RING_ALLOC_CMPL, i,
+                                                     INVALID_STATS_CTX_ID);
+                       if (rc)
+                               goto err_out;
+                       cpr->cp_doorbell = bp->bar1 + i * 0x80;
+                       BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+                       bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+               }
+       }
+
+       if (bp->tx_nr_rings) {
+               for (i = 0; i < bp->tx_nr_rings; i++) {
+                       struct bnxt_napi *bnapi = bp->bnapi[i];
+                       struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+                       struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+                       u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
+
+                       rc = hwrm_ring_alloc_send_msg(bp, ring,
+                                                     HWRM_RING_ALLOC_TX, i,
+                                                     fw_stats_ctx);
+                       if (rc)
+                               goto err_out;
+                       txr->tx_doorbell = bp->bar1 + i * 0x80;
+               }
+       }
+
+       if (bp->rx_nr_rings) {
+               for (i = 0; i < bp->rx_nr_rings; i++) {
+                       struct bnxt_napi *bnapi = bp->bnapi[i];
+                       struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+                       struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+
+                       rc = hwrm_ring_alloc_send_msg(bp, ring,
+                                                     HWRM_RING_ALLOC_RX, i,
+                                                     INVALID_STATS_CTX_ID);
+                       if (rc)
+                               goto err_out;
+                       rxr->rx_doorbell = bp->bar1 + i * 0x80;
+                       writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+                       bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
+               }
+       }
+
+       if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+               for (i = 0; i < bp->rx_nr_rings; i++) {
+                       struct bnxt_napi *bnapi = bp->bnapi[i];
+                       struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+                       struct bnxt_ring_struct *ring =
+                                               &rxr->rx_agg_ring_struct;
+
+                       rc = hwrm_ring_alloc_send_msg(bp, ring,
+                                                     HWRM_RING_ALLOC_AGG,
+                                                     bp->rx_nr_rings + i,
+                                                     INVALID_STATS_CTX_ID);
+                       if (rc)
+                               goto err_out;
+
+                       rxr->rx_agg_doorbell =
+                               bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
+                       writel(DB_KEY_RX | rxr->rx_agg_prod,
+                              rxr->rx_agg_doorbell);
+                       bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
+               }
+       }
+err_out:
+       return rc;
+}
+
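+/* Send one HWRM_RING_FREE request and log any failure. */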
+static int hwrm_ring_free_send_msg(struct bnxt *bp,
+                                  struct bnxt_ring_struct *ring,
+                                  u32 ring_type, int cmpl_ring_id)
+{
+       int rc;
+       struct hwrm_ring_free_input req = {0};
+       struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 error_code;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
+       req.ring_type = ring_type;
+       req.ring_id = cpu_to_le16(ring->fw_ring_id);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       error_code = le16_to_cpu(resp->error_code);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       if (rc || error_code) {
+               switch (ring_type) {
+               case RING_FREE_REQ_RING_TYPE_CMPL:
+                       netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d err:%d\n",
+                                  rc, error_code);
+                       return rc;
+               case RING_FREE_REQ_RING_TYPE_RX:
+                       netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d err:%d\n",
+                                  rc, error_code);
+                       return rc;
+               case RING_FREE_REQ_RING_TYPE_TX:
+                       netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d err:%d\n",
+                                  rc, error_code);
+                       return rc;
+               default:
+                       netdev_err(bp->dev, "Invalid ring\n");
+                       return -1;
+               }
+       }
+       }
+       return 0;
+}
+
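+/* Free all rings in the reverse order of allocation: TX and RX rings
+ * first, aggregation rings next, and the completion rings last.
+ */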
+static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+       int i, rc = 0;
+
+       if (!bp->bnapi)
+               return 0;
+
+       if (bp->tx_nr_rings) {
+               for (i = 0; i < bp->tx_nr_rings; i++) {
+                       struct bnxt_napi *bnapi = bp->bnapi[i];
+                       struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+                       struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+                       u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+                       if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                               hwrm_ring_free_send_msg(
+                                       bp, ring,
+                                       RING_FREE_REQ_RING_TYPE_TX,
+                                       close_path ? cmpl_ring_id :
+                                       INVALID_HW_RING_ID);
+                               ring->fw_ring_id = INVALID_HW_RING_ID;
+                       }
+               }
+       }
+
+       if (bp->rx_nr_rings) {
+               for (i = 0; i < bp->rx_nr_rings; i++) {
+                       struct bnxt_napi *bnapi = bp->bnapi[i];
+                       struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+                       struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+                       u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+                       if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                               hwrm_ring_free_send_msg(
+                                       bp, ring,
+                                       RING_FREE_REQ_RING_TYPE_RX,
+                                       close_path ? cmpl_ring_id :
+                                       INVALID_HW_RING_ID);
+                               ring->fw_ring_id = INVALID_HW_RING_ID;
+                               bp->grp_info[i].rx_fw_ring_id =
+                                       INVALID_HW_RING_ID;
+                       }
+               }
+       }
+
+       if (bp->rx_agg_nr_pages) {
+               for (i = 0; i < bp->rx_nr_rings; i++) {
+                       struct bnxt_napi *bnapi = bp->bnapi[i];
+                       struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+                       struct bnxt_ring_struct *ring =
+                                               &rxr->rx_agg_ring_struct;
+                       u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+                       if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                               hwrm_ring_free_send_msg(
+                                       bp, ring,
+                                       RING_FREE_REQ_RING_TYPE_RX,
+                                       close_path ? cmpl_ring_id :
+                                       INVALID_HW_RING_ID);
+                               ring->fw_ring_id = INVALID_HW_RING_ID;
+                               bp->grp_info[i].agg_fw_ring_id =
+                                       INVALID_HW_RING_ID;
+                       }
+               }
+       }
+
+       if (bp->cp_nr_rings) {
+               for (i = 0; i < bp->cp_nr_rings; i++) {
+                       struct bnxt_napi *bnapi = bp->bnapi[i];
+                       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+                       struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+                       if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                               hwrm_ring_free_send_msg(
+                                       bp, ring,
+                                       RING_FREE_REQ_RING_TYPE_CMPL,
+                                       INVALID_HW_RING_ID);
+                               ring->fw_ring_id = INVALID_HW_RING_ID;
+                               bp->grp_info[i].cp_fw_ring_id =
+                                                       INVALID_HW_RING_ID;
+                       }
+               }
+       }
+
+       return rc;
+}
+
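+/* Program interrupt coalescing parameters on all completion rings. */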
+int bnxt_hwrm_set_coal(struct bnxt *bp)
+{
+       int i, rc = 0;
+       struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+       u16 max_buf, max_buf_irq;
+       u16 buf_tmr, buf_tmr_irq;
+       u32 flags;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+                              -1, -1);
+
+       /* Each rx completion (2 records) should be DMAed immediately */
+       max_buf = min_t(u16, bp->coal_bufs / 4, 2);
+       /* max_buf must not be zero */
+       max_buf = clamp_t(u16, max_buf, 1, 63);
+       max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
+       buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
+       buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
+
+       flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+
+       /* RING_IDLE generates more IRQs for lower latency.  Enable it only
+        * if coal_ticks is less than 25 us.
+        */
+       if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+               flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
+
+       req.flags = cpu_to_le16(flags);
+       req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
+       req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
+       req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
+       req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
+       req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
+       req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
+       req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+{
+       int rc = 0, i;
+       struct hwrm_stat_ctx_free_input req = {0};
+
+       if (!bp->bnapi)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+                       req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+
+                       rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                               HWRM_CMD_TIMEOUT);
+                       if (rc)
+                               break;
+
+                       cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+               }
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
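+/* Allocate a firmware statistics context for each completion ring,
+ * with a 1 second update period.
+ */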
+static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
+{
+       int rc = 0, i;
+       struct hwrm_stat_ctx_alloc_input req = {0};
+       struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+
+       req.update_period_ms = cpu_to_le32(1000);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+
+               cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+
+               bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
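+/* Query function capabilities and cache the firmware resource limits
+ * in the PF or VF info structure.
+ */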
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+       int rc = 0;
+       struct hwrm_func_qcaps_input req = {0};
+       struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+       req.fid = cpu_to_le16(0xffff);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto hwrm_func_qcaps_exit;
+
+       if (BNXT_PF(bp)) {
+               struct bnxt_pf_info *pf = &bp->pf;
+
+               pf->fw_fid = le16_to_cpu(resp->fid);
+               pf->port_id = le16_to_cpu(resp->port_id);
+               memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+               pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+               pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+               pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+               pf->max_pf_tx_rings = pf->max_tx_rings;
+               pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+               pf->max_pf_rx_rings = pf->max_rx_rings;
+               pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+               pf->max_vnics = le16_to_cpu(resp->max_vnics);
+               pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+               pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+               pf->max_vfs = le16_to_cpu(resp->max_vfs);
+               pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+               pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
+               pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+               pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+               pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+               pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+       } else {
+#ifdef CONFIG_BNXT_SRIOV
+               struct bnxt_vf_info *vf = &bp->vf;
+
+               vf->fw_fid = le16_to_cpu(resp->fid);
+               memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+               if (!is_valid_ether_addr(vf->mac_addr))
+                       random_ether_addr(vf->mac_addr);
+
+               vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+               vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+               vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+               vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+               vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+               vf->max_vnics = le16_to_cpu(resp->max_vnics);
+               vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+#endif
+       }
+
+       bp->tx_push_thresh = 0;
+       if (resp->flags &
+           cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+               bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
+hwrm_func_qcaps_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+       struct hwrm_func_reset_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
+       req.enables = 0;
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+}
+
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+       int rc = 0;
+       struct hwrm_queue_qportcfg_input req = {0};
+       struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       u8 i, *qptr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto qportcfg_exit;
+
+       if (!resp->max_configurable_queues) {
+               rc = -EINVAL;
+               goto qportcfg_exit;
+       }
+       bp->max_tc = resp->max_configurable_queues;
+       if (bp->max_tc > BNXT_MAX_QUEUE)
+               bp->max_tc = BNXT_MAX_QUEUE;
+
+       qptr = &resp->queue_id0;
+       for (i = 0; i < bp->max_tc; i++) {
+               bp->q_info[i].queue_id = *qptr++;
+               bp->q_info[i].queue_profile = *qptr++;
+       }
+
+qportcfg_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
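+/* Retrieve the firmware version and warn if the HWRM interface
+ * version does not match the driver's.
+ */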
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+       int rc;
+       struct hwrm_ver_get_input req = {0};
+       struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
+       req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+       req.hwrm_intf_min = HWRM_VERSION_MINOR;
+       req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto hwrm_ver_get_exit;
+
+       memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+       if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
+           req.hwrm_intf_min != resp->hwrm_intf_min ||
+           req.hwrm_intf_upd != resp->hwrm_intf_upd) {
+               netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
+                           resp->hwrm_intf_maj, resp->hwrm_intf_min,
+                           resp->hwrm_intf_upd, req.hwrm_intf_maj,
+                           req.hwrm_intf_min, req.hwrm_intf_upd);
+               netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
+       }
+       snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
+                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
+                resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+
+hwrm_ver_get_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+{
+       if (bp->vxlan_port_cnt) {
+               bnxt_hwrm_tunnel_dst_port_free(
+                       bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+       }
+       bp->vxlan_port_cnt = 0;
+       if (bp->nge_port_cnt) {
+               bnxt_hwrm_tunnel_dst_port_free(
+                       bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+       }
+       bp->nge_port_cnt = 0;
+}
+
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+{
+       int rc, i;
+       u32 tpa_flags = 0;
+
+       if (set_tpa)
+               tpa_flags = bp->flags & BNXT_FLAG_TPA;
+       for (i = 0; i < bp->nr_vnics; i++) {
+               rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
+                                  rc, i);
+                       return rc;
+               }
+       }
+       return 0;
+}
+
+static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->nr_vnics; i++)
+               bnxt_hwrm_vnic_set_rss(bp, i, false);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+                                   bool irq_re_init)
+{
+       if (bp->vnic_info) {
+               bnxt_hwrm_clear_vnic_filter(bp);
+               /* clear all RSS settings before freeing the vnic ctx */
+               bnxt_hwrm_clear_vnic_rss(bp);
+               bnxt_hwrm_vnic_ctx_free(bp);
+               /* before freeing the vnic, undo the vnic tpa settings */
+               if (bp->flags & BNXT_FLAG_TPA)
+                       bnxt_set_tpa(bp, false);
+               bnxt_hwrm_vnic_free(bp);
+       }
+       bnxt_hwrm_ring_free(bp, close_path);
+       bnxt_hwrm_ring_grp_free(bp);
+       if (irq_re_init) {
+               bnxt_hwrm_stat_ctx_free(bp);
+               bnxt_hwrm_free_tunnel_ports(bp);
+       }
+}
+
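+/* Set up one vnic: allocate its RSS/COS context, configure the
+ * default ring group, and enable RSS (plus HDS when aggregation
+ * rings are in use).
+ */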
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+       int rc;
+
+       /* allocate context for vnic */
+       rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+                          vnic_id, rc);
+               goto vnic_setup_err;
+       }
+       bp->rsscos_nr_ctxs++;
+
+       /* configure default vnic, ring grp */
+       rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+                          vnic_id, rc);
+               goto vnic_setup_err;
+       }
+
+       /* Enable RSS hashing on vnic */
+       rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
+                          vnic_id, rc);
+               goto vnic_setup_err;
+       }
+
+       if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+               rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+                                  vnic_id, rc);
+               }
+       }
+
+vnic_setup_err:
+       return rc;
+}
+
+static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+       int i, rc = 0;
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               u16 vnic_id = i + 1;
+               u16 ring_id = i;
+
+               if (vnic_id >= bp->nr_vnics)
+                       break;
+
+               bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+               rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+                                  vnic_id, rc);
+                       break;
+               }
+               rc = bnxt_setup_vnic(bp, vnic_id);
+               if (rc)
+                       break;
+       }
+       return rc;
+#else
+       return 0;
+#endif
+}
+
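+/* Allocate and configure all firmware resources needed to pass
+ * traffic: stat contexts, rings, ring groups, vnics, the default
+ * L2 filter, the RX mask, and coalescing.  Frees everything on
+ * failure.
+ */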
+static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+{
+       int rc = 0;
+
+       if (irq_re_init) {
+               rc = bnxt_hwrm_stat_ctx_alloc(bp);
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
+                                  rc);
+                       goto err_out;
+               }
+       }
+
+       rc = bnxt_hwrm_ring_alloc(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
+               goto err_out;
+       }
+
+       rc = bnxt_hwrm_ring_grp_alloc(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
+               goto err_out;
+       }
+
+       /* default vnic 0 */
+       rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
+               goto err_out;
+       }
+
+       rc = bnxt_setup_vnic(bp, 0);
+       if (rc)
+               goto err_out;
+
+       if (bp->flags & BNXT_FLAG_RFS) {
+               rc = bnxt_alloc_rfs_vnics(bp);
+               if (rc)
+                       goto err_out;
+       }
+
+       if (bp->flags & BNXT_FLAG_TPA) {
+               rc = bnxt_set_tpa(bp, true);
+               if (rc)
+                       goto err_out;
+       }
+
+       if (BNXT_VF(bp))
+               bnxt_update_vf_mac(bp);
+
+       /* Filter for default vnic 0 */
+       rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
+       if (rc) {
+               netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+               goto err_out;
+       }
+       bp->vnic_info[0].uc_filter_count = 1;
+
+       bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
+                                  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+       if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+               bp->vnic_info[0].rx_mask |=
+                               CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+       rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+       if (rc) {
+               netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+               goto err_out;
+       }
+
+       rc = bnxt_hwrm_set_coal(bp);
+       if (rc)
+               netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
+                           rc);
+
+       return 0;
+
+err_out:
+       bnxt_hwrm_resource_free(bp, false, true);
+
+       return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
+{
+       bnxt_hwrm_resource_free(bp, true, irq_re_init);
+       return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
+{
+       bnxt_init_rx_rings(bp);
+       bnxt_init_tx_rings(bp);
+       bnxt_init_ring_grps(bp, irq_re_init);
+       bnxt_init_vnics(bp);
+
+       return bnxt_init_chip(bp, irq_re_init);
+}
+
+static void bnxt_disable_int(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+       }
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+       int i;
+
+       atomic_set(&bp->intr_sem, 0);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+       }
+}
+
+static int bnxt_set_real_num_queues(struct bnxt *bp)
+{
+       int rc;
+       struct net_device *dev = bp->dev;
+
+       rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+       if (rc)
+               return rc;
+
+       rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
+       if (rc)
+               return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+       if (bp->rx_nr_rings) {
+               dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
+               if (!dev->rx_cpu_rmap)
+                       rc = -ENOMEM;
+       }
+#endif
+
+       return rc;
+}
+
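+/* Enable MSI-X, trim the ring counts to the number of vectors
+ * actually granted, and populate the IRQ table.
+ */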
+static int bnxt_setup_msix(struct bnxt *bp)
+{
+       struct msix_entry *msix_ent;
+       struct net_device *dev = bp->dev;
+       int i, total_vecs, rc = 0;
+       const int len = sizeof(bp->irq_tbl[0].name);
+
+       bp->flags &= ~BNXT_FLAG_USING_MSIX;
+       total_vecs = bp->cp_nr_rings;
+
+       msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+       if (!msix_ent)
+               return -ENOMEM;
+
+       for (i = 0; i < total_vecs; i++) {
+               msix_ent[i].entry = i;
+               msix_ent[i].vector = 0;
+       }
+
+       total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
+       if (total_vecs < 0) {
+               rc = -ENODEV;
+               goto msix_setup_exit;
+       }
+
+       bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+       if (bp->irq_tbl) {
+               int tcs;
+
+               /* Trim ring counts based on the number of vectors allocated */
+               bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
+               bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
+               bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+               tcs = netdev_get_num_tc(dev);
+               if (tcs > 1) {
+                       bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+                       if (bp->tx_nr_rings_per_tc == 0) {
+                               netdev_reset_tc(dev);
+                               bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+                       } else {
+                               int i, off, count;
+
+                               bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+                               for (i = 0; i < tcs; i++) {
+                                       count = bp->tx_nr_rings_per_tc;
+                                       off = i * count;
+                                       netdev_set_tc_queue(dev, i, count, off);
+                               }
+                       }
+               }
+               bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+
+               for (i = 0; i < bp->cp_nr_rings; i++) {
+                       bp->irq_tbl[i].vector = msix_ent[i].vector;
+                       snprintf(bp->irq_tbl[i].name, len,
+                                "%s-%s-%d", dev->name, "TxRx", i);
+                       bp->irq_tbl[i].handler = bnxt_msix;
+               }
+               rc = bnxt_set_real_num_queues(bp);
+               if (rc)
+                       goto msix_setup_exit;
+       } else {
+               rc = -ENOMEM;
+               goto msix_setup_exit;
+       }
+       bp->flags |= BNXT_FLAG_USING_MSIX;
+       kfree(msix_ent);
+       return 0;
+
+msix_setup_exit:
+       netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+       pci_disable_msix(bp->pdev);
+       kfree(msix_ent);
+       return rc;
+}
+
+static int bnxt_setup_inta(struct bnxt *bp)
+{
+       int rc;
+       const int len = sizeof(bp->irq_tbl[0].name);
+
+       if (netdev_get_num_tc(bp->dev))
+               netdev_reset_tc(bp->dev);
+
+       bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+       if (!bp->irq_tbl) {
+               rc = -ENOMEM;
+               return rc;
+       }
+       bp->rx_nr_rings = 1;
+       bp->tx_nr_rings = 1;
+       bp->cp_nr_rings = 1;
+       bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+       bp->irq_tbl[0].vector = bp->pdev->irq;
+       snprintf(bp->irq_tbl[0].name, len,
+                "%s-%s-%d", bp->dev->name, "TxRx", 0);
+       bp->irq_tbl[0].handler = bnxt_inta;
+       rc = bnxt_set_real_num_queues(bp);
+       return rc;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+       int rc = 0;
+
+       if (bp->flags & BNXT_FLAG_MSIX_CAP)
+               rc = bnxt_setup_msix(bp);
+
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+               /* fallback to INTA */
+               rc = bnxt_setup_inta(bp);
+       }
+       return rc;
+}
+
+static void bnxt_free_irq(struct bnxt *bp)
+{
+       struct bnxt_irq *irq;
+       int i;
+
+#ifdef CONFIG_RFS_ACCEL
+       free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
+       bp->dev->rx_cpu_rmap = NULL;
+#endif
+       if (!bp->irq_tbl)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               irq = &bp->irq_tbl[i];
+               if (irq->requested)
+                       free_irq(irq->vector, bp->bnapi[i]);
+               irq->requested = 0;
+       }
+       if (bp->flags & BNXT_FLAG_USING_MSIX)
+               pci_disable_msix(bp->pdev);
+       kfree(bp->irq_tbl);
+       bp->irq_tbl = NULL;
+}
+
+static int bnxt_request_irq(struct bnxt *bp)
+{
+       int i, rc = 0;
+       unsigned long flags = 0;
+#ifdef CONFIG_RFS_ACCEL
+       struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+#endif
+
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+               flags = IRQF_SHARED;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_irq *irq = &bp->irq_tbl[i];
+#ifdef CONFIG_RFS_ACCEL
+               if (rmap && (i < bp->rx_nr_rings)) {
+                       rc = irq_cpu_rmap_add(rmap, irq->vector);
+                       if (rc)
+                               netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
+                                           i);
+               }
+#endif
+               rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+                                bp->bnapi[i]);
+               if (rc)
+                       break;
+
+               irq->requested = 1;
+       }
+       return rc;
+}
+
+static void bnxt_del_napi(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+
+               napi_hash_del(&bnapi->napi);
+               netif_napi_del(&bnapi->napi);
+       }
+}
+
+static void bnxt_init_napi(struct bnxt *bp)
+{
+       int i;
+       struct bnxt_napi *bnapi;
+
+       if (bp->flags & BNXT_FLAG_USING_MSIX) {
+               for (i = 0; i < bp->cp_nr_rings; i++) {
+                       bnapi = bp->bnapi[i];
+                       netif_napi_add(bp->dev, &bnapi->napi,
+                                      bnxt_poll, 64);
+                       napi_hash_add(&bnapi->napi);
+               }
+       } else {
+               bnapi = bp->bnapi[0];
+               netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+               napi_hash_add(&bnapi->napi);
+       }
+}
+
+static void bnxt_disable_napi(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               napi_disable(&bp->bnapi[i]->napi);
+               bnxt_disable_poll(bp->bnapi[i]);
+       }
+}
+
+static void bnxt_enable_napi(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               bnxt_enable_poll(bp->bnapi[i]);
+               napi_enable(&bp->bnapi[i]->napi);
+       }
+}
+
+static void bnxt_tx_disable(struct bnxt *bp)
+{
+       int i;
+       struct bnxt_napi *bnapi;
+       struct bnxt_tx_ring_info *txr;
+       struct netdev_queue *txq;
+
+       if (bp->bnapi) {
+               for (i = 0; i < bp->tx_nr_rings; i++) {
+                       bnapi = bp->bnapi[i];
+                       txr = &bnapi->tx_ring;
+                       txq = netdev_get_tx_queue(bp->dev, i);
+                       __netif_tx_lock(txq, smp_processor_id());
+                       txr->dev_state = BNXT_DEV_STATE_CLOSING;
+                       __netif_tx_unlock(txq);
+               }
+       }
+       /* Stop all TX queues */
+       netif_tx_disable(bp->dev);
+       netif_carrier_off(bp->dev);
+}
+
+static void bnxt_tx_enable(struct bnxt *bp)
+{
+       int i;
+       struct bnxt_napi *bnapi;
+       struct bnxt_tx_ring_info *txr;
+       struct netdev_queue *txq;
+
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               bnapi = bp->bnapi[i];
+               txr = &bnapi->tx_ring;
+               txq = netdev_get_tx_queue(bp->dev, i);
+               txr->dev_state = 0;
+       }
+       netif_tx_wake_all_queues(bp->dev);
+       if (bp->link_info.link_up)
+               netif_carrier_on(bp->dev);
+}
+
+static void bnxt_report_link(struct bnxt *bp)
+{
+       if (bp->link_info.link_up) {
+               const char *duplex;
+               const char *flow_ctrl;
+               u16 speed;
+
+               netif_carrier_on(bp->dev);
+               if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+                       duplex = "full";
+               else
+                       duplex = "half";
+               if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
+                       flow_ctrl = "ON - receive & transmit";
+               else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
+                       flow_ctrl = "ON - transmit";
+               else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
+                       flow_ctrl = "ON - receive";
+               else
+                       flow_ctrl = "none";
+               speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+               netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+                           speed, duplex, flow_ctrl);
+       } else {
+               netif_carrier_off(bp->dev);
+               netdev_err(bp->dev, "NIC Link is Down\n");
+       }
+}
+
+static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+{
+       int rc = 0;
+       struct bnxt_link_info *link_info = &bp->link_info;
+       struct hwrm_port_phy_qcfg_input req = {0};
+       struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       u8 link_up = link_info->link_up;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               return rc;
+       }
+
+       memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
+       link_info->phy_link_status = resp->link;
+       link_info->duplex =  resp->duplex;
+       link_info->pause = resp->pause;
+       link_info->auto_mode = resp->auto_mode;
+       link_info->auto_pause_setting = resp->auto_pause;
+       link_info->force_pause_setting = resp->force_pause;
+       link_info->duplex_setting = resp->duplex_setting;
+       if (link_info->phy_link_status == BNXT_LINK_LINK)
+               link_info->link_speed = le16_to_cpu(resp->link_speed);
+       else
+               link_info->link_speed = 0;
+       link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+       link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
+       link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+       link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+       link_info->preemphasis = le32_to_cpu(resp->preemphasis);
+       link_info->phy_ver[0] = resp->phy_maj;
+       link_info->phy_ver[1] = resp->phy_min;
+       link_info->phy_ver[2] = resp->phy_bld;
+       link_info->media_type = resp->media_type;
+       link_info->transceiver = resp->transceiver_type;
+       link_info->phy_addr = resp->phy_addr;
+
+       /* TODO: need to add more logic to report VF link */
+       if (chng_link_state) {
+               if (link_info->phy_link_status == BNXT_LINK_LINK)
+                       link_info->link_up = 1;
+               else
+                       link_info->link_up = 0;
+               if (link_up != link_info->link_up)
+                       bnxt_report_link(bp);
+       } else {
+               /* always link down if not required to update link state */
+               link_info->link_up = 0;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return 0;
+}
+
+static void
+bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
+{
+       if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+               if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+                       req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+               if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+                       req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
+               req->enables |=
+                       cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+       } else {
+               if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+                       req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
+               if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+                       req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
+               req->enables |=
+                       cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+       }
+}
+
+static void bnxt_hwrm_set_link_common(struct bnxt *bp,
+                                     struct hwrm_port_phy_cfg_input *req)
+{
+       u8 autoneg = bp->link_info.autoneg;
+       u16 fw_link_speed = bp->link_info.req_link_speed;
+       u32 advertising = bp->link_info.advertising;
+
+       if (autoneg & BNXT_AUTONEG_SPEED) {
+               req->auto_mode |=
+                       PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+
+               req->enables |= cpu_to_le32(
+                       PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+               req->auto_link_speed_mask = cpu_to_le16(advertising);
+
+               req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
+               req->flags |=
+                       cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+       } else {
+               req->force_link_speed = cpu_to_le16(fw_link_speed);
+               req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+       }
+
+       /* currently don't support half duplex */
+       req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
+       req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
+       /* tell chimp that the setting takes effect immediately */
+       req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+}
+
+int bnxt_hwrm_set_pause(struct bnxt *bp)
+{
+       struct hwrm_port_phy_cfg_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+       bnxt_hwrm_set_pause_common(bp, &req);
+
+       if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
+           bp->link_info.force_link_chng)
+               bnxt_hwrm_set_link_common(bp, &req);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
+               /* since changing the pause setting doesn't trigger any link
+                * change event, the driver needs to update the current pause
+                * result upon successful return of the phy_cfg command
+                */
+               bp->link_info.pause =
+               bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
+               bp->link_info.auto_pause_setting = 0;
+               if (!bp->link_info.force_link_chng)
+                       bnxt_report_link(bp);
+       }
+       bp->link_info.force_link_chng = false;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+{
+       struct hwrm_port_phy_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+       if (set_pause)
+               bnxt_hwrm_set_pause_common(bp, &req);
+
+       bnxt_hwrm_set_link_common(bp, &req);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_update_phy_setting(struct bnxt *bp)
+{
+       int rc;
+       bool update_link = false;
+       bool update_pause = false;
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       rc = bnxt_update_link(bp, true);
+       if (rc) {
+               netdev_err(bp->dev, "failed to update link (rc: %x)\n",
+                          rc);
+               return rc;
+       }
+       if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+           link_info->auto_pause_setting != link_info->req_flow_ctrl)
+               update_pause = true;
+       if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+           link_info->force_pause_setting != link_info->req_flow_ctrl)
+               update_pause = true;
+       if (link_info->req_duplex != link_info->duplex_setting)
+               update_link = true;
+       if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+               if (BNXT_AUTO_MODE(link_info->auto_mode))
+                       update_link = true;
+               if (link_info->req_link_speed != link_info->force_link_speed)
+                       update_link = true;
+       } else {
+               if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
+                       update_link = true;
+               if (link_info->advertising != link_info->auto_link_speeds)
+                       update_link = true;
+               if (link_info->req_link_speed != link_info->auto_link_speed)
+                       update_link = true;
+       }
+
+       if (update_link)
+               rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+       else if (update_pause)
+               rc = bnxt_hwrm_set_pause(bp);
+       if (rc) {
+               netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
+                          rc);
+               return rc;
+       }
+
+       return rc;
+}
+
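+/* Core open path: set up the interrupt mode, allocate memory and
+ * IRQs, initialize the NIC, and update the PHY settings when
+ * link_re_init is set.
+ */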
+static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+       int rc = 0;
+
+       netif_carrier_off(bp->dev);
+       if (irq_re_init) {
+               rc = bnxt_setup_int_mode(bp);
+               if (rc) {
+                       netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+                                  rc);
+                       return rc;
+               }
+       }
+       if ((bp->flags & BNXT_FLAG_RFS) &&
+           !(bp->flags & BNXT_FLAG_USING_MSIX)) {
+               /* disable RFS if falling back to INTA */
+               bp->dev->hw_features &= ~NETIF_F_NTUPLE;
+               bp->flags &= ~BNXT_FLAG_RFS;
+       }
+
+       rc = bnxt_alloc_mem(bp, irq_re_init);
+       if (rc) {
+               netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+               goto open_err_free_mem;
+       }
+
+       if (irq_re_init) {
+               bnxt_init_napi(bp);
+               rc = bnxt_request_irq(bp);
+               if (rc) {
+                       netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
+                       goto open_err;
+               }
+       }
+
+       bnxt_enable_napi(bp);
+
+       rc = bnxt_init_nic(bp, irq_re_init);
+       if (rc) {
+               netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+               goto open_err;
+       }
+
+       if (link_re_init) {
+               rc = bnxt_update_phy_setting(bp);
+               if (rc)
+                       goto open_err;
+       }
+
+       if (irq_re_init) {
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+               vxlan_get_rx_port(bp->dev);
+#endif
+               if (!bnxt_hwrm_tunnel_dst_port_alloc(
+                               bp, htons(0x17c1),
+                               TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
+                       bp->nge_port_cnt = 1;
+       }
+
+       bp->state = BNXT_STATE_OPEN;
+       bnxt_enable_int(bp);
+       /* Enable TX queues */
+       bnxt_tx_enable(bp);
+       mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+       return 0;
+
+open_err:
+       bnxt_disable_napi(bp);
+       bnxt_del_napi(bp);
+
+open_err_free_mem:
+       bnxt_free_skbs(bp);
+       bnxt_free_irq(bp);
+       bnxt_free_mem(bp, true);
+       return rc;
+}
+
+/* rtnl_lock held */
+int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+       int rc = 0;
+
+       rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+       if (rc) {
+               netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
+               dev_close(bp->dev);
+       }
+       return rc;
+}
+
+static int bnxt_open(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc = 0;
+
+       rc = bnxt_hwrm_func_reset(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+                          rc);
+               rc = -1;
+               return rc;
+       }
+       return __bnxt_open_nic(bp, true, true);
+}
+
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+       int i;
+
+       atomic_inc(&bp->intr_sem);
+       if (!netif_running(bp->dev))
+               return;
+
+       bnxt_disable_int(bp);
+       for (i = 0; i < bp->cp_nr_rings; i++)
+               synchronize_irq(bp->irq_tbl[i].vector);
+}
+
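+/* Core close path: stop TX, flush the rings, quiesce NAPI and
+ * interrupts, free the packet buffers, and release IRQs and NAPI
+ * when irq_re_init is set.
+ */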
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+       int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+       if (bp->sriov_cfg) {
+               rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+                                                     !bp->sriov_cfg,
+                                                     BNXT_SRIOV_CFG_WAIT_TMO);
+               if (rc)
+                       netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+       }
+#endif
+       /* Change device state to avoid TX queue wake-ups */
+       bnxt_tx_disable(bp);
+
+       bp->state = BNXT_STATE_CLOSED;
+       cancel_work_sync(&bp->sp_task);
+
+       /* Flush rings before disabling interrupts */
+       bnxt_shutdown_nic(bp, irq_re_init);
+
+       /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+
+       bnxt_disable_napi(bp);
+       bnxt_disable_int_sync(bp);
+       del_timer_sync(&bp->timer);
+       bnxt_free_skbs(bp);
+
+       if (irq_re_init) {
+               bnxt_free_irq(bp);
+               bnxt_del_napi(bp);
+       }
+       bnxt_free_mem(bp, irq_re_init);
+       return rc;
+}
+
+static int bnxt_close(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       bnxt_close_nic(bp, true, true);
+       return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       switch (cmd) {
+       case SIOCGMIIPHY:
+               /* fallthru */
+       case SIOCGMIIREG: {
+               if (!netif_running(dev))
+                       return -EAGAIN;
+
+               return 0;
+       }
+
+       case SIOCSMIIREG:
+               if (!netif_running(dev))
+                       return -EAGAIN;
+
+               return 0;
+
+       default:
+               /* do nothing */
+               break;
+       }
+       return -EOPNOTSUPP;
+}
+
+static struct rtnl_link_stats64 *
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       u32 i;
+       struct bnxt *bp = netdev_priv(dev);
+
+       memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+       if (!bp->bnapi)
+               return stats;
+
+       /* TODO check if we need to synchronize with bnxt_close path */
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+               struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+
+               stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
+               stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
+               stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+
+               stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
+               stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
+               stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+
+               stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
+               stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
+               stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+
+               stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
+               stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
+               stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+
+               stats->rx_missed_errors +=
+                       le64_to_cpu(hw_stats->rx_discard_pkts);
+
+               stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+
+               stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
+
+               stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+       }
+
+       return stats;
+}
+
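+/* Copy the netdev multicast list into vnic 0; returns true if the
+ * list changed.  Falls back to ALL_MCAST when the list is too long.
+ */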
+static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+{
+       struct net_device *dev = bp->dev;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       struct netdev_hw_addr *ha;
+       u8 *haddr;
+       int mc_count = 0;
+       bool update = false;
+       int off = 0;
+
+       netdev_for_each_mc_addr(ha, dev) {
+               if (mc_count >= BNXT_MAX_MC_ADDRS) {
+                       *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+                       vnic->mc_list_count = 0;
+                       return false;
+               }
+               haddr = ha->addr;
+               if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+                       memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+                       update = true;
+               }
+               off += ETH_ALEN;
+               mc_count++;
+       }
+       if (mc_count)
+               *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+       if (mc_count != vnic->mc_list_count) {
+               vnic->mc_list_count = mc_count;
+               update = true;
+       }
+       return update;
+}
+
+static bool bnxt_uc_list_updated(struct bnxt *bp)
+{
+       struct net_device *dev = bp->dev;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       struct netdev_hw_addr *ha;
+       int off = 0;
+
+       if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+               return true;
+
+       netdev_for_each_uc_addr(ha, dev) {
+               if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+                       return true;
+
+               off += ETH_ALEN;
+       }
+       return false;
+}
+
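+/* ndo_set_rx_mode handler: recompute the RX mask and defer the HWRM
+ * update to the sp_task workqueue.
+ */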
+static void bnxt_set_rx_mode(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       u32 mask = vnic->rx_mask;
+       bool mc_update = false;
+       bool uc_update;
+
+       if (!netif_running(dev))
+               return;
+
+       mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
+                 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
+                 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+
+       /* Only allow PF to be in promiscuous mode */
+       if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+               mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+       uc_update = bnxt_uc_list_updated(bp);
+
+       if (dev->flags & IFF_ALLMULTI) {
+               mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+               vnic->mc_list_count = 0;
+       } else {
+               mc_update = bnxt_mc_list_updated(bp, &mask);
+       }
+
+       if (mask != vnic->rx_mask || uc_update || mc_update) {
+               vnic->rx_mask = mask;
+
+               set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+       }
+}
+
+static void bnxt_cfg_rx_mode(struct bnxt *bp)
+{
+       struct net_device *dev = bp->dev;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       struct netdev_hw_addr *ha;
+       int i, off = 0, rc;
+       bool uc_update;
+
+       netif_addr_lock_bh(dev);
+       uc_update = bnxt_uc_list_updated(bp);
+       netif_addr_unlock_bh(dev);
+
+       if (!uc_update)
+               goto skip_uc;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 1; i < vnic->uc_filter_count; i++) {
+               struct hwrm_cfa_l2_filter_free_input req = {0};
+
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
+                                      -1);
+
+               req.l2_filter_id = vnic->fw_l2_filter_id[i];
+
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       vnic->uc_filter_count = 1;
+
+       netif_addr_lock_bh(dev);
+       if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+       } else {
+               netdev_for_each_uc_addr(ha, dev) {
+                       memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+                       off += ETH_ALEN;
+                       vnic->uc_filter_count++;
+               }
+       }
+       netif_addr_unlock_bh(dev);
+
+       for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+               rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
+               if (rc) {
+                       netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
+                                  rc);
+                       vnic->uc_filter_count = i;
+               }
+       }
+
+skip_uc:
+       rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+       if (rc)
+               netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+                          rc);
+}
+
+static netdev_features_t bnxt_fix_features(struct net_device *dev,
+                                          netdev_features_t features)
+{
+       return features;
+}
+
+static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       u32 flags = bp->flags;
+       u32 changes;
+       int rc = 0;
+       bool re_init = false;
+       bool update_tpa = false;
+
+       flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
+       if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+               flags |= BNXT_FLAG_GRO;
+       if (features & NETIF_F_LRO)
+               flags |= BNXT_FLAG_LRO;
+
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               flags |= BNXT_FLAG_STRIP_VLAN;
+
+       if (features & NETIF_F_NTUPLE)
+               flags |= BNXT_FLAG_RFS;
+
+       changes = flags ^ bp->flags;
+       if (changes & BNXT_FLAG_TPA) {
+               update_tpa = true;
+               if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
+                   (flags & BNXT_FLAG_TPA) == 0)
+                       re_init = true;
+       }
+
+       if (changes & ~BNXT_FLAG_TPA)
+               re_init = true;
+
+       if (flags != bp->flags) {
+               u32 old_flags = bp->flags;
+
+               bp->flags = flags;
+
+               if (!netif_running(dev)) {
+                       if (update_tpa)
+                               bnxt_set_ring_params(bp);
+                       return rc;
+               }
+
+               if (re_init) {
+                       bnxt_close_nic(bp, false, false);
+                       if (update_tpa)
+                               bnxt_set_ring_params(bp);
+
+                       return bnxt_open_nic(bp, false, false);
+               }
+               if (update_tpa) {
+                       rc = bnxt_set_tpa(bp, !!(flags & BNXT_FLAG_TPA));
+                       if (rc)
+                               bp->flags = old_flags;
+               }
+       }
+       return rc;
+}
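
changes = flags ^ bp->flags isolates exactly the feature bits that flipped: a change confined to the TPA group can sometimes be applied to a running NIC, while anything outside it forces a full close/open. The XOR partition on its own (flag values invented):

    #include <stdio.h>

    #define FLAG_LRO 0x1
    #define FLAG_GRO 0x2
    #define FLAG_TPA (FLAG_LRO | FLAG_GRO)  /* the live-updatable group */
    #define FLAG_RFS 0x4

    int main(void)
    {
            unsigned int old = FLAG_LRO;
            unsigned int new = FLAG_GRO;            /* swap LRO for GRO */
            unsigned int changes = old ^ new;       /* only the flipped bits */

            printf("tpa changed: %d, needs reinit: %d\n",
                   !!(changes & FLAG_TPA), !!(changes & ~FLAG_TPA));
            return 0;
    }

Swapping LRO for GRO keeps TPA enabled throughout, so this case prints "tpa changed: 1, needs reinit: 0" and maps to the live bnxt_set_tpa() path above.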
+
+static void bnxt_dbg_dump_states(struct bnxt *bp)
+{
+       int i;
+       struct bnxt_napi *bnapi;
+       struct bnxt_tx_ring_info *txr;
+       struct bnxt_rx_ring_info *rxr;
+       struct bnxt_cp_ring_info *cpr;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               bnapi = bp->bnapi[i];
+               txr = &bnapi->tx_ring;
+               rxr = &bnapi->rx_ring;
+               cpr = &bnapi->cp_ring;
+               if (netif_msg_drv(bp)) {
+                       netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+                                   i, txr->tx_ring_struct.fw_ring_id,
+                                   txr->tx_prod, txr->tx_cons);
+                       netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
+                                   i, rxr->rx_ring_struct.fw_ring_id,
+                                   rxr->rx_prod,
+                                   rxr->rx_agg_ring_struct.fw_ring_id,
+                                   rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
+                       netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
+                                   i, cpr->cp_ring_struct.fw_ring_id,
+                                   cpr->cp_raw_cons);
+               }
+       }
+}
+
+static void bnxt_reset_task(struct bnxt *bp)
+{
+       bnxt_dbg_dump_states(bp);
+       if (netif_running(bp->dev))
+               bnxt_tx_disable(bp); /* prevent tx timeout again */
+}
+
+static void bnxt_tx_timeout(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
+       set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+       schedule_work(&bp->sp_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnxt_poll_controller(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_irq *irq = &bp->irq_tbl[i];
+
+               disable_irq(irq->vector);
+               irq->handler(irq->vector, bp->bnapi[i]);
+               enable_irq(irq->vector);
+       }
+}
+#endif
+
+static void bnxt_timer(unsigned long data)
+{
+       struct bnxt *bp = (struct bnxt *)data;
+       struct net_device *dev = bp->dev;
+
+       if (!netif_running(dev))
+               return;
+
+       if (atomic_read(&bp->intr_sem) != 0)
+               goto bnxt_restart_timer;
+
+       /* no periodic work yet between the intr_sem check and the rearm */
+bnxt_restart_timer:
+       mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *);
+
+static void bnxt_sp_task(struct work_struct *work)
+{
+       struct bnxt *bp = container_of(work, struct bnxt, sp_task);
+       int rc;
+
+       if (bp->state != BNXT_STATE_OPEN)
+               return;
+
+       if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
+               bnxt_cfg_rx_mode(bp);
+
+       if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
+               bnxt_cfg_ntp_filters(bp);
+       if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+               rc = bnxt_update_link(bp, true);
+               if (rc)
+                       netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+                                  rc);
+       }
+       if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+               bnxt_hwrm_exec_fwd_req(bp);
+       if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+               bnxt_hwrm_tunnel_dst_port_alloc(
+                       bp, bp->vxlan_port,
+                       TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+       }
+       if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+               bnxt_hwrm_tunnel_dst_port_free(
+                       bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+       }
+       if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+               bnxt_reset_task(bp);
+}
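
sp_task drains one event bit at a time with test_and_clear_bit(), so an event posted while the worker is running simply re-sets its bit and is handled on the next pass; nothing is lost and nothing is handled twice. A single-threaded simulation of the dispatch loop (the helper below is a non-atomic stand-in for the kernel primitive):

    #include <stdio.h>

    enum { EV_RX_MASK, EV_LINK, EV_RESET };

    static unsigned long events;

    /* Non-atomic stand-in for test_and_clear_bit(), for illustration only. */
    static int test_and_clear(int bit, unsigned long *word)
    {
            int was_set = !!(*word & (1UL << bit));

            *word &= ~(1UL << bit);
            return was_set;
    }

    static void sp_task(void)
    {
            if (test_and_clear(EV_RX_MASK, &events))
                    printf("update rx mask\n");
            if (test_and_clear(EV_LINK, &events))
                    printf("update link\n");
            if (test_and_clear(EV_RESET, &events))
                    printf("reset\n");
    }

    int main(void)
    {
            events |= 1UL << EV_LINK;
            events |= 1UL << EV_RESET;
            sp_task();                      /* prints link then reset */
            return 0;
    }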
+
+static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+       int rc;
+       struct bnxt *bp = netdev_priv(dev);
+
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       /* enable device (incl. PCI PM wakeup), and bus-mastering */
+       rc = pci_enable_device(pdev);
+       if (rc) {
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+               goto init_err;
+       }
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               dev_err(&pdev->dev,
+                       "Cannot find PCI device base address, aborting\n");
+               rc = -ENODEV;
+               goto init_err_disable;
+       }
+
+       rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+       if (rc) {
+               dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+               goto init_err_disable;
+       }
+
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+               dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+               rc = -EIO;
+               goto init_err_disable;
+       }
+
+       pci_set_master(pdev);
+
+       bp->dev = dev;
+       bp->pdev = pdev;
+
+       bp->bar0 = pci_ioremap_bar(pdev, 0);
+       if (!bp->bar0) {
+               dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+               rc = -ENOMEM;
+               goto init_err_release;
+       }
+
+       bp->bar1 = pci_ioremap_bar(pdev, 2);
+       if (!bp->bar1) {
+               dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
+               rc = -ENOMEM;
+               goto init_err_release;
+       }
+
+       bp->bar2 = pci_ioremap_bar(pdev, 4);
+       if (!bp->bar2) {
+               dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
+               rc = -ENOMEM;
+               goto init_err_release;
+       }
+
+       INIT_WORK(&bp->sp_task, bnxt_sp_task);
+
+       spin_lock_init(&bp->ntp_fltr_lock);
+
+       bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
+       bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
+
+       bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
+       bp->coal_bufs = 20;
+       bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
+       bp->coal_bufs_irq = 2;
+
+       init_timer(&bp->timer);
+       bp->timer.data = (unsigned long)bp;
+       bp->timer.function = bnxt_timer;
+       bp->current_interval = BNXT_TIMER_INTERVAL;
+
+       bp->state = BNXT_STATE_CLOSED;
+
+       return 0;
+
+init_err_release:
+       if (bp->bar2) {
+               pci_iounmap(pdev, bp->bar2);
+               bp->bar2 = NULL;
+       }
+
+       if (bp->bar1) {
+               pci_iounmap(pdev, bp->bar1);
+               bp->bar1 = NULL;
+       }
+
+       if (bp->bar0) {
+               pci_iounmap(pdev, bp->bar0);
+               bp->bar0 = NULL;
+       }
+
+       pci_release_regions(pdev);
+
+init_err_disable:
+       pci_disable_device(pdev);
+
+init_err:
+       return rc;
+}
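
bnxt_init_board() uses the standard kernel unwind idiom: each failure jumps to a label that releases only what was acquired before the failing step, so the labels read in reverse acquisition order. The same shape reduced to two resources (everything below is illustrative):

    #include <stdlib.h>

    /* Acquire two resources; on failure release exactly what was taken. */
    static int setup(void **a, void **b)
    {
            *a = malloc(16);
            if (!*a)
                    goto err;

            *b = malloc(16);
            if (!*b)
                    goto err_free_a;

            return 0;

    err_free_a:
            free(*a);
    err:
            return -1;
    }

    int main(void)
    {
            void *a, *b;

            if (setup(&a, &b))
                    return 1;
            free(b);
            free(a);
            return 0;
    }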
+
+/* rtnl_lock held */
+static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+{
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+       return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (new_mtu < 60 || new_mtu > 9000)
+               return -EINVAL;
+
+       if (netif_running(dev))
+               bnxt_close_nic(bp, false, false);
+
+       dev->mtu = new_mtu;
+       bnxt_set_ring_params(bp);
+
+       if (netif_running(dev))
+               return bnxt_open_nic(bp, false, false);
+
+       return 0;
+}
+
+static int bnxt_setup_tc(struct net_device *dev, u8 tc)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (tc > bp->max_tc) {
+               netdev_err(dev, "too many traffic classes requested: %d, max supported: %d\n",
+                          tc, bp->max_tc);
+               return -EINVAL;
+       }
+
+       if (netdev_get_num_tc(dev) == tc)
+               return 0;
+
+       if (tc) {
+               int max_rx_rings, max_tx_rings;
+
+               bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+               if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
+                       return -ENOMEM;
+       }
+
+       /* Need to close the device and redo hw resource allocation */
+       if (netif_running(bp->dev))
+               bnxt_close_nic(bp, true, false);
+
+       if (tc) {
+               bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
+               netdev_set_num_tc(dev, tc);
+       } else {
+               bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+               netdev_reset_tc(dev);
+       }
+       bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+       bp->num_stat_ctxs = bp->cp_nr_rings;
+
+       if (netif_running(bp->dev))
+               return bnxt_open_nic(bp, true, false);
+
+       return 0;
+}
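
The TC path only re-partitions existing resources: tx rings scale linearly with the TC count, and completion rings track whichever of the tx/rx counts is larger. Worked through with invented numbers:

    #include <stdio.h>

    int main(void)
    {
            int tx_per_tc = 4, rx = 4, tc = 2;
            int tx = tx_per_tc * tc;        /* 8 tx rings across 2 TCs */
            int cp = tx > rx ? tx : rx;     /* cp rings track the larger side */

            printf("tx=%d cp=%d stat_ctxs=%d\n", tx, cp, cp);
            return 0;
    }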
+
+#ifdef CONFIG_RFS_ACCEL
+static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
+                           struct bnxt_ntuple_filter *f2)
+{
+       struct flow_keys *keys1 = &f1->fkeys;
+       struct flow_keys *keys2 = &f2->fkeys;
+
+       if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
+           keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
+           keys1->ports.ports == keys2->ports.ports &&
+           keys1->basic.ip_proto == keys2->basic.ip_proto &&
+           keys1->basic.n_proto == keys2->basic.n_proto &&
+           ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
+               return true;
+
+       return false;
+}
+
+static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                             u16 rxq_index, u32 flow_id)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_ntuple_filter *fltr, *new_fltr;
+       struct flow_keys *fkeys;
+       struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+       int rc = 0, idx, bit_id;
+       struct hlist_head *head;
+
+       if (skb->encapsulation)
+               return -EPROTONOSUPPORT;
+
+       new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
+       if (!new_fltr)
+               return -ENOMEM;
+
+       fkeys = &new_fltr->fkeys;
+       if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+
+       if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+           ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
+            (fkeys->basic.ip_proto != IPPROTO_UDP))) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+
+       memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+
+       idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+       head = &bp->ntp_fltr_hash_tbl[idx];
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(fltr, head, hash) {
+               if (bnxt_fltr_match(fltr, new_fltr)) {
+                       rcu_read_unlock();
+                       rc = 0;
+                       goto err_free;
+               }
+       }
+       rcu_read_unlock();
+
+       spin_lock_bh(&bp->ntp_fltr_lock);
+       /* bitmap_find_free_region() returns an int; check it before
+        * storing, since sw_id may be an unsigned type.
+        */
+       bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+                                        BNXT_NTP_FLTR_MAX_FLTR, 0);
+       if (bit_id < 0) {
+               spin_unlock_bh(&bp->ntp_fltr_lock);
+               rc = -ENOMEM;
+               goto err_free;
+       }
+       new_fltr->sw_id = bit_id;
+
+       new_fltr->flow_id = flow_id;
+       new_fltr->rxq = rxq_index;
+       hlist_add_head_rcu(&new_fltr->hash, head);
+       bp->ntp_fltr_count++;
+       spin_unlock_bh(&bp->ntp_fltr_lock);
+
+       set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+       schedule_work(&bp->sp_task);
+
+       return new_fltr->sw_id;
+
+err_free:
+       kfree(new_fltr);
+       return rc;
+}
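
Each accepted flow takes its sw_id from a bitmap under ntp_fltr_lock; bitmap_find_free_region(..., 0) claims a region of 2^0 = 1 bit and returns its index, or a negative value when the table is full, which the driver surfaces as -ENOMEM. A user-space stand-in for the single-bit case (MAX_FLTR and alloc_id are invented names):

    #include <stdio.h>

    #define MAX_FLTR 64

    static unsigned long long bmap;         /* one word covers 64 filter ids */

    /* Simplified stand-in for bitmap_find_free_region(bmap, MAX_FLTR, 0):
     * claim the lowest clear bit, or return -1 when the table is full. */
    static int alloc_id(void)
    {
            int i;

            for (i = 0; i < MAX_FLTR; i++) {
                    if (!(bmap & (1ULL << i))) {
                            bmap |= 1ULL << i;
                            return i;
                    }
            }
            return -1;
    }

    int main(void)
    {
            int a = alloc_id();
            int b = alloc_id();

            printf("ids: %d %d\n", a, b);   /* ids: 0 1 */
            return 0;
    }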
+
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+               struct hlist_head *head;
+               struct hlist_node *tmp;
+               struct bnxt_ntuple_filter *fltr;
+               int rc;
+
+               head = &bp->ntp_fltr_hash_tbl[i];
+               hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+                       bool del = false;
+
+                       if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
+                               if (rps_may_expire_flow(bp->dev, fltr->rxq,
+                                                       fltr->flow_id,
+                                                       fltr->sw_id)) {
+                                       bnxt_hwrm_cfa_ntuple_filter_free(bp,
+                                                                        fltr);
+                                       del = true;
+                               }
+                       } else {
+                               rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
+                                                                      fltr);
+                               if (rc)
+                                       del = true;
+                               else
+                                       set_bit(BNXT_FLTR_VALID, &fltr->state);
+                       }
+
+                       if (del) {
+                               spin_lock_bh(&bp->ntp_fltr_lock);
+                               hlist_del_rcu(&fltr->hash);
+                               bp->ntp_fltr_count--;
+                               spin_unlock_bh(&bp->ntp_fltr_lock);
+                               synchronize_rcu();
+                               clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+                               kfree(fltr);
+                       }
+               }
+       }
+}
+
+#else
+
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+                               __be16 port)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return;
+
+       if (sa_family != AF_INET6 && sa_family != AF_INET)
+               return;
+
+       if (bp->vxlan_port_cnt && bp->vxlan_port != port)
+               return;
+
+       bp->vxlan_port_cnt++;
+       if (bp->vxlan_port_cnt == 1) {
+               bp->vxlan_port = port;
+               set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+       }
+}
+
+static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+                               __be16 port)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return;
+
+       if (sa_family != AF_INET6 && sa_family != AF_INET)
+               return;
+
+       if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+               bp->vxlan_port_cnt--;
+
+               if (bp->vxlan_port_cnt == 0) {
+                       set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+                       schedule_work(&bp->sp_task);
+               }
+       }
+}
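
The hardware is told about at most one VXLAN destination port, so add/del form a small refcount state machine: the firmware alloc fires only on the 0 -> 1 transition, the free only on 1 -> 0, and a second, different port is silently ignored. Stand-alone version (printf stands in for the deferred HWRM calls; the driver itself stores the port as __be16):

    #include <stdio.h>

    static unsigned short vxlan_port;
    static int vxlan_port_cnt;

    static void add_port(unsigned short port)
    {
            if (vxlan_port_cnt && vxlan_port != port)
                    return;                 /* only one offloaded port */
            if (++vxlan_port_cnt == 1) {
                    vxlan_port = port;
                    printf("alloc fw tunnel port %hu\n", port);
            }
    }

    static void del_port(unsigned short port)
    {
            if (vxlan_port_cnt && vxlan_port == port && --vxlan_port_cnt == 0)
                    printf("free fw tunnel port %hu\n", port);
    }

    int main(void)
    {
            add_port(4789);                 /* alloc fires */
            add_port(4789);                 /* refcount only */
            del_port(4789);                 /* refcount only */
            del_port(4789);                 /* free fires */
            return 0;
    }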
+
+static const struct net_device_ops bnxt_netdev_ops = {
+       .ndo_open               = bnxt_open,
+       .ndo_start_xmit         = bnxt_start_xmit,
+       .ndo_stop               = bnxt_close,
+       .ndo_get_stats64        = bnxt_get_stats64,
+       .ndo_set_rx_mode        = bnxt_set_rx_mode,
+       .ndo_do_ioctl           = bnxt_ioctl,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = bnxt_change_mac_addr,
+       .ndo_change_mtu         = bnxt_change_mtu,
+       .ndo_fix_features       = bnxt_fix_features,
+       .ndo_set_features       = bnxt_set_features,
+       .ndo_tx_timeout         = bnxt_tx_timeout,
+#ifdef CONFIG_BNXT_SRIOV
+       .ndo_get_vf_config      = bnxt_get_vf_config,
+       .ndo_set_vf_mac         = bnxt_set_vf_mac,
+       .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
+       .ndo_set_vf_rate        = bnxt_set_vf_bw,
+       .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
+       .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = bnxt_poll_controller,
+#endif
+       .ndo_setup_tc           = bnxt_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
+#endif
+       .ndo_add_vxlan_port     = bnxt_add_vxlan_port,
+       .ndo_del_vxlan_port     = bnxt_del_vxlan_port,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = bnxt_busy_poll,
+#endif
+};
+
+static void bnxt_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (BNXT_PF(bp))
+               bnxt_sriov_disable(bp);
+
+       unregister_netdev(dev);
+       cancel_work_sync(&bp->sp_task);
+       bp->sp_event = 0;
+
+       bnxt_free_hwrm_resources(bp);
+       pci_iounmap(pdev, bp->bar2);
+       pci_iounmap(pdev, bp->bar1);
+       pci_iounmap(pdev, bp->bar0);
+       free_netdev(dev);
+
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+}
+
+static int bnxt_probe_phy(struct bnxt *bp)
+{
+       int rc = 0;
+       struct bnxt_link_info *link_info = &bp->link_info;
+       char phy_ver[PHY_VER_STR_LEN];
+
+       rc = bnxt_update_link(bp, false);
+       if (rc) {
+               netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
+                          rc);
+               return rc;
+       }
+
+       /* Initialize the ethtool settings copy with NVM settings */
+       if (BNXT_AUTO_MODE(link_info->auto_mode))
+               link_info->autoneg |= BNXT_AUTONEG_SPEED;
+
+       if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+               if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
+                       link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+               link_info->req_flow_ctrl = link_info->auto_pause_setting;
+       } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+               link_info->req_flow_ctrl = link_info->force_pause_setting;
+       }
+       link_info->req_duplex = link_info->duplex_setting;
+       if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+               link_info->req_link_speed = link_info->auto_link_speed;
+       else
+               link_info->req_link_speed = link_info->force_link_speed;
+       link_info->advertising = link_info->auto_link_speeds;
+       snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
+                link_info->phy_ver[0],
+                link_info->phy_ver[1],
+                link_info->phy_ver[2]);
+       strcat(bp->fw_ver_str, phy_ver);
+       return rc;
+}
+
+static int bnxt_get_max_irq(struct pci_dev *pdev)
+{
+       u16 ctrl;
+
+       if (!pdev->msix_cap)
+               return 1;
+
+       pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+       return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
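
The MSI-X Message Control word encodes the vector table size minus one in its low 11 bits (PCI_MSIX_FLAGS_QSIZE is 0x07ff in the kernel's pci_regs.h), hence the + 1 above. For example:

    #include <stdio.h>

    #define PCI_MSIX_FLAGS_QSIZE 0x07ff     /* table size field, N-1 encoded */

    int main(void)
    {
            unsigned short ctrl = 0x000f;   /* sample register value */

            printf("max irqs: %d\n", (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1); /* 16 */
            return 0;
    }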
+
+void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
+{
+       int max_rings = 0;
+
+       if (BNXT_PF(bp)) {
+               *max_tx = bp->pf.max_pf_tx_rings;
+               *max_rx = bp->pf.max_pf_rx_rings;
+               max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+               max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
+       } else {
+#ifdef CONFIG_BNXT_SRIOV
+               *max_tx = bp->vf.max_tx_rings;
+               *max_rx = bp->vf.max_rx_rings;
+               max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+               max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
+#endif
+       }
+       if (bp->flags & BNXT_FLAG_AGG_RINGS)
+               *max_rx >>= 1;
+
+       *max_rx = min_t(int, *max_rx, max_rings);
+       *max_tx = min_t(int, *max_tx, max_rings);
+}
+
+static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       static int version_printed;
+       struct net_device *dev;
+       struct bnxt *bp;
+       int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
+
+       if (version_printed++ == 0)
+               pr_info("%s", version);
+
+       max_irqs = bnxt_get_max_irq(pdev);
+       dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+       if (!dev)
+               return -ENOMEM;
+
+       bp = netdev_priv(dev);
+
+       if (bnxt_vf_pciid(ent->driver_data))
+               bp->flags |= BNXT_FLAG_VF;
+
+       if (pdev->msix_cap) {
+               bp->flags |= BNXT_FLAG_MSIX_CAP;
+               if (BNXT_PF(bp))
+                       bp->flags |= BNXT_FLAG_RFS;
+       }
+
+       rc = bnxt_init_board(pdev, dev);
+       if (rc < 0)
+               goto init_err_free;
+
+       dev->netdev_ops = &bnxt_netdev_ops;
+       dev->watchdog_timeo = BNXT_TX_TIMEOUT;
+       dev->ethtool_ops = &bnxt_ethtool_ops;
+
+       pci_set_drvdata(pdev, dev);
+
+       dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+                          NETIF_F_TSO | NETIF_F_TSO6 |
+                          NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+                          NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+                          NETIF_F_RXHASH |
+                          NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+
+       if (bp->flags & BNXT_FLAG_RFS)
+               dev->hw_features |= NETIF_F_NTUPLE;
+
+       dev->hw_enc_features =
+                       NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+                       NETIF_F_TSO | NETIF_F_TSO6 |
+                       NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+                       NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+       dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
+       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+       dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+       dev->priv_flags |= IFF_UNICAST_FLT;
+
+#ifdef CONFIG_BNXT_SRIOV
+       init_waitqueue_head(&bp->sriov_cfg_wait);
+#endif
+       rc = bnxt_alloc_hwrm_resources(bp);
+       if (rc)
+               goto init_err;
+
+       mutex_init(&bp->hwrm_cmd_lock);
+       bnxt_hwrm_ver_get(bp);
+
+       rc = bnxt_hwrm_func_drv_rgtr(bp);
+       if (rc)
+               goto init_err;
+
+       /* Get the MAX capabilities for this function */
+       rc = bnxt_hwrm_func_qcaps(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+                          rc);
+               rc = -1;
+               goto init_err;
+       }
+
+       rc = bnxt_hwrm_queue_qportcfg(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+                          rc);
+               rc = -1;
+               goto init_err;
+       }
+
+       bnxt_set_tpa_flags(bp);
+       bnxt_set_ring_params(bp);
+       dflt_rings = netif_get_num_default_rss_queues();
+       if (BNXT_PF(bp)) {
+               memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+               bp->pf.max_irqs = max_irqs;
+       } else {
+#if defined(CONFIG_BNXT_SRIOV)
+               memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+               bp->vf.max_irqs = max_irqs;
+#endif
+       }
+       bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+       bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
+       bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+       bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+       bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+       bp->num_stat_ctxs = bp->cp_nr_rings;
+
+       if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+               bp->flags |= BNXT_FLAG_STRIP_VLAN;
+
+       rc = bnxt_probe_phy(bp);
+       if (rc)
+               goto init_err;
+
+       rc = register_netdev(dev);
+       if (rc)
+               goto init_err;
+
+       netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
+                   board_info[ent->driver_data].name,
+                   (long)pci_resource_start(pdev, 0), dev->dev_addr);
+
+       return 0;
+
+init_err:
+       pci_iounmap(pdev, bp->bar0);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+
+init_err_free:
+       free_netdev(dev);
+       return rc;
+}
+
+static struct pci_driver bnxt_pci_driver = {
+       .name           = DRV_MODULE_NAME,
+       .id_table       = bnxt_pci_tbl,
+       .probe          = bnxt_init_one,
+       .remove         = bnxt_remove_one,
+#if defined(CONFIG_BNXT_SRIOV)
+       .sriov_configure = bnxt_sriov_configure,
+#endif
+};
+
+module_pci_driver(bnxt_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
new file mode 100644
index 0000000..4f2267c
--- /dev/null
@@ -0,0 +1,1086 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_H
+#define BNXT_H
+
+#define DRV_MODULE_NAME                "bnxt_en"
+#define DRV_MODULE_VERSION     "0.1.24"
+
+#define DRV_VER_MAJ    0
+#define DRV_VER_MIN    1
+#define DRV_VER_UPD    24
+
+struct tx_bd {
+       __le32 tx_bd_len_flags_type;
+       #define TX_BD_TYPE                                      (0x3f << 0)
+        #define TX_BD_TYPE_SHORT_TX_BD                          (0x00 << 0)
+        #define TX_BD_TYPE_LONG_TX_BD                           (0x10 << 0)
+       #define TX_BD_FLAGS_PACKET_END                          (1 << 6)
+       #define TX_BD_FLAGS_NO_CMPL                             (1 << 7)
+       #define TX_BD_FLAGS_BD_CNT                              (0x1f << 8)
+        #define TX_BD_FLAGS_BD_CNT_SHIFT                        8
+       #define TX_BD_FLAGS_LHINT                               (3 << 13)
+        #define TX_BD_FLAGS_LHINT_SHIFT                         13
+        #define TX_BD_FLAGS_LHINT_512_AND_SMALLER               (0 << 13)
+        #define TX_BD_FLAGS_LHINT_512_TO_1023                   (1 << 13)
+        #define TX_BD_FLAGS_LHINT_1024_TO_2047                  (2 << 13)
+        #define TX_BD_FLAGS_LHINT_2048_AND_LARGER               (3 << 13)
+       #define TX_BD_FLAGS_COAL_NOW                            (1 << 15)
+       #define TX_BD_LEN                                       (0xffff << 16)
+        #define TX_BD_LEN_SHIFT                                 16
+
+       u32 tx_bd_opaque;
+       __le64 tx_bd_haddr;
+} __packed;
+
+struct tx_bd_ext {
+       __le32 tx_bd_hsize_lflags;
+       #define TX_BD_FLAGS_TCP_UDP_CHKSUM                      (1 << 0)
+       #define TX_BD_FLAGS_IP_CKSUM                            (1 << 1)
+       #define TX_BD_FLAGS_NO_CRC                              (1 << 2)
+       #define TX_BD_FLAGS_STAMP                               (1 << 3)
+       #define TX_BD_FLAGS_T_IP_CHKSUM                         (1 << 4)
+       #define TX_BD_FLAGS_LSO                                 (1 << 5)
+       #define TX_BD_FLAGS_IPID_FMT                            (1 << 6)
+       #define TX_BD_FLAGS_T_IPID                              (1 << 7)
+       #define TX_BD_HSIZE                                     (0xff << 16)
+        #define TX_BD_HSIZE_SHIFT                               16
+
+       __le32 tx_bd_mss;
+       __le32 tx_bd_cfa_action;
+       #define TX_BD_CFA_ACTION                                (0xffff << 16)
+        #define TX_BD_CFA_ACTION_SHIFT                          16
+
+       __le32 tx_bd_cfa_meta;
+       #define TX_BD_CFA_META_MASK                             0xfffffff
+       #define TX_BD_CFA_META_VID_MASK                         0xfff
+       #define TX_BD_CFA_META_PRI_MASK                         (0xf << 12)
+        #define TX_BD_CFA_META_PRI_SHIFT                        12
+       #define TX_BD_CFA_META_TPID_MASK                        (3 << 16)
+        #define TX_BD_CFA_META_TPID_SHIFT                       16
+       #define TX_BD_CFA_META_KEY                              (0xf << 28)
+        #define TX_BD_CFA_META_KEY_SHIFT                        28
+       #define TX_BD_CFA_META_KEY_VLAN                         (1 << 28)
+};
+
+struct rx_bd {
+       __le32 rx_bd_len_flags_type;
+       #define RX_BD_TYPE                                      (0x3f << 0)
+        #define RX_BD_TYPE_RX_PACKET_BD                         0x4
+        #define RX_BD_TYPE_RX_BUFFER_BD                         0x5
+        #define RX_BD_TYPE_RX_AGG_BD                            0x6
+        #define RX_BD_TYPE_16B_BD_SIZE                          (0 << 4)
+        #define RX_BD_TYPE_32B_BD_SIZE                          (1 << 4)
+        #define RX_BD_TYPE_48B_BD_SIZE                          (2 << 4)
+        #define RX_BD_TYPE_64B_BD_SIZE                          (3 << 4)
+       #define RX_BD_FLAGS_SOP                                 (1 << 6)
+       #define RX_BD_FLAGS_EOP                                 (1 << 7)
+       #define RX_BD_FLAGS_BUFFERS                             (3 << 8)
+        #define RX_BD_FLAGS_1_BUFFER_PACKET                     (0 << 8)
+        #define RX_BD_FLAGS_2_BUFFER_PACKET                     (1 << 8)
+        #define RX_BD_FLAGS_3_BUFFER_PACKET                     (2 << 8)
+        #define RX_BD_FLAGS_4_BUFFER_PACKET                     (3 << 8)
+       #define RX_BD_LEN                                       (0xffff << 16)
+        #define RX_BD_LEN_SHIFT                                 16
+
+       u32 rx_bd_opaque;
+       __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+       __le32 tx_cmp_flags_type;
+       #define CMP_TYPE                                        (0x3f << 0)
+        #define CMP_TYPE_TX_L2_CMP                              0
+        #define CMP_TYPE_RX_L2_CMP                              17
+        #define CMP_TYPE_RX_AGG_CMP                             18
+        #define CMP_TYPE_RX_L2_TPA_START_CMP                    19
+        #define CMP_TYPE_RX_L2_TPA_END_CMP                      21
+        #define CMP_TYPE_STATUS_CMP                             32
+        #define CMP_TYPE_REMOTE_DRIVER_REQ                      34
+        #define CMP_TYPE_REMOTE_DRIVER_RESP                     36
+        #define CMP_TYPE_ERROR_STATUS                           48
+        #define CMPL_BASE_TYPE_STAT_EJECT                       (0x1aUL << 0)
+        #define CMPL_BASE_TYPE_HWRM_DONE                        (0x20UL << 0)
+        #define CMPL_BASE_TYPE_HWRM_FWD_REQ                     (0x22UL << 0)
+        #define CMPL_BASE_TYPE_HWRM_FWD_RESP                    (0x24UL << 0)
+        #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT                 (0x2eUL << 0)
+
+       #define TX_CMP_FLAGS_ERROR                              (1 << 6)
+       #define TX_CMP_FLAGS_PUSH                               (1 << 7)
+
+       u32 tx_cmp_opaque;
+       __le32 tx_cmp_errors_v;
+       #define TX_CMP_V                                        (1 << 0)
+       #define TX_CMP_ERRORS_BUFFER_ERROR                      (7 << 1)
+        #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR             0
+        #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT           2
+        #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG         4
+        #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS          5
+        #define TX_CMP_ERRORS_ZERO_LENGTH_PKT                   (1 << 4)
+        #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN                  (1 << 5)
+        #define TX_CMP_ERRORS_DMA_ERROR                         (1 << 6)
+        #define TX_CMP_ERRORS_HINT_TOO_SHORT                    (1 << 7)
+
+       __le32 tx_cmp_unsed_3;
+};
+
+struct rx_cmp {
+       __le32 rx_cmp_len_flags_type;
+       #define RX_CMP_CMP_TYPE                                 (0x3f << 0)
+       #define RX_CMP_FLAGS_ERROR                              (1 << 6)
+       #define RX_CMP_FLAGS_PLACEMENT                          (7 << 7)
+       #define RX_CMP_FLAGS_RSS_VALID                          (1 << 10)
+       #define RX_CMP_FLAGS_UNUSED                             (1 << 11)
+        #define RX_CMP_FLAGS_ITYPES_SHIFT                       12
+        #define RX_CMP_FLAGS_ITYPE_UNKNOWN                      (0 << 12)
+        #define RX_CMP_FLAGS_ITYPE_IP                           (1 << 12)
+        #define RX_CMP_FLAGS_ITYPE_TCP                          (2 << 12)
+        #define RX_CMP_FLAGS_ITYPE_UDP                          (3 << 12)
+        #define RX_CMP_FLAGS_ITYPE_FCOE                         (4 << 12)
+        #define RX_CMP_FLAGS_ITYPE_ROCE                         (5 << 12)
+        #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS                    (8 << 12)
+        #define RX_CMP_FLAGS_ITYPE_PTP_W_TS                     (9 << 12)
+       #define RX_CMP_LEN                                      (0xffff << 16)
+        #define RX_CMP_LEN_SHIFT                                16
+
+       u32 rx_cmp_opaque;
+       __le32 rx_cmp_misc_v1;
+       #define RX_CMP_V1                                       (1 << 0)
+       #define RX_CMP_AGG_BUFS                                 (0x1f << 1)
+        #define RX_CMP_AGG_BUFS_SHIFT                           1
+       #define RX_CMP_RSS_HASH_TYPE                            (0x7f << 9)
+        #define RX_CMP_RSS_HASH_TYPE_SHIFT                      9
+       #define RX_CMP_PAYLOAD_OFFSET                           (0xff << 16)
+        #define RX_CMP_PAYLOAD_OFFSET_SHIFT                     16
+
+       __le32 rx_cmp_rss_hash;
+};
+
+#define RX_CMP_HASH_VALID(rxcmp)                               \
+       ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define RX_CMP_HASH_TYPE(rxcmp)                                        \
+       ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+        RX_CMP_RSS_HASH_TYPE_SHIFT)
+
+struct rx_cmp_ext {
+       __le32 rx_cmp_flags2;
+       #define RX_CMP_FLAGS2_IP_CS_CALC                        0x1
+       #define RX_CMP_FLAGS2_L4_CS_CALC                        (0x1 << 1)
+       #define RX_CMP_FLAGS2_T_IP_CS_CALC                      (0x1 << 2)
+       #define RX_CMP_FLAGS2_T_L4_CS_CALC                      (0x1 << 3)
+       #define RX_CMP_FLAGS2_META_FORMAT_VLAN                  (0x1 << 4)
+       __le32 rx_cmp_meta_data;
+       #define RX_CMP_FLAGS2_METADATA_VID_MASK                 0xfff
+       #define RX_CMP_FLAGS2_METADATA_TPID_MASK                0xffff0000
+        #define RX_CMP_FLAGS2_METADATA_TPID_SFT                 16
+       __le32 rx_cmp_cfa_code_errors_v2;
+       #define RX_CMP_V                                        (1 << 0)
+       #define RX_CMPL_ERRORS_MASK                             (0x7fff << 1)
+        #define RX_CMPL_ERRORS_SFT                              1
+       #define RX_CMPL_ERRORS_BUFFER_ERROR_MASK                (0x7 << 1)
+        #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER           (0x0 << 1)
+        #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT         (0x1 << 1)
+        #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP         (0x2 << 1)
+        #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT          (0x3 << 1)
+       #define RX_CMPL_ERRORS_IP_CS_ERROR                      (0x1 << 4)
+       #define RX_CMPL_ERRORS_L4_CS_ERROR                      (0x1 << 5)
+       #define RX_CMPL_ERRORS_T_IP_CS_ERROR                    (0x1 << 6)
+       #define RX_CMPL_ERRORS_T_L4_CS_ERROR                    (0x1 << 7)
+       #define RX_CMPL_ERRORS_CRC_ERROR                        (0x1 << 8)
+       #define RX_CMPL_ERRORS_T_PKT_ERROR_MASK                 (0x7 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR             (0x0 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION     (0x1 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN     (0x2 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR   (0x3 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR     (0x4 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR    (0x5 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL         (0x6 << 9)
+       #define RX_CMPL_ERRORS_PKT_ERROR_MASK                   (0xf << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR               (0x0 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION         (0x1 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN         (0x2 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL             (0x3 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR         (0x4 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR        (0x5 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN         (0x6 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN         (0x8 << 12)
+
+       #define RX_CMPL_CFA_CODE_MASK                           (0xffff << 16)
+        #define RX_CMPL_CFA_CODE_SFT                            16
+
+       __le32 rx_cmp_unused3;
+};
+
+#define RX_CMP_L2_ERRORS                                               \
+       cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS                                              \
+       (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS                                          \
+       (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+#define RX_CMP_L4_CS_OK(rxcmp1)                                                \
+           (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) &&           \
+            !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_ENCAP(rxcmp1)                                           \
+           ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) &                    \
+            RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+struct rx_agg_cmp {
+       __le32 rx_agg_cmp_len_flags_type;
+       #define RX_AGG_CMP_TYPE                                 (0x3f << 0)
+       #define RX_AGG_CMP_LEN                                  (0xffff << 16)
+        #define RX_AGG_CMP_LEN_SHIFT                            16
+       u32 rx_agg_cmp_opaque;
+       __le32 rx_agg_cmp_v;
+       #define RX_AGG_CMP_V                                    (1 << 0)
+       __le32 rx_agg_cmp_unused;
+};
+
+struct rx_tpa_start_cmp {
+       __le32 rx_tpa_start_cmp_len_flags_type;
+       #define RX_TPA_START_CMP_TYPE                           (0x3f << 0)
+       #define RX_TPA_START_CMP_FLAGS                          (0x3ff << 6)
+        #define RX_TPA_START_CMP_FLAGS_SHIFT                    6
+       #define RX_TPA_START_CMP_FLAGS_PLACEMENT                (0x7 << 7)
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT          7
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO          (0x1 << 7)
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS            (0x2 << 7)
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO      (0x5 << 7)
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS        (0x6 << 7)
+       #define RX_TPA_START_CMP_FLAGS_RSS_VALID                (0x1 << 10)
+       #define RX_TPA_START_CMP_FLAGS_ITYPES                   (0xf << 12)
+        #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT             12
+        #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP                (0x2 << 12)
+       #define RX_TPA_START_CMP_LEN                            (0xffff << 16)
+        #define RX_TPA_START_CMP_LEN_SHIFT                      16
+
+       u32 rx_tpa_start_cmp_opaque;
+       __le32 rx_tpa_start_cmp_misc_v1;
+       #define RX_TPA_START_CMP_V1                             (0x1 << 0)
+       #define RX_TPA_START_CMP_RSS_HASH_TYPE                  (0x7f << 9)
+        #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT            9
+       #define RX_TPA_START_CMP_AGG_ID                         (0x7f << 25)
+        #define RX_TPA_START_CMP_AGG_ID_SHIFT                   25
+
+       __le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start)                             \
+       ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &              \
+        cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start)                              \
+       ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &       \
+         RX_TPA_START_CMP_RSS_HASH_TYPE) >>                            \
+        RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT)
+
+#define TPA_START_AGG_ID(rx_tpa_start)                                 \
+       ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &       \
+        RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+struct rx_tpa_start_cmp_ext {
+       __le32 rx_tpa_start_cmp_flags2;
+       #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC              (0x1 << 0)
+       #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC              (0x1 << 1)
+       #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC            (0x1 << 2)
+       #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC            (0x1 << 3)
+
+       __le32 rx_tpa_start_cmp_metadata;
+       __le32 rx_tpa_start_cmp_cfa_code_v2;
+       #define RX_TPA_START_CMP_V2                             (0x1 << 0)
+       #define RX_TPA_START_CMP_CFA_CODE                       (0xffff << 16)
+        #define RX_TPA_START_CMPL_CFA_CODE_SHIFT                16
+       __le32 rx_tpa_start_cmp_unused5;
+};
+
+struct rx_tpa_end_cmp {
+       __le32 rx_tpa_end_cmp_len_flags_type;
+       #define RX_TPA_END_CMP_TYPE                             (0x3f << 0)
+       #define RX_TPA_END_CMP_FLAGS                            (0x3ff << 6)
+        #define RX_TPA_END_CMP_FLAGS_SHIFT                      6
+       #define RX_TPA_END_CMP_FLAGS_PLACEMENT                  (0x7 << 7)
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT            7
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO            (0x1 << 7)
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS              (0x2 << 7)
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO        (0x5 << 7)
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS          (0x6 << 7)
+       #define RX_TPA_END_CMP_FLAGS_RSS_VALID                  (0x1 << 10)
+       #define RX_TPA_END_CMP_FLAGS_ITYPES                     (0xf << 12)
+        #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT               12
+        #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP                  (0x2 << 12)
+       #define RX_TPA_END_CMP_LEN                              (0xffff << 16)
+        #define RX_TPA_END_CMP_LEN_SHIFT                        16
+
+       u32 rx_tpa_end_cmp_opaque;
+       __le32 rx_tpa_end_cmp_misc_v1;
+       #define RX_TPA_END_CMP_V1                               (0x1 << 0)
+       #define RX_TPA_END_CMP_AGG_BUFS                         (0x3f << 1)
+        #define RX_TPA_END_CMP_AGG_BUFS_SHIFT                   1
+       #define RX_TPA_END_CMP_TPA_SEGS                         (0xff << 8)
+        #define RX_TPA_END_CMP_TPA_SEGS_SHIFT                   8
+       #define RX_TPA_END_CMP_PAYLOAD_OFFSET                   (0xff << 16)
+        #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT             16
+       #define RX_TPA_END_CMP_AGG_ID                           (0x7f << 25)
+        #define RX_TPA_END_CMP_AGG_ID_SHIFT                     25
+
+       __le32 rx_tpa_end_cmp_tsdelta;
+       #define RX_TPA_END_GRO_TS                               (0x1 << 31)
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end)                                     \
+       ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &           \
+        RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end)                                   \
+       ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &           \
+        RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO                         \
+       cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO &          \
+                   RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end)                                                \
+       ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type &                  \
+        RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end)                                     \
+       ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+
+struct rx_tpa_end_cmp_ext {
+       __le32 rx_tpa_end_cmp_dup_acks;
+       #define RX_TPA_END_CMP_TPA_DUP_ACKS                     (0xf << 0)
+
+       __le32 rx_tpa_end_cmp_seg_len;
+       #define RX_TPA_END_CMP_TPA_SEG_LEN                      (0xffff << 0)
+
+       __le32 rx_tpa_end_cmp_errors_v2;
+       #define RX_TPA_END_CMP_V2                               (0x1 << 0)
+       #define RX_TPA_END_CMP_ERRORS                           (0x7fff << 1)
+       #define RX_TPA_END_CMPL_ERRORS_SHIFT                     1
+
+       u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define DB_IDX_MASK                                            0xffffff
+#define DB_IDX_VALID                                           (0x1 << 26)
+#define DB_IRQ_DIS                                             (0x1 << 27)
+#define DB_KEY_TX                                              (0x0 << 28)
+#define DB_KEY_RX                                              (0x1 << 28)
+#define DB_KEY_CP                                              (0x2 << 28)
+#define DB_KEY_ST                                              (0x3 << 28)
+#define DB_KEY_TX_PUSH                                         (0x4 << 28)
+#define DB_LONG_TX_PUSH                                                (0x2 << 24)
+
+#define INVALID_HW_RING_ID     ((u16)-1)
+
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV4           0x01
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4       0x02
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV6           0x04
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6       0x08
+
+/* The hardware supports certain page sizes.  Use the supported page sizes
+ * to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNXT_PAGE_SHIFT        12
+#elif (PAGE_SHIFT <= 13)
+#define BNXT_PAGE_SHIFT        PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNXT_PAGE_SHIFT        13
+#else
+#define BNXT_PAGE_SHIFT        16
+#endif
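
The chain pins ring chunks to sizes the chip can actually use: never below 4KB, native 4KB/8KB pages taken as-is, anything in between rounded down to 8KB, and 64KB as the cap. The same selection written out as a function with sample inputs:

    #include <stdio.h>

    /* Mirror of the BNXT_PAGE_SHIFT #if chain above, as a function. */
    static int bnxt_page_shift(int page_shift)
    {
            if (page_shift < 12)
                    return 12;              /* never below 4KB */
            if (page_shift <= 13)
                    return page_shift;      /* 4KB and 8KB used as-is */
            if (page_shift < 16)
                    return 13;              /* e.g. 16KB pages use 8KB chunks */
            return 16;                      /* 64KB pages and up cap at 64KB */
    }

    int main(void)
    {
            printf("%d %d %d\n", bnxt_page_shift(12), bnxt_page_shift(14),
                   bnxt_page_shift(16));    /* 12 13 16 */
            return 0;
    }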
+
+#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
+
+#define BNXT_MIN_PKT_SIZE      45
+
+#define BNXT_NUM_TESTS(bp)     0
+
+#define BNXT_DEFAULT_RX_RING_SIZE      1023
+#define BNXT_DEFAULT_TX_RING_SIZE      512
+
+#define MAX_TPA                64
+
+#define MAX_RX_PAGES   8
+#define MAX_RX_AGG_PAGES       32
+#define MAX_TX_PAGES   8
+#define MAX_CP_PAGES   64
+
+#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
+
+#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
+
+#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+
+#define BNXT_MAX_RX_DESC_CNT           (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_JUM_DESC_CNT       (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNXT_MAX_TX_DESC_CNT           (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+#define RX_RING(x)     (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_IDX(x)      ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(x)     (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_IDX(x)      ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x)     (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define CP_IDX(x)      ((x) & (CP_DESC_CNT - 1))
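
With the default 4KB BNXT_PAGE_SHIFT and 16-byte descriptors, each ring page holds 256 entries, so the RING() macros yield the page number and the IDX() macros the slot within that page. A quick check of the arithmetic (the values are invented; the macros mirror the ones above with the sizes filled in):

    #include <stdio.h>

    #define BNXT_PAGE_SHIFT 12                  /* 4KB ring pages */
    #define RX_DESC_CNT     (4096 / 16)         /* 16-byte rx_bd: 256 per page */
    #define RX_RING(x)      (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
    #define RX_IDX(x)       ((x) & (RX_DESC_CNT - 1))

    int main(void)
    {
            unsigned int prod = 700;            /* 700 = 2 * 256 + 188 */

            printf("page %u idx %u\n", RX_RING(prod), RX_IDX(prod)); /* 2 188 */
            return 0;
    }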
+
+#define TX_CMP_VALID(txcmp, raw_cons)                                  \
+       (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==        \
+        !((raw_cons) & bp->cp_bit))
+
+#define RX_CMP_VALID(rxcmp1, raw_cons)                                 \
+       (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+        !((raw_cons) & bp->cp_bit))
+
+#define RX_AGG_CMP_VALID(agg, raw_cons)                                \
+       (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
+        !((raw_cons) & bp->cp_bit))
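
Completion entries carry a V bit that the hardware inverts on each pass around the ring; software keeps a raw, never-masked consumer index, and, assuming bp->cp_bit is set elsewhere in this patch to the ring size, raw_cons & bp->cp_bit flips at exactly the same rate, so an entry is consumable only when the two phases agree. Simulated with an 8-entry ring:

    #include <stdio.h>

    #define RING_SIZE 8                 /* entries; power of two */
    #define CP_BIT    RING_SIZE         /* toggles once per ring wrap */

    /* Valid when the entry's V bit matches the phase implied by the raw
     * (never-masked) consumer index, as in TX_CMP_VALID() above. */
    static int cmp_valid(unsigned int v_bit, unsigned int raw_cons)
    {
            return !!v_bit == !(raw_cons & CP_BIT);
    }

    int main(void)
    {
            /* First pass: hardware writes V=1 and the phase expects 1. */
            printf("%d\n", cmp_valid(1, 3));               /* 1: consumable */
            /* Second pass: raw_cons crossed RING_SIZE, phase expects 0. */
            printf("%d\n", cmp_valid(1, RING_SIZE + 3));   /* 0: stale */
            return 0;
    }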
+
+#define TX_CMP_TYPE(txcmp)                                     \
+       (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp)                                     \
+       (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+#define NEXT_RX(idx)           (((idx) + 1) & bp->rx_ring_mask)
+
+#define NEXT_RX_AGG(idx)       (((idx) + 1) & bp->rx_agg_ring_mask)
+
+#define NEXT_TX(idx)           (((idx) + 1) & bp->tx_ring_mask)
+
+#define ADV_RAW_CMP(idx, n)    ((idx) + (n))
+#define NEXT_RAW_CMP(idx)      ADV_RAW_CMP(idx, 1)
+#define RING_CMP(idx)          ((idx) & bp->cp_ring_mask)
+#define NEXT_CMP(idx)          RING_CMP(ADV_RAW_CMP(idx, 1))
+
+#define HWRM_CMD_TIMEOUT               500
+#define HWRM_RESET_TIMEOUT             ((HWRM_CMD_TIMEOUT) * 4)
+#define HWRM_RESP_ERR_CODE_MASK                0xffff
+#define HWRM_RESP_LEN_MASK             0xffff0000
+#define HWRM_RESP_LEN_SFT              16
+#define HWRM_RESP_VALID_MASK           0xff000000
+#define BNXT_HWRM_REQ_MAX_SIZE         128
+#define BNXT_HWRM_REQS_PER_PAGE                (BNXT_PAGE_SIZE /       \
+                                        BNXT_HWRM_REQ_MAX_SIZE)
+
+struct bnxt_sw_tx_bd {
+       struct sk_buff          *skb;
+       DEFINE_DMA_UNMAP_ADDR(mapping);
+       u8                      is_gso;
+       u8                      is_push;
+       unsigned short          nr_frags;
+};
+
+struct bnxt_sw_rx_bd {
+       u8                      *data;
+       DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct bnxt_sw_rx_agg_bd {
+       struct page             *page;
+       dma_addr_t              mapping;
+};
+
+struct bnxt_ring_struct {
+       int                     nr_pages;
+       int                     page_size;
+       void                    **pg_arr;
+       dma_addr_t              *dma_arr;
+
+       __le64                  *pg_tbl;
+       dma_addr_t              pg_tbl_map;
+
+       int                     vmem_size;
+       void                    **vmem;
+
+       u16                     fw_ring_id; /* Ring id filled by Chimp FW */
+       u8                      queue_id;
+};
+
+struct tx_push_bd {
+       __le32                  doorbell;
+       struct tx_bd            txbd1;
+       struct tx_bd_ext        txbd2;
+};
+
+struct bnxt_tx_ring_info {
+       u16                     tx_prod;
+       u16                     tx_cons;
+       void __iomem            *tx_doorbell;
+
+       struct tx_bd            *tx_desc_ring[MAX_TX_PAGES];
+       struct bnxt_sw_tx_bd    *tx_buf_ring;
+
+       dma_addr_t              tx_desc_mapping[MAX_TX_PAGES];
+
+       struct tx_push_bd       *tx_push;
+       dma_addr_t              tx_push_mapping;
+
+#define BNXT_DEV_STATE_CLOSING 0x1
+       u32                     dev_state;
+
+       struct bnxt_ring_struct tx_ring_struct;
+};
+
+struct bnxt_tpa_info {
+       u8                      *data;
+       dma_addr_t              mapping;
+       u16                     len;
+       unsigned short          gso_type;
+       u32                     flags2;
+       u32                     metadata;
+       enum pkt_hash_types     hash_type;
+       u32                     rss_hash;
+};
+
+struct bnxt_rx_ring_info {
+       u16                     rx_prod;
+       u16                     rx_agg_prod;
+       u16                     rx_sw_agg_prod;
+       void __iomem            *rx_doorbell;
+       void __iomem            *rx_agg_doorbell;
+
+       struct rx_bd            *rx_desc_ring[MAX_RX_PAGES];
+       struct bnxt_sw_rx_bd    *rx_buf_ring;
+
+       struct rx_bd            *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+       struct bnxt_sw_rx_agg_bd        *rx_agg_ring;
+
+       unsigned long           *rx_agg_bmap;
+       u16                     rx_agg_bmap_size;
+
+       dma_addr_t              rx_desc_mapping[MAX_RX_PAGES];
+       dma_addr_t              rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+       struct bnxt_tpa_info    *rx_tpa;
+
+       struct bnxt_ring_struct rx_ring_struct;
+       struct bnxt_ring_struct rx_agg_ring_struct;
+};
+
+struct bnxt_cp_ring_info {
+       u32                     cp_raw_cons;
+       void __iomem            *cp_doorbell;
+
+       struct tx_cmp           *cp_desc_ring[MAX_CP_PAGES];
+
+       dma_addr_t              cp_desc_mapping[MAX_CP_PAGES];
+
+       struct ctx_hw_stats     *hw_stats;
+       dma_addr_t              hw_stats_map;
+       u32                     hw_stats_ctx_id;
+       u64                     rx_l4_csum_errors;
+
+       struct bnxt_ring_struct cp_ring_struct;
+};
+
+struct bnxt_napi {
+       struct napi_struct      napi;
+       struct bnxt             *bp;
+
+       int                     index;
+       struct bnxt_cp_ring_info        cp_ring;
+       struct bnxt_rx_ring_info        rx_ring;
+       struct bnxt_tx_ring_info        tx_ring;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       atomic_t                poll_state;
+#endif
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+enum bnxt_poll_state_t {
+       BNXT_STATE_IDLE = 0,
+       BNXT_STATE_NAPI,
+       BNXT_STATE_POLL,
+       BNXT_STATE_DISABLE,
+};
+#endif
+
+struct bnxt_irq {
+       irq_handler_t   handler;
+       unsigned int    vector;
+       u8              requested;
+       char            name[IFNAMSIZ + 2];
+};
+
+#define HWRM_RING_ALLOC_TX     0x1
+#define HWRM_RING_ALLOC_RX     0x2
+#define HWRM_RING_ALLOC_AGG    0x4
+#define HWRM_RING_ALLOC_CMPL   0x8
+
+#define INVALID_STATS_CTX_ID   -1
+
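+/* Common HWRM request header.  cmpl_ring_req_type packs the target
+ * completion ring in the upper 16 bits and the request type in the
+ * lower 16 bits; target_id_seq_id likewise packs the target function ID
+ * and the sequence ID.
+ */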
+struct hwrm_cmd_req_hdr {
+#define HWRM_CMPL_RING_MASK    0xffff0000
+#define HWRM_CMPL_RING_SFT     16
+       __le32  cmpl_ring_req_type;
+#define HWRM_SEQ_ID_MASK       0xffff
+#define HWRM_SEQ_ID_INVALID    -1
+#define HWRM_RESP_LEN_OFFSET   4
+#define HWRM_TARGET_FID_MASK   0xffff0000
+#define HWRM_TARGET_FID_SFT    16
+       __le32  target_id_seq_id;
+       __le64  resp_addr;
+};
+
+struct bnxt_ring_grp_info {
+       u16     fw_stats_ctx;
+       u16     fw_grp_id;
+       u16     rx_fw_ring_id;
+       u16     agg_fw_ring_id;
+       u16     cp_fw_ring_id;
+};
+
+struct bnxt_vnic_info {
+       u16             fw_vnic_id; /* returned by Chimp during alloc */
+       u16             fw_rss_cos_lb_ctx;
+       u16             fw_l2_ctx_id;
+#define BNXT_MAX_UC_ADDRS      4
+       __le64          fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+                               /* index 0 always dev_addr */
+       u16             uc_filter_count;
+       u8              *uc_list;
+
+       u16             *fw_grp_ids;
+       u16             hash_type;
+       dma_addr_t      rss_table_dma_addr;
+       __le16          *rss_table;
+       dma_addr_t      rss_hash_key_dma_addr;
+       u64             *rss_hash_key;
+       u32             rx_mask;
+
+       u8              *mc_list;
+       int             mc_list_size;
+       int             mc_list_count;
+       dma_addr_t      mc_list_mapping;
+#define BNXT_MAX_MC_ADDRS      16
+
+       u32             flags;
+#define BNXT_VNIC_RSS_FLAG     1
+#define BNXT_VNIC_RFS_FLAG     2
+#define BNXT_VNIC_MCAST_FLAG   4
+#define BNXT_VNIC_UCAST_FLAG   8
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+       u16     fw_fid;
+       u8      mac_addr[ETH_ALEN];
+       u16     max_rsscos_ctxs;
+       u16     max_cp_rings;
+       u16     max_tx_rings;
+       u16     max_rx_rings;
+       u16     max_l2_ctxs;
+       u16     max_irqs;
+       u16     max_vnics;
+       u16     max_stat_ctxs;
+       u16     vlan;
+       u32     flags;
+#define BNXT_VF_QOS            0x1
+#define BNXT_VF_SPOOFCHK       0x2
+#define BNXT_VF_LINK_FORCED    0x4
+#define BNXT_VF_LINK_UP                0x8
+       u32     func_flags; /* func cfg flags */
+       u32     min_tx_rate;
+       u32     max_tx_rate;
+       void    *hwrm_cmd_req_addr;
+       dma_addr_t      hwrm_cmd_req_dma_addr;
+};
+#endif
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID      1
+#define BNXT_FIRST_VF_FID      128
+       u32     fw_fid;
+       u8      port_id;
+       u8      mac_addr[ETH_ALEN];
+       u16     max_rsscos_ctxs;
+       u16     max_cp_rings;
+       u16     max_tx_rings; /* HW assigned max tx rings for this PF */
+       u16     max_pf_tx_rings; /* runtime max tx rings owned by PF */
+       u16     max_rx_rings; /* HW assigned max rx rings for this PF */
+       u16     max_pf_rx_rings; /* runtime max rx rings owned by PF */
+       u16     max_irqs;
+       u16     max_l2_ctxs;
+       u16     max_vnics;
+       u16     max_stat_ctxs;
+       u32     first_vf_id;
+       u16     active_vfs;
+       u16     max_vfs;
+       u32     max_encap_records;
+       u32     max_decap_records;
+       u32     max_tx_em_flows;
+       u32     max_tx_wm_flows;
+       u32     max_rx_em_flows;
+       u32     max_rx_wm_flows;
+       unsigned long   *vf_event_bmap;
+       u16     hwrm_cmd_req_pages;
+       void                    *hwrm_cmd_req_addr[4];
+       dma_addr_t              hwrm_cmd_req_dma_addr[4];
+       struct bnxt_vf_info     *vf;
+};
+
+struct bnxt_ntuple_filter {
+       struct hlist_node       hash;
+       u8                      src_mac_addr[ETH_ALEN];
+       struct flow_keys        fkeys;
+       __le64                  filter_id;
+       u16                     sw_id;
+       u16                     rxq;
+       u32                     flow_id;
+       unsigned long           state;
+#define BNXT_FLTR_VALID                0
+#define BNXT_FLTR_UPDATE       1
+};
+
+#define BNXT_ALL_COPPER_ETHTOOL_SPEED                          \
+       (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \
+        ADVERTISED_10000baseT_Full)
+
+struct bnxt_link_info {
+       u8                      media_type;
+       u8                      transceiver;
+       u8                      phy_addr;
+       u8                      phy_link_status;
+#define BNXT_LINK_NO_LINK      PORT_PHY_QCFG_RESP_LINK_NO_LINK
+#define BNXT_LINK_SIGNAL       PORT_PHY_QCFG_RESP_LINK_SIGNAL
+#define BNXT_LINK_LINK         PORT_PHY_QCFG_RESP_LINK_LINK
+       u8                      wire_speed;
+       u8                      loop_back;
+       u8                      link_up;
+       u8                      duplex;
+#define BNXT_LINK_DUPLEX_HALF  PORT_PHY_QCFG_RESP_DUPLEX_HALF
+#define BNXT_LINK_DUPLEX_FULL  PORT_PHY_QCFG_RESP_DUPLEX_FULL
+       u8                      pause;
+#define BNXT_LINK_PAUSE_TX     PORT_PHY_QCFG_RESP_PAUSE_TX
+#define BNXT_LINK_PAUSE_RX     PORT_PHY_QCFG_RESP_PAUSE_RX
+#define BNXT_LINK_PAUSE_BOTH   (PORT_PHY_QCFG_RESP_PAUSE_RX | \
+                                PORT_PHY_QCFG_RESP_PAUSE_TX)
+       u8                      auto_pause_setting;
+       u8                      force_pause_setting;
+       u8                      duplex_setting;
+       u8                      auto_mode;
+#define BNXT_AUTO_MODE(mode)   ((mode) > BNXT_LINK_AUTO_NONE && \
+                                (mode) <= BNXT_LINK_AUTO_MSK)
+#define BNXT_LINK_AUTO_NONE     PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
+#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
+#define BNXT_LINK_AUTO_ONESPD  PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
+#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
+#define BNXT_LINK_AUTO_MSK     PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define PHY_VER_LEN            3
+       u8                      phy_ver[PHY_VER_LEN];
+       u16                     link_speed;
+#define BNXT_LINK_SPEED_100MB  PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
+#define BNXT_LINK_SPEED_1GB    PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
+#define BNXT_LINK_SPEED_2GB    PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
+#define BNXT_LINK_SPEED_2_5GB  PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
+#define BNXT_LINK_SPEED_10GB   PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
+#define BNXT_LINK_SPEED_20GB   PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
+#define BNXT_LINK_SPEED_25GB   PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
+#define BNXT_LINK_SPEED_40GB   PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+#define BNXT_LINK_SPEED_50GB   PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+       u16                     support_speeds;
+       u16                     auto_link_speeds;
+#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
+#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
+#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
+#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
+#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
+#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
+#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
+#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
+#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+       u16                     auto_link_speed;
+       u16                     force_link_speed;
+       u32                     preemphasis;
+
+       /* copy of requested setting from ethtool cmd */
+       u8                      autoneg;
+#define BNXT_AUTONEG_SPEED             1
+#define BNXT_AUTONEG_FLOW_CTRL         2
+       u8                      req_duplex;
+       u8                      req_flow_ctrl;
+       u16                     req_link_speed;
+       u32                     advertising;
+       bool                    force_link_chng;
+       /* a copy of phy_qcfg output used to report link
+        * info to VF
+        */
+       struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+};
+
+#define BNXT_MAX_QUEUE 8
+
+struct bnxt_queue_info {
+       u8      queue_id;
+       u8      queue_profile;
+};
+
+struct bnxt {
+       void __iomem            *bar0;
+       void __iomem            *bar1;
+       void __iomem            *bar2;
+
+       u32                     reg_base;
+
+       struct net_device       *dev;
+       struct pci_dev          *pdev;
+
+       atomic_t                intr_sem;
+
+       u32                     flags;
+       #define BNXT_FLAG_DCB_ENABLED   0x1
+       #define BNXT_FLAG_VF            0x2
+       #define BNXT_FLAG_LRO           0x4
+#ifdef CONFIG_INET
+       #define BNXT_FLAG_GRO           0x8
+#else
+       /* Cannot support hardware GRO if CONFIG_INET is not set */
+       #define BNXT_FLAG_GRO           0x0
+#endif
+       #define BNXT_FLAG_TPA           (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
+       #define BNXT_FLAG_JUMBO         0x10
+       #define BNXT_FLAG_STRIP_VLAN    0x20
+       #define BNXT_FLAG_AGG_RINGS     (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+                                        BNXT_FLAG_LRO)
+       #define BNXT_FLAG_USING_MSIX    0x40
+       #define BNXT_FLAG_MSIX_CAP      0x80
+       #define BNXT_FLAG_RFS           0x100
+       #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |             \
+                                           BNXT_FLAG_RFS |             \
+                                           BNXT_FLAG_STRIP_VLAN)
+
+#define BNXT_PF(bp)            (!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp)            ((bp)->flags & BNXT_FLAG_VF)
+
+       struct bnxt_napi        **bnapi;
+
+       u32                     rx_buf_size;
+       u32                     rx_buf_use_size;        /* useable size */
+       u32                     rx_ring_size;
+       u32                     rx_agg_ring_size;
+       u32                     rx_copy_thresh;
+       u32                     rx_ring_mask;
+       u32                     rx_agg_ring_mask;
+       int                     rx_nr_pages;
+       int                     rx_agg_nr_pages;
+       int                     rx_nr_rings;
+       int                     rsscos_nr_ctxs;
+
+       u32                     tx_ring_size;
+       u32                     tx_ring_mask;
+       int                     tx_nr_pages;
+       int                     tx_nr_rings;
+       int                     tx_nr_rings_per_tc;
+
+       int                     tx_wake_thresh;
+       int                     tx_push_thresh;
+       int                     tx_push_size;
+
+       u32                     cp_ring_size;
+       u32                     cp_ring_mask;
+       u32                     cp_bit;
+       int                     cp_nr_pages;
+       int                     cp_nr_rings;
+
+       int                     num_stat_ctxs;
+       struct bnxt_ring_grp_info       *grp_info;
+       struct bnxt_vnic_info   *vnic_info;
+       int                     nr_vnics;
+
+       u8                      max_tc;
+       struct bnxt_queue_info  q_info[BNXT_MAX_QUEUE];
+
+       unsigned int            current_interval;
+#define BNXT_TIMER_INTERVAL    (HZ / 2)
+
+       struct timer_list       timer;
+
+       int                     state;
+#define BNXT_STATE_CLOSED      0
+#define BNXT_STATE_OPEN                1
+
+       struct bnxt_irq *irq_tbl;
+       u8                      mac_addr[ETH_ALEN];
+
+       u32                     msg_enable;
+
+       u16                     hwrm_cmd_seq;
+       u32                     hwrm_intr_seq_id;
+       void                    *hwrm_cmd_resp_addr;
+       dma_addr_t              hwrm_cmd_resp_dma_addr;
+       void                    *hwrm_dbg_resp_addr;
+       dma_addr_t              hwrm_dbg_resp_dma_addr;
+#define HWRM_DBG_REG_BUF_SIZE  128
+       struct mutex            hwrm_cmd_lock;  /* serialize hwrm messages */
+       struct hwrm_ver_get_output      ver_resp;
+#define FW_VER_STR_LEN         32
+#define BC_HWRM_STR_LEN                21
+#define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
+       char                    fw_ver_str[FW_VER_STR_LEN];
+       __be16                  vxlan_port;
+       u8                      vxlan_port_cnt;
+       __le16                  vxlan_fw_dst_port_id;
+       u8                      nge_port_cnt;
+       __le16                  nge_fw_dst_port_id;
+       u16                     coal_ticks;
+       u16                     coal_ticks_irq;
+       u16                     coal_bufs;
+       u16                     coal_bufs_irq;
+
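+/* The hardware coalescing timer counts 25 ticks per 2 usec (80 ns per
+ * tick), hence the 25/2 scaling below.
+ */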
+#define BNXT_USEC_TO_COAL_TIMER(x)     ((x) * 25 / 2)
+#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
+
+       struct work_struct      sp_task;
+       unsigned long           sp_event;
+#define BNXT_RX_MASK_SP_EVENT          0
+#define BNXT_RX_NTP_FLTR_SP_EVENT      1
+#define BNXT_LINK_CHNG_SP_EVENT                2
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT        4
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT   8
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT   16
+#define BNXT_RESET_TASK_SP_EVENT       32
+#define BNXT_RST_RING_SP_EVENT         64
+
+       struct bnxt_pf_info     pf;
+#ifdef CONFIG_BNXT_SRIOV
+       int                     nr_vfs;
+       struct bnxt_vf_info     vf;
+       wait_queue_head_t       sriov_cfg_wait;
+       bool                    sriov_cfg;
+#define BNXT_SRIOV_CFG_WAIT_TMO        msecs_to_jiffies(10000)
+#endif
+
+#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_NTP_FLTR_HASH_SIZE        512
+#define BNXT_NTP_FLTR_HASH_MASK        (BNXT_NTP_FLTR_HASH_SIZE - 1)
+       struct hlist_head       ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
+       spinlock_t              ntp_fltr_lock;  /* for hash table add, del */
+
+       unsigned long           *ntp_fltr_bmap;
+       int                     ntp_fltr_count;
+
+       struct bnxt_link_info   link_info;
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
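+/* Busy-poll ownership: poll_state serializes the NAPI handler and the
+ * busy-poll path.  A context claims the rings by moving the state out of
+ * BNXT_STATE_IDLE with cmpxchg; bnxt_disable_poll() parks the state in
+ * BNXT_STATE_DISABLE so that neither path can take ownership.
+ */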
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+       atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the NAPI poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+       int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+                               BNXT_STATE_NAPI);
+
+       return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+       atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the busy poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+       int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+                               BNXT_STATE_POLL);
+
+       return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+       atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+       return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+       int old;
+
+       while (1) {
+               old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+                                    BNXT_STATE_DISABLE);
+               if (old == BNXT_STATE_IDLE)
+                       break;
+               usleep_range(500, 5000);
+       }
+}
+
+#else
+
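+/* Stubs for builds without CONFIG_NET_RX_BUSY_POLL: NAPI always owns the
+ * rings and busy polling never runs.
+ */
+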
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+       return true;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+       return false;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+       return false;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+#endif
+
+void bnxt_set_ring_params(struct bnxt *);
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
+int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_set_pause(struct bnxt *);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_max_rings(struct bnxt *, int *, int *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
new file mode 100644
index 0000000..45bd628
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -0,0 +1,1149 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ethtool.h"
+#include "bnxt_nvm_defs.h"     /* NVRAM content constant and structure defs */
+#include "bnxt_fw_hdr.h"       /* Firmware hdr constant and structure defs */
+#define FLASH_NVRAM_TIMEOUT    ((HWRM_CMD_TIMEOUT) * 100)
+
+static u32 bnxt_get_msglevel(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       return bp->msg_enable;
+}
+
+static void bnxt_set_msglevel(struct net_device *dev, u32 value)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       bp->msg_enable = value;
+}
+
+static int bnxt_get_coalesce(struct net_device *dev,
+                            struct ethtool_coalesce *coal)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       memset(coal, 0, sizeof(*coal));
+
+       coal->rx_coalesce_usecs =
+               max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
+       coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
+       coal->rx_coalesce_usecs_irq =
+               max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
+       coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
+
+       return 0;
+}
+
+static int bnxt_set_coalesce(struct net_device *dev,
+                            struct ethtool_coalesce *coal)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc = 0;
+
+       bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
+       bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
+       bp->coal_ticks_irq =
+               BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
+       bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+       if (netif_running(dev))
+               rc = bnxt_hwrm_set_coal(bp);
+
+       return rc;
+}
+
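+/* Per-ring stat count: the twenty 64-bit counters of struct ctx_hw_stats
+ * plus the software rx_l4_csum_errors counter.
+ */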
+#define BNXT_NUM_STATS 21
+
+static int bnxt_get_sset_count(struct net_device *dev, int sset)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       switch (sset) {
+       case ETH_SS_STATS:
+               return BNXT_NUM_STATS * bp->cp_nr_rings;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void bnxt_get_ethtool_stats(struct net_device *dev,
+                                  struct ethtool_stats *stats, u64 *buf)
+{
+       u32 i, j = 0;
+       struct bnxt *bp = netdev_priv(dev);
+       u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
+       u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+
+       memset(buf, 0, buf_size);
+
+       if (!bp->bnapi)
+               return;
+
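+       /* For each completion ring, copy out every hardware counter from
+        * its ctx_hw_stats block, then append the software L4 checksum
+        * error count.
+        */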
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+               __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+               int k;
+
+               for (k = 0; k < stat_fields; j++, k++)
+                       buf[j] = le64_to_cpu(hw_stats[k]);
+               buf[j++] = cpr->rx_l4_csum_errors;
+       }
+}
+
+static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       u32 i;
+
+       switch (stringset) {
+       /* The number of strings must match BNXT_NUM_STATS defined above. */
+       case ETH_SS_STATS:
+               for (i = 0; i < bp->cp_nr_rings; i++) {
+                       sprintf(buf, "[%d]: rx_ucast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_mcast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_bcast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_discards", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_drops", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_ucast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_mcast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_bcast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_ucast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_mcast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_bcast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_discards", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_drops", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_ucast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_mcast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_bcast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tpa_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tpa_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tpa_events", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tpa_aborts", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_l4_csum_errors", i);
+                       buf += ETH_GSTRING_LEN;
+               }
+               break;
+       default:
+               netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
+                          stringset);
+               break;
+       }
+}
+
+static void bnxt_get_ringparam(struct net_device *dev,
+                              struct ethtool_ringparam *ering)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+       ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+       ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
+
+       ering->rx_pending = bp->rx_ring_size;
+       ering->rx_jumbo_pending = bp->rx_agg_ring_size;
+       ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnxt_set_ringparam(struct net_device *dev,
+                             struct ethtool_ringparam *ering)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
+           (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
+           (ering->tx_pending <= MAX_SKB_FRAGS))
+               return -EINVAL;
+
+       if (netif_running(dev))
+               bnxt_close_nic(bp, false, false);
+
+       bp->rx_ring_size = ering->rx_pending;
+       bp->tx_ring_size = ering->tx_pending;
+       bnxt_set_ring_params(bp);
+
+       if (netif_running(dev))
+               return bnxt_open_nic(bp, false, false);
+
+       return 0;
+}
+
+static void bnxt_get_channels(struct net_device *dev,
+                             struct ethtool_channels *channel)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int max_rx_rings, max_tx_rings, tcs;
+
+       bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+       tcs = netdev_get_num_tc(dev);
+       if (tcs > 1)
+               max_tx_rings /= tcs;
+
+       channel->max_rx = max_rx_rings;
+       channel->max_tx = max_tx_rings;
+       channel->max_other = 0;
+       channel->max_combined = 0;
+       channel->rx_count = bp->rx_nr_rings;
+       channel->tx_count = bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_set_channels(struct net_device *dev,
+                            struct ethtool_channels *channel)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int max_rx_rings, max_tx_rings, tcs;
+       int                     rc = 0;
+
+       if (channel->other_count || channel->combined_count ||
+           !channel->rx_count || !channel->tx_count)
+               return -EINVAL;
+
+       bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+       tcs = netdev_get_num_tc(dev);
+       if (tcs > 1)
+               max_tx_rings /= tcs;
+
+       if (channel->rx_count > max_rx_rings ||
+           channel->tx_count > max_tx_rings)
+               return -EINVAL;
+
+       if (netif_running(dev)) {
+               if (BNXT_PF(bp)) {
+                       /* TODO CHIMP_FW: Send message to all VFs
+                        * before PF unload
+                        */
+               }
+               rc = bnxt_close_nic(bp, true, false);
+               if (rc) {
+                       netdev_err(bp->dev, "Set channel failure rc :%x\n",
+                                  rc);
+                       return rc;
+               }
+       }
+
+       bp->rx_nr_rings = channel->rx_count;
+       bp->tx_nr_rings_per_tc = channel->tx_count;
+       bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+       if (tcs > 1)
+               bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+       bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+       bp->num_stat_ctxs = bp->cp_nr_rings;
+
+       if (netif_running(dev)) {
+               rc = bnxt_open_nic(bp, true, false);
+               if (!rc && BNXT_PF(bp)) {
+                       /* TODO CHIMP_FW: Send message to all VFs
+                        * to re-enable
+                        */
+               }
+       }
+
+       return rc;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+                           u32 *rule_locs)
+{
+       int i, j = 0;
+
+       cmd->data = bp->ntp_fltr_count;
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+               struct hlist_head *head;
+               struct bnxt_ntuple_filter *fltr;
+
+               head = &bp->ntp_fltr_hash_tbl[i];
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(fltr, head, hash) {
+                       if (j == cmd->rule_cnt)
+                               break;
+                       rule_locs[j++] = fltr->sw_id;
+               }
+               rcu_read_unlock();
+               if (j == cmd->rule_cnt)
+                       break;
+       }
+       cmd->rule_cnt = j;
+       return 0;
+}
+
+static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fs =
+               (struct ethtool_rx_flow_spec *)&cmd->fs;
+       struct bnxt_ntuple_filter *fltr;
+       struct flow_keys *fkeys;
+       int i, rc = -EINVAL;
+
+       if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+               return rc;
+
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+               struct hlist_head *head;
+
+               head = &bp->ntp_fltr_hash_tbl[i];
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(fltr, head, hash) {
+                       if (fltr->sw_id == fs->location)
+                               goto fltr_found;
+               }
+               rcu_read_unlock();
+       }
+       return rc;
+
+fltr_found:
+       fkeys = &fltr->fkeys;
+       if (fkeys->basic.ip_proto == IPPROTO_TCP)
+               fs->flow_type = TCP_V4_FLOW;
+       else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+               fs->flow_type = UDP_V4_FLOW;
+       else
+               goto fltr_err;
+
+       fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+       fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+       fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+       fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+       fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+       fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+       fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+       fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+
+       fs->ring_cookie = fltr->rxq;
+       rc = 0;
+
+fltr_err:
+       rcu_read_unlock();
+
+       return rc;
+}
+
+static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+                         u32 *rule_locs)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc = 0;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXRINGS:
+               cmd->data = bp->rx_nr_rings;
+               break;
+
+       case ETHTOOL_GRXCLSRLCNT:
+               cmd->rule_cnt = bp->ntp_fltr_count;
+               cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+               break;
+
+       case ETHTOOL_GRXCLSRLALL:
+               rc = bnxt_grxclsrlall(bp, cmd, rule_locs);
+               break;
+
+       case ETHTOOL_GRXCLSRULE:
+               rc = bnxt_grxclsrule(bp, cmd);
+               break;
+
+       default:
+               rc = -EOPNOTSUPP;
+               break;
+       }
+
+       return rc;
+}
+#endif
+
+static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+{
+       return HW_HASH_INDEX_SIZE;
+}
+
+static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
+{
+       return HW_HASH_KEY_SIZE;
+}
+
+static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+                        u8 *hfunc)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       int i = 0;
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       if (indir)
+               for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
+                       indir[i] = le16_to_cpu(vnic->rss_table[i]);
+
+       if (key)
+               memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+
+       return 0;
+}
+
+static void bnxt_get_drvinfo(struct net_device *dev,
+                            struct ethtool_drvinfo *info)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+       strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+       info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+       info->testinfo_len = BNXT_NUM_TESTS(bp);
+       /* TODO CHIMP_FW: eeprom dump details */
+       info->eedump_len = 0;
+       /* TODO CHIMP FW: reg dump details */
+       info->regdump_len = 0;
+}
+
+static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+{
+       u16 fw_speeds = link_info->support_speeds;
+       u32 speed_mask = 0;
+
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+               speed_mask |= SUPPORTED_100baseT_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+               speed_mask |= SUPPORTED_1000baseT_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+               speed_mask |= SUPPORTED_2500baseX_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+               speed_mask |= SUPPORTED_10000baseT_Full;
+       /* TODO: support 25GB, 50GB with different cable type */
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+               speed_mask |= SUPPORTED_20000baseMLD2_Full |
+                       SUPPORTED_20000baseKR2_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+               speed_mask |= SUPPORTED_40000baseKR4_Full |
+                       SUPPORTED_40000baseCR4_Full |
+                       SUPPORTED_40000baseSR4_Full |
+                       SUPPORTED_40000baseLR4_Full;
+
+       return speed_mask;
+}
+
+static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+{
+       u16 fw_speeds = link_info->auto_link_speeds;
+       u32 speed_mask = 0;
+
+       /* TODO: support 25GB, 40GB, 50GB with different cable type */
+       /* set the advertised speeds */
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+               speed_mask |= ADVERTISED_100baseT_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+               speed_mask |= ADVERTISED_1000baseT_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+               speed_mask |= ADVERTISED_2500baseX_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+               speed_mask |= ADVERTISED_10000baseT_Full;
+       /* TODO: how to advertise 20, 25, 40, 50GB with different cable type? */
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+               speed_mask |= ADVERTISED_20000baseMLD2_Full |
+                             ADVERTISED_20000baseKR2_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+               speed_mask |= ADVERTISED_40000baseKR4_Full |
+                             ADVERTISED_40000baseCR4_Full |
+                             ADVERTISED_40000baseSR4_Full |
+                             ADVERTISED_40000baseLR4_Full;
+       return speed_mask;
+}
+
+u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+{
+       switch (fw_link_speed) {
+       case BNXT_LINK_SPEED_100MB:
+               return SPEED_100;
+       case BNXT_LINK_SPEED_1GB:
+               return SPEED_1000;
+       case BNXT_LINK_SPEED_2_5GB:
+               return SPEED_2500;
+       case BNXT_LINK_SPEED_10GB:
+               return SPEED_10000;
+       case BNXT_LINK_SPEED_20GB:
+               return SPEED_20000;
+       case BNXT_LINK_SPEED_25GB:
+               return SPEED_25000;
+       case BNXT_LINK_SPEED_40GB:
+               return SPEED_40000;
+       case BNXT_LINK_SPEED_50GB:
+               return SPEED_50000;
+       default:
+               return SPEED_UNKNOWN;
+       }
+}
+
+static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+       u16 ethtool_speed;
+
+       cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+
+       if (link_info->auto_link_speeds)
+               cmd->supported |= SUPPORTED_Autoneg;
+
+       if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+               cmd->advertising =
+                       bnxt_fw_to_ethtool_advertised_spds(link_info);
+               cmd->advertising |= ADVERTISED_Autoneg;
+               cmd->autoneg = AUTONEG_ENABLE;
+       } else {
+               cmd->autoneg = AUTONEG_DISABLE;
+               cmd->advertising = 0;
+       }
+       if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+               if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+                   BNXT_LINK_PAUSE_BOTH) {
+                       cmd->advertising |= ADVERTISED_Pause;
+                       cmd->supported |= SUPPORTED_Pause;
+               } else {
+                       cmd->advertising |= ADVERTISED_Asym_Pause;
+                       cmd->supported |= SUPPORTED_Asym_Pause;
+                       if (link_info->auto_pause_setting &
+                           BNXT_LINK_PAUSE_RX)
+                               cmd->advertising |= ADVERTISED_Pause;
+               }
+       } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+               if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+                   BNXT_LINK_PAUSE_BOTH) {
+                       cmd->supported |= SUPPORTED_Pause;
+               } else {
+                       cmd->supported |= SUPPORTED_Asym_Pause;
+                       if (link_info->force_pause_setting &
+                           BNXT_LINK_PAUSE_RX)
+                               cmd->supported |= SUPPORTED_Pause;
+               }
+       }
+
+       cmd->port = PORT_NONE;
+       if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+               cmd->port = PORT_TP;
+               cmd->supported |= SUPPORTED_TP;
+               cmd->advertising |= ADVERTISED_TP;
+       } else {
+               cmd->supported |= SUPPORTED_FIBRE;
+               cmd->advertising |= ADVERTISED_FIBRE;
+
+               if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+                       cmd->port = PORT_DA;
+               else if (link_info->media_type ==
+                        PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+                       cmd->port = PORT_FIBRE;
+       }
+
+       if (link_info->phy_link_status == BNXT_LINK_LINK) {
+               if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+                       cmd->duplex = DUPLEX_FULL;
+       } else {
+               cmd->duplex = DUPLEX_UNKNOWN;
+       }
+       ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+       ethtool_cmd_speed_set(cmd, ethtool_speed);
+       if (link_info->transceiver ==
+               PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+               cmd->transceiver = XCVR_INTERNAL;
+       else
+               cmd->transceiver = XCVR_EXTERNAL;
+       cmd->phy_address = link_info->phy_addr;
+
+       return 0;
+}
+
+static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+{
+       switch (ethtool_speed) {
+       case SPEED_100:
+               return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+       case SPEED_1000:
+               return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+       case SPEED_2500:
+               return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+       case SPEED_10000:
+               return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+       case SPEED_20000:
+               return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+       case SPEED_25000:
+               return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+       case SPEED_40000:
+               return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+       case SPEED_50000:
+               return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+       default:
+               netdev_err(dev, "unsupported speed!\n");
+               break;
+       }
+       return 0;
+}
+
+static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+{
+       u16 fw_speed_mask = 0;
+
+       /* only autoneg speeds of 100 Mbps, 1 Gbps and 10 Gbps are supported */
+       if (advertising & (ADVERTISED_100baseT_Full |
+                          ADVERTISED_100baseT_Half)) {
+               fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
+       }
+       if (advertising & (ADVERTISED_1000baseT_Full |
+                          ADVERTISED_1000baseT_Half)) {
+               fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
+       }
+       if (advertising & ADVERTISED_10000baseT_Full)
+               fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
+
+       return fw_speed_mask;
+}
+
+static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       int rc = 0;
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+       u32 speed, fw_advertising = 0;
+       bool set_pause = false;
+
+       if (BNXT_VF(bp))
+               return rc;
+
+       if (cmd->autoneg == AUTONEG_ENABLE) {
+               if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+                       netdev_err(dev, "Media type doesn't support autoneg\n");
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED |
+                                        ADVERTISED_Autoneg |
+                                        ADVERTISED_TP |
+                                        ADVERTISED_Pause |
+                                        ADVERTISED_Asym_Pause)) {
+                       netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
+                                  cmd->advertising);
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
+               if (fw_advertising & ~link_info->support_speeds) {
+                       netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
+                                  cmd->advertising);
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               link_info->autoneg |= BNXT_AUTONEG_SPEED;
+               if (!fw_advertising)
+                       link_info->advertising = link_info->support_speeds;
+               else
+                       link_info->advertising = fw_advertising;
+               /* any change to autoneg will cause link change, therefore the
+                * driver should put back the original pause setting in autoneg
+                */
+               set_pause = true;
+       } else {
+               /* TODO: currently don't support half duplex */
+               if (cmd->duplex == DUPLEX_HALF) {
+                       netdev_err(dev, "HALF DUPLEX is not supported!\n");
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               /* If we receive a request for an unknown duplex, assume full duplex */
+               if (cmd->duplex == DUPLEX_UNKNOWN)
+                       cmd->duplex = DUPLEX_FULL;
+               speed = ethtool_cmd_speed(cmd);
+               link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+               link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+               link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+               link_info->advertising = 0;
+       }
+
+       if (netif_running(dev))
+               rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+
+set_setting_exit:
+       return rc;
+}
+
+static void bnxt_get_pauseparam(struct net_device *dev,
+                               struct ethtool_pauseparam *epause)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       if (BNXT_VF(bp))
+               return;
+       epause->autoneg = !!(link_info->auto_pause_setting &
+                            BNXT_LINK_PAUSE_BOTH);
+       epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
+       epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
+}
+
+static int bnxt_set_pauseparam(struct net_device *dev,
+                              struct ethtool_pauseparam *epause)
+{
+       int rc = 0;
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       if (BNXT_VF(bp))
+               return rc;
+
+       if (epause->autoneg) {
+               link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+               link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+       } else {
+               /* when transition from auto pause to force pause,
+                * force a link change
+                */
+               if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+                       link_info->force_link_chng = true;
+               link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
+               link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+       }
+       if (epause->rx_pause)
+               link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
+       else
+               link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
+
+       if (epause->tx_pause)
+               link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+       else
+               link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
+
+       if (netif_running(dev))
+               rc = bnxt_hwrm_set_pause(bp);
+       return rc;
+}
+
+static u32 bnxt_get_link(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       /* TODO: handle MF, VF, driver close case */
+       return bp->link_info.link_up;
+}
+
+static int bnxt_flash_nvram(struct net_device *dev,
+                           u16 dir_type,
+                           u16 dir_ordinal,
+                           u16 dir_ext,
+                           u16 dir_attr,
+                           const u8 *data,
+                           size_t data_len)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc;
+       struct hwrm_nvm_write_input req = {0};
+       dma_addr_t dma_handle;
+       u8 *kmem;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+
+       req.dir_type = cpu_to_le16(dir_type);
+       req.dir_ordinal = cpu_to_le16(dir_ordinal);
+       req.dir_ext = cpu_to_le16(dir_ext);
+       req.dir_attr = cpu_to_le16(dir_attr);
+       req.dir_data_length = cpu_to_le32(data_len);
+
+       kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
+                                 GFP_KERNEL);
+       if (!kmem) {
+               netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+                          (unsigned)data_len);
+               return -ENOMEM;
+       }
+       memcpy(kmem, data, data_len);
+       req.host_src_addr = cpu_to_le64(dma_handle);
+
+       rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
+       dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+
+       return rc;
+}
+
+static int bnxt_flash_firmware(struct net_device *dev,
+                              u16 dir_type,
+                              const u8 *fw_data,
+                              size_t fw_size)
+{
+       int     rc = 0;
+       u16     code_type;
+       u32     stored_crc;
+       u32     calculated_crc;
+       struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
+
+       switch (dir_type) {
+       case BNX_DIR_TYPE_BOOTCODE:
+       case BNX_DIR_TYPE_BOOTCODE_2:
+               code_type = CODE_BOOT;
+               break;
+       default:
+               netdev_err(dev, "Unsupported directory entry type: %u\n",
+                          dir_type);
+               return -EINVAL;
+       }
+       if (fw_size < sizeof(struct bnxt_fw_header)) {
+               netdev_err(dev, "Invalid firmware file size: %u\n",
+                          (unsigned int)fw_size);
+               return -EINVAL;
+       }
+       if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
+               netdev_err(dev, "Invalid firmware signature: %08X\n",
+                          le32_to_cpu(header->signature));
+               return -EINVAL;
+       }
+       if (header->code_type != code_type) {
+               netdev_err(dev, "Expected firmware type: %d, read: %d\n",
+                          code_type, header->code_type);
+               return -EINVAL;
+       }
+       if (header->device != DEVICE_CUMULUS_FAMILY) {
+               netdev_err(dev, "Expected firmware device family %d, read: %d\n",
+                          DEVICE_CUMULUS_FAMILY, header->device);
+               return -EINVAL;
+       }
+       /* Confirm the CRC32 checksum of the file: */
+       stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+                                            sizeof(stored_crc)));
+       calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+       if (calculated_crc != stored_crc) {
+               netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
+                          (unsigned long)stored_crc,
+                          (unsigned long)calculated_crc);
+               return -EINVAL;
+       }
+       /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
+       rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+                             0, 0, fw_data, fw_size);
+       if (rc == 0) {  /* Firmware update successful */
+               /* TODO: Notify processor it needs to reset itself */
+       }
+       return rc;
+}
+
+static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
+{
+       switch (dir_type) {
+       case BNX_DIR_TYPE_CHIMP_PATCH:
+       case BNX_DIR_TYPE_BOOTCODE:
+       case BNX_DIR_TYPE_BOOTCODE_2:
+       case BNX_DIR_TYPE_APE_FW:
+       case BNX_DIR_TYPE_APE_PATCH:
+       case BNX_DIR_TYPE_KONG_FW:
+       case BNX_DIR_TYPE_KONG_PATCH:
+               return true;
+       }
+
+       return false;
+}
+
+static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+{
+       switch (dir_type) {
+       case BNX_DIR_TYPE_AVS:
+       case BNX_DIR_TYPE_EXP_ROM_MBA:
+       case BNX_DIR_TYPE_PCIE:
+       case BNX_DIR_TYPE_TSCF_UCODE:
+       case BNX_DIR_TYPE_EXT_PHY:
+       case BNX_DIR_TYPE_CCM:
+       case BNX_DIR_TYPE_ISCSI_BOOT:
+       case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+       case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+               return true;
+       }
+
+       return false;
+}
+
+static bool bnxt_dir_type_is_executable(u16 dir_type)
+{
+       return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+               bnxt_dir_type_is_unprotected_exec_format(dir_type);
+}
+
+static int bnxt_flash_firmware_from_file(struct net_device *dev,
+                                        u16 dir_type,
+                                        const char *filename)
+{
+       const struct firmware  *fw;
+       int                     rc;
+
+       if (!bnxt_dir_type_is_executable(dir_type))
+               return -EINVAL;
+
+       rc = request_firmware(&fw, filename, &dev->dev);
+       if (rc != 0) {
+               netdev_err(dev, "Error %d requesting firmware file: %s\n",
+                          rc, filename);
+               return rc;
+       }
+       if (bnxt_dir_type_is_ape_bin_format(dir_type))
+               rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+       else
+               rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+                                     0, 0, fw->data, fw->size);
+       release_firmware(fw);
+       return rc;
+}
+
+static int bnxt_flash_package_from_file(struct net_device *dev,
+                                       char *filename)
+{
+       netdev_err(dev, "packages are not yet supported\n");
+       return -EINVAL;
+}
+
+static int bnxt_flash_device(struct net_device *dev,
+                            struct ethtool_flash *flash)
+{
+       if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
+               netdev_err(dev, "flashdev not supported from a virtual function\n");
+               return -EINVAL;
+       }
+
+       if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
+               return bnxt_flash_package_from_file(dev, flash->data);
+
+       return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
+}
+
+static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc;
+       struct hwrm_nvm_get_dir_info_input req = {0};
+       struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               *entries = le32_to_cpu(output->entries);
+               *length = le32_to_cpu(output->entry_length);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_get_eeprom_len(struct net_device *dev)
+{
+       /* The -1 return value allows the entire 32-bit range of offsets to be
+        * passed via the ethtool command-line utility.
+        */
+       return -1;
+}
+
+static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc;
+       u32 dir_entries;
+       u32 entry_length;
+       u8 *buf;
+       size_t buflen;
+       dma_addr_t dma_handle;
+       struct hwrm_nvm_get_dir_entries_input req = {0};
+
+       rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
+       if (rc != 0)
+               return rc;
+
+       /* Insert 2 bytes of directory info (count and size of entries) */
+       if (len < 2)
+               return -EINVAL;
+
+       *data++ = dir_entries;
+       *data++ = entry_length;
+       len -= 2;
+       memset(data, 0xff, len);
+
+       buflen = dir_entries * entry_length;
+       buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
+                                GFP_KERNEL);
+       if (!buf) {
+               netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+                          (unsigned)buflen);
+               return -ENOMEM;
+       }
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
+       req.host_dest_addr = cpu_to_le64(dma_handle);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc == 0)
+               memcpy(data, buf, len > buflen ? buflen : len);
+       dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+       return rc;
+}
+
+static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+                              u32 length, u8 *data)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc;
+       u8 *buf;
+       dma_addr_t dma_handle;
+       struct hwrm_nvm_read_input req = {0};
+
+       buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
+                                GFP_KERNEL);
+       if (!buf) {
+               netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+                          (unsigned)length);
+               return -ENOMEM;
+       }
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
+       req.host_dest_addr = cpu_to_le64(dma_handle);
+       req.dir_idx = cpu_to_le16(index);
+       req.offset = cpu_to_le32(offset);
+       req.len = cpu_to_le32(length);
+
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc == 0)
+               memcpy(data, buf, length);
+       dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
+       return rc;
+}
+
+static int bnxt_get_eeprom(struct net_device *dev,
+                          struct ethtool_eeprom *eeprom,
+                          u8 *data)
+{
+       u32 index;
+       u32 offset;
+
+       if (eeprom->offset == 0) /* special offset value to get directory */
+               return bnxt_get_nvram_directory(dev, eeprom->len, data);
+
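+       /* Otherwise the top byte of the offset is the 1-based directory
+        * index and the low 24 bits are the byte offset within that item.
+        */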
+       index = eeprom->offset >> 24;
+       offset = eeprom->offset & 0xffffff;
+
+       if (index == 0) {
+               netdev_err(dev, "unsupported index value: %d\n", index);
+               return -EINVAL;
+       }
+
+       return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
+}
+
+static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_nvm_erase_dir_entry_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
+       req.dir_idx = cpu_to_le16(index);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_set_eeprom(struct net_device *dev,
+                          struct ethtool_eeprom *eeprom,
+                          u8 *data)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       u8 index, dir_op;
+       u16 type, ext, ordinal, attr;
+
+       if (!BNXT_PF(bp)) {
+               netdev_err(dev, "NVM write not supported from a virtual function\n");
+               return -EINVAL;
+       }
+
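+       /* The upper 16 bits of the ethtool magic select the NVM directory
+        * entry type.  The special type 0xffff requests a directory
+        * operation with the op code and 1-based index packed into the low
+        * 16 bits.
+        */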
+       type = eeprom->magic >> 16;
+
+       if (type == 0xffff) { /* special value for directory operations */
+               index = eeprom->magic & 0xff;
+               dir_op = eeprom->magic >> 8;
+               if (index == 0)
+                       return -EINVAL;
+               switch (dir_op) {
+               case 0x0e: /* erase */
+                       if (eeprom->offset != ~eeprom->magic)
+                               return -EINVAL;
+                       return bnxt_erase_nvram_directory(dev, index - 1);
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       /* Create or re-write an NVM item: */
+       if (bnxt_dir_type_is_executable(type))
+               return -EINVAL;
+       ext = eeprom->magic & 0xffff;
+       ordinal = eeprom->offset >> 16;
+       attr = eeprom->offset & 0xffff;
+
+       return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+                               eeprom->len);
+}
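+
+/* Sketch of the magic/offset encoding accepted above.  Directory
+ * operations use magic = 0xffff<dir_op><index>; e.g. 0xffff0e03 erases
+ * entry 3, with offset required to equal ~magic as a confirmation.  Item
+ * writes use magic = (type << 16) | ext and offset = (ordinal << 16) |
+ * attr.
+ */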
+
+const struct ethtool_ops bnxt_ethtool_ops = {
+       .get_settings           = bnxt_get_settings,
+       .set_settings           = bnxt_set_settings,
+       .get_pauseparam         = bnxt_get_pauseparam,
+       .set_pauseparam         = bnxt_set_pauseparam,
+       .get_drvinfo            = bnxt_get_drvinfo,
+       .get_coalesce           = bnxt_get_coalesce,
+       .set_coalesce           = bnxt_set_coalesce,
+       .get_msglevel           = bnxt_get_msglevel,
+       .set_msglevel           = bnxt_set_msglevel,
+       .get_sset_count         = bnxt_get_sset_count,
+       .get_strings            = bnxt_get_strings,
+       .get_ethtool_stats      = bnxt_get_ethtool_stats,
+       .set_ringparam          = bnxt_set_ringparam,
+       .get_ringparam          = bnxt_get_ringparam,
+       .get_channels           = bnxt_get_channels,
+       .set_channels           = bnxt_set_channels,
+#ifdef CONFIG_RFS_ACCEL
+       .get_rxnfc              = bnxt_get_rxnfc,
+#endif
+       .get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
+       .get_rxfh_key_size      = bnxt_get_rxfh_key_size,
+       .get_rxfh               = bnxt_get_rxfh,
+       .flash_device           = bnxt_flash_device,
+       .get_eeprom_len         = bnxt_get_eeprom_len,
+       .get_eeprom             = bnxt_get_eeprom,
+       .set_eeprom             = bnxt_set_eeprom,
+       .get_link               = bnxt_get_link,
+};
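+
+/* Wiring sketch (illustrative only; the registration itself happens in
+ * the core driver, not in this file):
+ *
+ *     dev->ethtool_ops = &bnxt_ethtool_ops;
+ */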
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
new file mode 100644 (file)
index 0000000..98fa81e
--- /dev/null
@@ -0,0 +1,17 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ETHTOOL_H
+#define BNXT_ETHTOOL_H
+
+extern const struct ethtool_ops bnxt_ethtool_ops;
+
+u32 bnxt_fw_to_ethtool_speed(u16);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
new file mode 100644 (file)
index 0000000..e0aac65
--- /dev/null
@@ -0,0 +1,104 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __BNXT_FW_HDR_H__
+#define __BNXT_FW_HDR_H__
+
+#define BNXT_FIRMWARE_BIN_SIGNATURE     0x1a4d4342     /* "BCM"+0x1a */
+
+enum SUPPORTED_FAMILY {
+       DEVICE_5702_3_4_FAMILY,         /* 0  - Denali, Vinson, K2 */
+       DEVICE_5705_FAMILY,             /* 1  - Bachelor */
+       DEVICE_SHASTA_FAMILY,           /* 2  - 5751 */
+       DEVICE_5706_FAMILY,             /* 3  - Teton */
+       DEVICE_5714_FAMILY,             /* 4  - Hamilton */
+       DEVICE_STANFORD_FAMILY,         /* 5  - 5755 */
+       DEVICE_STANFORD_ME_FAMILY,      /* 6  - 5756 */
+       DEVICE_SOLEDAD_FAMILY,          /* 7  - 5761[E] */
+       DEVICE_CILAI_FAMILY,            /* 8  - 57780/60/90/91 */
+       DEVICE_ASPEN_FAMILY,            /* 9  - 57781/85/61/65/91/95 */
+       DEVICE_ASPEN_PLUS_FAMILY,       /* 10 - 57786 */
+       DEVICE_LOGAN_FAMILY,            /* 11 - Any device in the Logan family
+                                        */
+       DEVICE_LOGAN_5762,              /* 12 - Logan Enterprise (aka Columbia)
+                                        */
+       DEVICE_LOGAN_57767,             /* 13 - Logan Client */
+       DEVICE_LOGAN_57787,             /* 14 - Logan Consumer */
+       DEVICE_LOGAN_5725,              /* 15 - Logan Server (TruManage-enabled)
+                                        */
+       DEVICE_SAWTOOTH_FAMILY,         /* 16 - 5717/18 */
+       DEVICE_COTOPAXI_FAMILY,         /* 17 - 5719 */
+       DEVICE_SNAGGLETOOTH_FAMILY,     /* 18 - 5720 */
+       DEVICE_CUMULUS_FAMILY,          /* 19 - Cumulus/Whitney */
+       MAX_DEVICE_FAMILY
+};
+
+enum SUPPORTED_CODE {
+       CODE_ASF1,              /* 0  - ASF VERSION 1.03 <deprecated> */
+       CODE_ASF2,              /* 1  - ASF VERSION 2.00 <deprecated> */
+       CODE_PASSTHRU,          /* 2  - PassThru         <deprecated> */
+       CODE_PT_SEC,            /* 3  - PassThru with security <deprecated> */
+       CODE_UMP,               /* 4  - UMP                     <deprecated> */
+       CODE_BOOT,              /* 5  - Bootcode */
+       CODE_DASH,              /* 6  - TruManage (DASH + ASF + PMCI)
+                                *      Management firmware
+                                */
+       CODE_MCTP_PASSTHRU,     /* 7  - NCSI / MCTP Pass-through firmware */
+       CODE_PM_OFFLOAD,        /* 8  - Power-Management Proxy Offload firmware
+                                */
+       CODE_MDNS_SD_OFFLOAD,   /* 9  - Multicast DNS Service Discovery Proxy
+                                *      Offload firmware
+                                */
+       CODE_DISC_OFFLOAD,      /* 10 - Discovery Offload firmware */
+       CODE_MUSTANG,           /* 11 - I2C Error reporting APE firmware
+                                *      <deprecated>
+                                */
+       CODE_ARP_BATCH,         /* 12 - ARP Batch firmware */
+       CODE_SMASH,             /* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
+                                *      Management firmware
+                                */
+       CODE_APE_DIAG,          /* 14 - APE Test Diag firmware */
+       CODE_APE_PATCH,         /* 15 - APE Patch firmware */
+       CODE_TANG_PATCH,        /* 16 - TANG Patch firmware */
+       CODE_KONG_FW,           /* 17 - KONG firmware */
+       CODE_KONG_PATCH,        /* 18 - KONG Patch firmware */
+       CODE_BONO_FW,           /* 19 - BONO firmware */
+       CODE_BONO_PATCH,        /* 20 - BONO Patch firmware */
+
+       MAX_CODE_TYPE,
+};
+
+enum SUPPORTED_MEDIA {
+       MEDIA_COPPER,           /* 0 */
+       MEDIA_FIBER,            /* 1 */
+       MEDIA_NONE,             /* 2 */
+       MEDIA_COPPER_FIBER,     /* 3 */
+       MAX_MEDIA_TYPE,
+};
+
+struct bnxt_fw_header {
+       __le32 signature;       /* contains the constant value of
+                                * BNXT_FIRMWARE_BIN_SIGNATURE
+                                */
+       u8 flags;               /* reserved for ChiMP use */
+       u8 code_type;           /* enum SUPPORTED_CODE */
+       u8 device;              /* enum SUPPORTED_FAMILY */
+       u8 media;               /* enum SUPPORTED_MEDIA */
+       u8 version[16];         /* null-terminated version string
+                                * indicating the version of the file;
+                                * it is copied from the binary file's
+                                * version string
+                                */
+       u8 build;
+       u8 revision;
+       u8 minor_ver;
+       u8 major_ver;
+};
+
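+/* Illustrative only (not used by the driver): a loader could validate an
+ * image against this header roughly as follows, assuming "fw" points at
+ * the start of the firmware file:
+ *
+ *     const struct bnxt_fw_header *hdr = (const struct bnxt_fw_header *)fw;
+ *
+ *     if (le32_to_cpu(hdr->signature) != BNXT_FIRMWARE_BIN_SIGNATURE)
+ *             return -EINVAL;
+ */
+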
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
new file mode 100644 (file)
index 0000000..70fc825
--- /dev/null
@@ -0,0 +1,4046 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HSI_H
+#define BNXT_HSI_H
+
+/* per-context HW statistics -- chip view */
+struct ctx_hw_stats  {
+       __le64 rx_ucast_pkts;
+       __le64 rx_mcast_pkts;
+       __le64 rx_bcast_pkts;
+       __le64 rx_discard_pkts;
+       __le64 rx_drop_pkts;
+       __le64 rx_ucast_bytes;
+       __le64 rx_mcast_bytes;
+       __le64 rx_bcast_bytes;
+       __le64 tx_ucast_pkts;
+       __le64 tx_mcast_pkts;
+       __le64 tx_bcast_pkts;
+       __le64 tx_discard_pkts;
+       __le64 tx_drop_pkts;
+       __le64 tx_ucast_bytes;
+       __le64 tx_mcast_bytes;
+       __le64 tx_bcast_bytes;
+       __le64 tpa_pkts;
+       __le64 tpa_bytes;
+       __le64 tpa_events;
+       __le64 tpa_aborts;
+};
+
+/* Statistics Ejection Buffer Completion Record (16 bytes) */
+struct eject_cmpl {
+       __le16 type;
+       #define EJECT_CMPL_TYPE_MASK                                0x3fUL
+       #define EJECT_CMPL_TYPE_SFT                                 0
+       #define EJECT_CMPL_TYPE_STAT_EJECT                         (0x1aUL << 0)
+       __le16 len;
+       __le32 opaque;
+       __le32 v;
+       #define EJECT_CMPL_V                                        0x1UL
+       __le32 unused_2;
+};
+
+/* HWRM Completion Record (16 bytes) */
+struct hwrm_cmpl {
+       __le16 type;
+       #define HWRM_CMPL_TYPE_MASK                                 0x3fUL
+       #define HWRM_CMPL_TYPE_SFT                                  0
+       #define HWRM_CMPL_TYPE_HWRM_DONE                           (0x20UL << 0)
+       __le16 sequence_id;
+       __le32 unused_1;
+       __le32 v;
+       #define HWRM_CMPL_V                                         0x1UL
+       __le32 unused_3;
+};
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+       __le16 req_len_type;
+       #define HWRM_FWD_REQ_CMPL_TYPE_MASK                         0x3fUL
+       #define HWRM_FWD_REQ_CMPL_TYPE_SFT                          0
+       #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ                (0x22UL << 0)
+       #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK                      0xffc0UL
+       #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT                       6
+       __le16 source_id;
+       __le32 unused_0;
+       __le32 req_buf_addr_v[2];
+       #define HWRM_FWD_REQ_CMPL_V                                 0x1UL
+       #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK                 0xfffffffeUL
+       #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT                  1
+};
+
+/* HWRM Forwarded Response (16 bytes) */
+struct hwrm_fwd_resp_cmpl {
+       __le16 type;
+       #define HWRM_FWD_RESP_CMPL_TYPE_MASK                        0x3fUL
+       #define HWRM_FWD_RESP_CMPL_TYPE_SFT                         0
+       #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP              (0x24UL << 0)
+       __le16 source_id;
+       __le16 resp_len;
+       __le16 unused_1;
+       __le32 resp_buf_addr_v[2];
+       #define HWRM_FWD_RESP_CMPL_V                                0x1UL
+       #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK               0xfffffffeUL
+       #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT                1
+};
+
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
+struct hwrm_async_event_cmpl {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK             0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT                      0
+       #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT       (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE    (0x1UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE  (0x2UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE  (0x3UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD   (0x10UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD     (0x11UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD     (0x20UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD       (0x21UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR              (0x30UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR          (0xffUL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_V                     0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK                   0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT                    1
+       u8 unused_1[3];
+       __le32 event_data1;
+};
+
+/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+struct hwrm_async_event_cmpl_link_status_change {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT  0
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V          0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK    0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT     0
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V     0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK  0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT   1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_change {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK  0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT   0
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V           0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK  0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT   0
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V           0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V      0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK   0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT    0
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V            0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT  1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK     0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT      0
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V              0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK   0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT    1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK     0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT      0
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V              0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK   0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT    1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK       0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT         0
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V                0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK     0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT      1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+struct hwrm_async_event_cmpl_vf_flr {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK              0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT               0
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR      (0x30UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V                      0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK            0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT     1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT  0
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+       __le32 event_data2;
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V          0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+struct hwrm_async_event_cmpl_hwrm_error {
+       __le16 type;
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK          0x3fUL
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT           0
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       __le16 event_id;
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+       __le32 event_data2;
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+       u8 opaque_v;
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V                  0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK       0xfeUL
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT         1
+       u8 unused_1[3];
+       __le32 event_data1;
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* HW Resource Manager Specification 0.7.8 */
+#define HWRM_VERSION_MAJOR     0
+#define HWRM_VERSION_MINOR     7
+#define HWRM_VERSION_UPDATE    8
+
+#define HWRM_VERSION_STR       "0.7.8"
+/* The following is the signature for an HWRM message field that indicates
+ * "not applicable" (all F's).  Cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE      ((__le32)(-1))
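+/* e.g. to mark a 16-bit field as not applicable (illustrative; the field
+ * name is arbitrary):
+ *     req.dflt_vlan = (__le16)HWRM_NA_SIGNATURE;
+ */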
+#define HWRM_MAX_REQ_LEN    (128)  /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN    (176)  /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE      0x80    /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE       40
+#define HWRM_RESP_VALID_KEY      1 /* valid key for HWRM response */
+/* Input (16 bytes) */
+struct input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+};
+
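+/* Every HWRM request begins with the five struct input fields; a typical
+ * round trip (a sketch of the calling pattern seen in bnxt_ethtool.c
+ * above) looks like:
+ *
+ *     struct hwrm_nvm_read_input req = {0};
+ *
+ *     bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
+ *     rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ */
+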
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+       __le16 req_type;
+       #define HWRM_VER_GET                                       (0x0UL)
+       #define HWRM_FUNC_DISABLE                                  (0x10UL)
+       #define HWRM_FUNC_RESET                            (0x11UL)
+       #define HWRM_FUNC_GETFID                                   (0x12UL)
+       #define HWRM_FUNC_VF_ALLOC                                 (0x13UL)
+       #define HWRM_FUNC_VF_FREE                                  (0x14UL)
+       #define HWRM_FUNC_QCAPS                            (0x15UL)
+       #define HWRM_FUNC_QCFG                                     (0x16UL)
+       #define HWRM_FUNC_CFG                                      (0x17UL)
+       #define HWRM_FUNC_QSTATS                                   (0x18UL)
+       #define HWRM_FUNC_CLR_STATS                                (0x19UL)
+       #define HWRM_FUNC_DRV_UNRGTR                               (0x1aUL)
+       #define HWRM_FUNC_VF_RESC_FREE                             (0x1bUL)
+       #define HWRM_FUNC_VF_VNIC_IDS_QUERY                        (0x1cUL)
+       #define HWRM_FUNC_DRV_RGTR                                 (0x1dUL)
+       #define HWRM_FUNC_DRV_QVER                                 (0x1eUL)
+       #define HWRM_FUNC_BUF_RGTR                                 (0x1fUL)
+       #define HWRM_FUNC_VF_CFG                                   (0x20UL)
+       #define HWRM_PORT_PHY_CFG                                  (0x20UL)
+       #define HWRM_PORT_MAC_CFG                                  (0x21UL)
+       #define HWRM_PORT_ENABLE                                   (0x22UL)
+       #define HWRM_PORT_QSTATS                                   (0x23UL)
+       #define HWRM_PORT_LPBK_QSTATS                              (0x24UL)
+       #define HWRM_PORT_CLR_STATS                                (0x25UL)
+       #define HWRM_PORT_LPBK_CLR_STATS                           (0x26UL)
+       #define HWRM_PORT_PHY_QCFG                                 (0x27UL)
+       #define HWRM_PORT_MAC_QCFG                                 (0x28UL)
+       #define HWRM_PORT_BLINK_LED                                (0x29UL)
+       #define HWRM_QUEUE_QPORTCFG                                (0x30UL)
+       #define HWRM_QUEUE_QCFG                            (0x31UL)
+       #define HWRM_QUEUE_CFG                                     (0x32UL)
+       #define HWRM_QUEUE_BUFFERS_QCFG                    (0x33UL)
+       #define HWRM_QUEUE_BUFFERS_CFG                             (0x34UL)
+       #define HWRM_QUEUE_PFCENABLE_QCFG                          (0x35UL)
+       #define HWRM_QUEUE_PFCENABLE_CFG                           (0x36UL)
+       #define HWRM_QUEUE_PRI2COS_QCFG                    (0x37UL)
+       #define HWRM_QUEUE_PRI2COS_CFG                             (0x38UL)
+       #define HWRM_QUEUE_COS2BW_QCFG                             (0x39UL)
+       #define HWRM_QUEUE_COS2BW_CFG                              (0x3aUL)
+       #define HWRM_VNIC_ALLOC                            (0x40UL)
+       #define HWRM_VNIC_FREE                                     (0x41UL)
+       #define HWRM_VNIC_CFG                                      (0x42UL)
+       #define HWRM_VNIC_QCFG                                     (0x43UL)
+       #define HWRM_VNIC_TPA_CFG                                  (0x44UL)
+       #define HWRM_VNIC_TPA_QCFG                                 (0x45UL)
+       #define HWRM_VNIC_RSS_CFG                                  (0x46UL)
+       #define HWRM_VNIC_RSS_QCFG                                 (0x47UL)
+       #define HWRM_VNIC_PLCMODES_CFG                             (0x48UL)
+       #define HWRM_VNIC_PLCMODES_QCFG                    (0x49UL)
+       #define HWRM_RING_ALLOC                            (0x50UL)
+       #define HWRM_RING_FREE                                     (0x51UL)
+       #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS                 (0x52UL)
+       #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS              (0x53UL)
+       #define HWRM_RING_RESET                            (0x5eUL)
+       #define HWRM_RING_GRP_ALLOC                                (0x60UL)
+       #define HWRM_RING_GRP_FREE                                 (0x61UL)
+       #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC                     (0x70UL)
+       #define HWRM_VNIC_RSS_COS_LB_CTX_FREE                      (0x71UL)
+       #define HWRM_ARB_GRP_ALLOC                                 (0x80UL)
+       #define HWRM_ARB_GRP_CFG                                   (0x81UL)
+       #define HWRM_CFA_L2_FILTER_ALLOC                           (0x90UL)
+       #define HWRM_CFA_L2_FILTER_FREE                    (0x91UL)
+       #define HWRM_CFA_L2_FILTER_CFG                             (0x92UL)
+       #define HWRM_CFA_L2_SET_RX_MASK                    (0x93UL)
+       #define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING               (0x94UL)
+       #define HWRM_CFA_TUNNEL_FILTER_ALLOC                       (0x95UL)
+       #define HWRM_CFA_TUNNEL_FILTER_FREE                        (0x96UL)
+       #define HWRM_CFA_ENCAP_RECORD_ALLOC                        (0x97UL)
+       #define HWRM_CFA_ENCAP_RECORD_FREE                         (0x98UL)
+       #define HWRM_CFA_NTUPLE_FILTER_ALLOC                       (0x99UL)
+       #define HWRM_CFA_NTUPLE_FILTER_FREE                        (0x9aUL)
+       #define HWRM_CFA_NTUPLE_FILTER_CFG                         (0x9bUL)
+       #define HWRM_TUNNEL_DST_PORT_QUERY                         (0xa0UL)
+       #define HWRM_TUNNEL_DST_PORT_ALLOC                         (0xa1UL)
+       #define HWRM_TUNNEL_DST_PORT_FREE                          (0xa2UL)
+       #define HWRM_STAT_CTX_ALLOC                                (0xb0UL)
+       #define HWRM_STAT_CTX_FREE                                 (0xb1UL)
+       #define HWRM_STAT_CTX_QUERY                                (0xb2UL)
+       #define HWRM_STAT_CTX_CLR_STATS                    (0xb3UL)
+       #define HWRM_FW_RESET                                      (0xc0UL)
+       #define HWRM_FW_QSTATUS                            (0xc1UL)
+       #define HWRM_EXEC_FWD_RESP                                 (0xd0UL)
+       #define HWRM_REJECT_FWD_RESP                               (0xd1UL)
+       #define HWRM_FWD_RESP                                      (0xd2UL)
+       #define HWRM_FWD_ASYNC_EVENT_CMPL                          (0xd3UL)
+       #define HWRM_TEMP_MONITOR_QUERY                    (0xe0UL)
+       #define HWRM_MGMT_L2_FILTER_ALLOC                          (0x100UL)
+       #define HWRM_MGMT_L2_FILTER_FREE                           (0x101UL)
+       #define HWRM_DBG_READ_DIRECT                               (0xff10UL)
+       #define HWRM_DBG_READ_INDIRECT                             (0xff11UL)
+       #define HWRM_DBG_WRITE_DIRECT                              (0xff12UL)
+       #define HWRM_DBG_WRITE_INDIRECT                    (0xff13UL)
+       #define HWRM_DBG_DUMP                                      (0xff14UL)
+       #define HWRM_NVM_MODIFY                            (0xfff4UL)
+       #define HWRM_NVM_VERIFY_UPDATE                             (0xfff5UL)
+       #define HWRM_NVM_GET_DEV_INFO                              (0xfff6UL)
+       #define HWRM_NVM_ERASE_DIR_ENTRY                           (0xfff7UL)
+       #define HWRM_NVM_MOD_DIR_ENTRY                             (0xfff8UL)
+       #define HWRM_NVM_FIND_DIR_ENTRY                    (0xfff9UL)
+       #define HWRM_NVM_GET_DIR_ENTRIES                           (0xfffaUL)
+       #define HWRM_NVM_GET_DIR_INFO                              (0xfffbUL)
+       #define HWRM_NVM_RAW_DUMP                                  (0xfffcUL)
+       #define HWRM_NVM_READ                                      (0xfffdUL)
+       #define HWRM_NVM_WRITE                                     (0xfffeUL)
+       #define HWRM_NVM_RAW_WRITE_BLK                             (0xffffUL)
+       __le16 unused_0[3];
+};
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+       __le16 error_code;
+       #define HWRM_ERR_CODE_SUCCESS                              (0x0UL)
+       #define HWRM_ERR_CODE_FAIL                                 (0x1UL)
+       #define HWRM_ERR_CODE_INVALID_PARAMS                       (0x2UL)
+       #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED               (0x3UL)
+       #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR                 (0x4UL)
+       #define HWRM_ERR_CODE_INVALID_FLAGS                        (0x5UL)
+       #define HWRM_ERR_CODE_INVALID_ENABLES                      (0x6UL)
+       #define HWRM_ERR_CODE_HWRM_ERROR                           (0xfUL)
+       #define HWRM_ERR_CODE_UNKNOWN_ERR                          (0xfffeUL)
+       #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED            (0xffffUL)
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 opaque_0;
+       __le16 opaque_1;
+       u8 opaque_2;
+       u8 valid;
+};
+
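+/* Note: firmware writes the trailing valid byte last, so a driver can
+ * poll it (against HWRM_RESP_VALID_KEY) to detect that the rest of the
+ * response has been DMAed.
+ */
+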
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+       __le64 tx_64b_frames;
+       __le64 tx_65b_127b_frames;
+       __le64 tx_128b_255b_frames;
+       __le64 tx_256b_511b_frames;
+       __le64 tx_512b_1023b_frames;
+       __le64 tx_1024b_1518_frames;
+       __le64 tx_good_vlan_frames;
+       __le64 tx_1519b_2047_frames;
+       __le64 tx_2048b_4095b_frames;
+       __le64 tx_4096b_9216b_frames;
+       __le64 tx_9217b_16383b_frames;
+       __le64 tx_good_frames;
+       __le64 tx_total_frames;
+       __le64 tx_ucast_frames;
+       __le64 tx_mcast_frames;
+       __le64 tx_bcast_frames;
+       __le64 tx_pause_frames;
+       __le64 tx_pfc_frames;
+       __le64 tx_jabber_frames;
+       __le64 tx_fcs_err_frames;
+       __le64 tx_control_frames;
+       __le64 tx_oversz_frames;
+       __le64 tx_single_dfrl_frames;
+       __le64 tx_multi_dfrl_frames;
+       __le64 tx_single_coll_frames;
+       __le64 tx_multi_coll_frames;
+       __le64 tx_late_coll_frames;
+       __le64 tx_excessive_coll_frames;
+       __le64 tx_frag_frames;
+       __le64 tx_err;
+       __le64 tx_tagged_frames;
+       __le64 tx_dbl_tagged_frames;
+       __le64 tx_runt_frames;
+       __le64 tx_fifo_underruns;
+       __le64 tx_pfc_ena_frames_pri0;
+       __le64 tx_pfc_ena_frames_pri1;
+       __le64 tx_pfc_ena_frames_pri2;
+       __le64 tx_pfc_ena_frames_pri3;
+       __le64 tx_pfc_ena_frames_pri4;
+       __le64 tx_pfc_ena_frames_pri5;
+       __le64 tx_pfc_ena_frames_pri6;
+       __le64 tx_pfc_ena_frames_pri7;
+       __le64 tx_eee_lpi_events;
+       __le64 tx_eee_lpi_duration;
+       __le64 tx_llfc_logical_msgs;
+       __le64 tx_hcfc_msgs;
+       __le64 tx_total_collisions;
+       __le64 tx_bytes;
+       __le64 tx_xthol_frames;
+       __le64 tx_stat_discard;
+       __le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+       __le64 rx_64b_frames;
+       __le64 rx_65b_127b_frames;
+       __le64 rx_128b_255b_frames;
+       __le64 rx_256b_511b_frames;
+       __le64 rx_512b_1023b_frames;
+       __le64 rx_1024b_1518_frames;
+       __le64 rx_good_vlan_frames;
+       __le64 rx_1519b_2047b_frames;
+       __le64 rx_2048b_4095b_frames;
+       __le64 rx_4096b_9216b_frames;
+       __le64 rx_9217b_16383b_frames;
+       __le64 rx_total_frames;
+       __le64 rx_ucast_frames;
+       __le64 rx_mcast_frames;
+       __le64 rx_bcast_frames;
+       __le64 rx_fcs_err_frames;
+       __le64 rx_ctrl_frames;
+       __le64 rx_pause_frames;
+       __le64 rx_pfc_frames;
+       __le64 rx_unsupported_opcode_frames;
+       __le64 rx_unsupported_da_pausepfc_frames;
+       __le64 rx_wrong_sa_frames;
+       __le64 rx_align_err_frames;
+       __le64 rx_oor_len_frames;
+       __le64 rx_code_err_frames;
+       __le64 rx_false_carrier_frames;
+       __le64 rx_ovrsz_frames;
+       __le64 rx_jbr_frames;
+       __le64 rx_mtu_err_frames;
+       __le64 rx_match_crc_frames;
+       __le64 rx_promiscuous_frames;
+       __le64 rx_tagged_frames;
+       __le64 rx_double_tagged_frames;
+       __le64 rx_trunc_frames;
+       __le64 rx_good_frames;
+       __le64 rx_pfc_xon2xoff_frames_pri0;
+       __le64 rx_pfc_xon2xoff_frames_pri1;
+       __le64 rx_pfc_xon2xoff_frames_pri2;
+       __le64 rx_pfc_xon2xoff_frames_pri3;
+       __le64 rx_pfc_xon2xoff_frames_pri4;
+       __le64 rx_pfc_xon2xoff_frames_pri5;
+       __le64 rx_pfc_xon2xoff_frames_pri6;
+       __le64 rx_pfc_xon2xoff_frames_pri7;
+       __le64 rx_pfc_ena_frames_pri0;
+       __le64 rx_pfc_ena_frames_pri1;
+       __le64 rx_pfc_ena_frames_pri2;
+       __le64 rx_pfc_ena_frames_pri3;
+       __le64 rx_pfc_ena_frames_pri4;
+       __le64 rx_pfc_ena_frames_pri5;
+       __le64 rx_pfc_ena_frames_pri6;
+       __le64 rx_pfc_ena_frames_pri7;
+       __le64 rx_sch_crc_err_frames;
+       __le64 rx_undrsz_frames;
+       __le64 rx_frag_frames;
+       __le64 rx_eee_lpi_events;
+       __le64 rx_eee_lpi_duration;
+       __le64 rx_llfc_physical_msgs;
+       __le64 rx_llfc_logical_msgs;
+       __le64 rx_llfc_msgs_with_crc_err;
+       __le64 rx_hcfc_msgs;
+       __le64 rx_hcfc_msgs_with_crc_err;
+       __le64 rx_bytes;
+       __le64 rx_runt_bytes;
+       __le64 rx_runt_frames;
+       __le64 rx_stat_discard;
+       __le64 rx_stat_err;
+};
+
+/* hwrm_ver_get */
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 hwrm_intf_maj;
+       u8 hwrm_intf_min;
+       u8 hwrm_intf_upd;
+       u8 unused_0[5];
+};
+
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 hwrm_intf_maj;
+       u8 hwrm_intf_min;
+       u8 hwrm_intf_upd;
+       u8 hwrm_intf_rsvd;
+       u8 hwrm_fw_maj;
+       u8 hwrm_fw_min;
+       u8 hwrm_fw_bld;
+       u8 hwrm_fw_rsvd;
+       u8 ape_fw_maj;
+       u8 ape_fw_min;
+       u8 ape_fw_bld;
+       u8 ape_fw_rsvd;
+       u8 kong_fw_maj;
+       u8 kong_fw_min;
+       u8 kong_fw_bld;
+       u8 kong_fw_rsvd;
+       u8 tang_fw_maj;
+       u8 tang_fw_min;
+       u8 tang_fw_bld;
+       u8 tang_fw_rsvd;
+       u8 bono_fw_maj;
+       u8 bono_fw_min;
+       u8 bono_fw_bld;
+       u8 bono_fw_rsvd;
+       char hwrm_fw_name[16];
+       char ape_fw_name[16];
+       char kong_fw_name[16];
+       char tang_fw_name[16];
+       char bono_fw_name[16];
+       __le16 chip_num;
+       u8 chip_rev;
+       u8 chip_metal;
+       u8 chip_bond_id;
+       u8 unused_0;
+       __le16 max_req_win_len;
+       __le16 max_resp_len;
+       __le16 def_req_timeout;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_disable */
+/* Input (24 bytes) */
+struct hwrm_func_disable_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID                0x1UL
+       __le16 vf_id;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_disable_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_reset */
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID                  0x1UL
+       __le16 vf_id;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_reset_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_getfid */
+/* Input (24 bytes) */
+struct hwrm_func_getfid_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define FUNC_GETFID_REQ_ENABLES_PCI_ID                      0x1UL
+       __le16 pci_id;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_getfid_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 fid;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_func_vf_alloc */
+/* Input (24 bytes) */
+struct hwrm_func_vf_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID               0x1UL
+       __le16 first_vf_id;
+       __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 first_vf_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_func_vf_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID                0x1UL
+       __le16 first_vf_id;
+       __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_vf_cfg */
+/* Input (24 bytes) */
+struct hwrm_func_vf_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define FUNC_VF_CFG_REQ_ENABLES_MTU                         0x1UL
+       #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN                  0x2UL
+       __le16 mtu;
+       __le16 guest_vlan;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_qcaps */
+/* Input (24 bytes) */
+struct hwrm_func_qcaps_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       __le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+struct hwrm_func_qcaps_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 fid;
+       __le16 port_id;
+       __le32 flags;
+       #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED           0x1UL
+       #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING      0x2UL
+       u8 perm_mac_address[6];
+       __le16 max_rsscos_ctx;
+       __le16 max_cmpl_rings;
+       __le16 max_tx_rings;
+       __le16 max_rx_rings;
+       __le16 max_l2_ctxs;
+       __le16 max_vnics;
+       __le16 first_vf_id;
+       __le16 max_vfs;
+       __le16 max_stat_ctx;
+       __le32 max_encap_records;
+       __le32 max_decap_records;
+       __le32 max_tx_em_flows;
+       __le32 max_tx_wm_flows;
+       __le32 max_rx_em_flows;
+       __le32 max_rx_wm_flows;
+       __le32 max_mcast_filters;
+       __le32 max_flow_id;
+       __le32 max_hw_ring_grps;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_func_cfg */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 vf_id;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 flags;
+       #define FUNC_CFG_REQ_FLAGS_PROM_MODE                        0x1UL
+       #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK               0x2UL
+       #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK                0x4UL
+       #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH                   0x8UL
+       #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH                 0x10UL
+       #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE                    0x20UL
+       #define FUNC_CFG_REQ_FLAGS_DISABLE_STP                      0x40UL
+       #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP             0x80UL
+       #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2                    0x100UL
+       __le32 enables;
+       #define FUNC_CFG_REQ_ENABLES_MTU                            0x1UL
+       #define FUNC_CFG_REQ_ENABLES_MRU                            0x2UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS                0x4UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS                 0x8UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS                   0x10UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS                   0x20UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS                    0x40UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_VNICS                      0x80UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS                  0x100UL
+       #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR                  0x200UL
+       #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN                      0x400UL
+       #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR                   0x800UL
+       #define FUNC_CFG_REQ_ENABLES_MIN_BW                         0x1000UL
+       #define FUNC_CFG_REQ_ENABLES_MAX_BW                         0x2000UL
+       #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR                 0x4000UL
+       #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE            0x8000UL
+       #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS              0x10000UL
+       #define FUNC_CFG_REQ_ENABLES_EVB_MODE                       0x20000UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS              0x40000UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS               0x80000UL
+       __le16 mtu;
+       __le16 mru;
+       __le16 num_rsscos_ctxs;
+       __le16 num_cmpl_rings;
+       __le16 num_tx_rings;
+       __le16 num_rx_rings;
+       __le16 num_l2_ctxs;
+       __le16 num_vnics;
+       __le16 num_stat_ctxs;
+       __le16 num_hw_ring_grps;
+       u8 dflt_mac_addr[6];
+       __le16 dflt_vlan;
+       __be32 dflt_ip_addr[4];
+       __le32 min_bw;
+       __le32 max_bw;
+       __le16 async_event_cr;
+       u8 vlan_antispoof_mode;
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK           (0x0UL << 0)
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN    (0x1UL << 0)
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+       u8 allowed_vlan_pris;
+       #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK             (0x0UL << 0)
+       #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN      (0x1UL << 0)
+       #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE  (0x2UL << 0)
+       #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+       u8 evb_mode;
+       #define FUNC_CFG_REQ_EVB_MODE_NO_EVB                       (0x0UL << 0)
+       #define FUNC_CFG_REQ_EVB_MODE_VEB                          (0x1UL << 0)
+       #define FUNC_CFG_REQ_EVB_MODE_VEPA                         (0x2UL << 0)
+       u8 unused_2;
+       __le16 num_mcast_filters;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_qstats */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       __le16 unused_0[3];
+};
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 tx_ucast_pkts;
+       __le64 tx_mcast_pkts;
+       __le64 tx_bcast_pkts;
+       __le64 tx_err_pkts;
+       __le64 tx_drop_pkts;
+       __le64 tx_ucast_bytes;
+       __le64 tx_mcast_bytes;
+       __le64 tx_bcast_bytes;
+       __le64 rx_ucast_pkts;
+       __le64 rx_mcast_pkts;
+       __le64 rx_bcast_pkts;
+       __le64 rx_err_pkts;
+       __le64 rx_drop_pkts;
+       __le64 rx_ucast_bytes;
+       __le64 rx_mcast_bytes;
+       __le64 rx_bcast_bytes;
+       __le64 rx_agg_pkts;
+       __le64 rx_agg_bytes;
+       __le64 rx_agg_events;
+       __le64 rx_agg_aborts;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_vf_resc_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_resc_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 vf_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_resc_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 vf_id;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 max_vnic_id_cnt;
+       __le64 vnic_id_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 vnic_id_cnt;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_func_drv_rgtr */
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE                0x1UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE               0x2UL
+       __le32 enables;
+       #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE                   0x1UL
+       #define FUNC_DRV_RGTR_REQ_ENABLES_VER                       0x2UL
+       #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP                 0x4UL
+       #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD                0x8UL
+       #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD           0x10UL
+       __le16 os_type;
+       u8 ver_maj;
+       u8 ver_min;
+       u8 ver_upd;
+       u8 unused_0;
+       __le16 unused_1;
+       __le32 timestamp;
+       __le32 unused_2;
+       __le32 vf_req_fwd[8];
+       __le32 async_event_fwd[8];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_rgtr_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr */
+/* Input (24 bytes) */
+struct hwrm_func_drv_unrgtr_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN     0x1UL
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_func_buf_rgtr */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID             0x1UL
+       #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR              0x2UL
+       __le16 vf_id;
+       __le16 req_buf_num_pages;
+       __le16 req_buf_page_size;
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B    (0x4UL << 0)
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K             (0xcUL << 0)
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K             (0xdUL << 0)
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K    (0x10UL << 0)
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M             (0x16UL << 0)
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M             (0x17UL << 0)
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G             (0x1eUL << 0)
+       __le16 req_buf_len;
+       __le16 resp_buf_len;
+       u8 unused_0;
+       u8 unused_1;
+       __le64 req_buf_page_addr0;
+       __le64 req_buf_page_addr1;
+       __le64 req_buf_page_addr2;
+       __le64 req_buf_page_addr3;
+       __le64 req_buf_page_addr4;
+       __le64 req_buf_page_addr5;
+       __le64 req_buf_page_addr6;
+       __le64 req_buf_page_addr7;
+       __le64 req_buf_page_addr8;
+       __le64 req_buf_page_addr9;
+       __le64 error_buf_addr;
+       __le64 resp_buf_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
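hwrm_func_buf_rgtr hands the firmware up to ten host pages (req_buf_page_addr0..9) to stage forwarded VF requests in, with req_buf_page_size selecting one of the enumerated page geometries above. A sketch registering a single 4K page, under the same hypothetical transport:

extern int hwrm_send(void *req, int req_len, void *resp, int resp_len);

static int buf_register(dma_addr_t page_dma, u16 vf_req_len, u16 vf_resp_len)
{
	struct hwrm_func_buf_rgtr_input req = { 0 };
	struct hwrm_func_buf_rgtr_output resp = { 0 };

	req.req_type = cpu_to_le16(HWRM_FUNC_BUF_RGTR);	/* elsewhere in header */
	req.req_buf_num_pages = cpu_to_le16(1);
	req.req_buf_page_size =
		cpu_to_le16(FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K);
	req.req_buf_len = cpu_to_le16(vf_req_len);
	req.resp_buf_len = cpu_to_le16(vf_resp_len);
	req.req_buf_page_addr0 = cpu_to_le64(page_dma);

	return hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}
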
+/* hwrm_func_drv_qver */
+/* Input (24 bytes) */
+struct hwrm_func_drv_qver_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID     0x1UL
+       #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID                 0x2UL
+       __le16 fid;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_qver_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 os_type;
+       u8 ver_maj;
+       u8 ver_min;
+       u8 ver_upd;
+       u8 unused_0;
+       u8 unused_1;
+       u8 valid;
+};
+
+/* hwrm_port_phy_cfg */
+/* Input (48 bytes) */
+struct hwrm_port_phy_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY                    0x1UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN              0x2UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FORCE                        0x4UL
+       #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG              0x8UL
+       __le32 enables;
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE                  0x1UL
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX                0x2UL
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE                 0x4UL
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED            0x8UL
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK      0x10UL
+       #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED                  0x20UL
+       #define PORT_PHY_CFG_REQ_ENABLES_LPBK                       0x40UL
+       #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS                0x80UL
+       #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE                0x100UL
+       __le16 port_id;
+       __le16 force_link_speed;
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB    (0x1UL << 0)
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB              (0xaUL << 0)
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB              (0x14UL << 0)
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB    (0x19UL << 0)
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB             (0x64UL << 0)
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB             (0xc8UL << 0)
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB             (0xfaUL << 0)
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB             (0x190UL << 0)
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB             (0x1f4UL << 0)
+       u8 auto_mode;
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE            (0x0UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS              (0x1UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED               (0x2UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW    (0x3UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK            (0x4UL << 0)
+       u8 auto_duplex;
+       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF                  (0x0UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL                  (0x1UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH                  (0x2UL << 0)
+       u8 auto_pause;
+       #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX                      0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX                      0x2UL
+       u8 unused_0;
+       __le16 auto_link_speed;
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB             (0x1UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB               (0xaUL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB               (0x14UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB             (0x19UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB              (0x64UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB              (0xc8UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB              (0xfaUL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB              (0x190UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB              (0x1f4UL << 0)
+       __le16 auto_link_speed_mask;
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD      0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB         0x2UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD         0x4UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB           0x8UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB           0x10UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB         0x20UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB          0x40UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB          0x80UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB          0x100UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB          0x200UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB          0x400UL
+       u8 wirespeed;
+       #define PORT_PHY_CFG_REQ_WIRESPEED_OFF                     (0x0UL << 0)
+       #define PORT_PHY_CFG_REQ_WIRESPEED_ON                      (0x1UL << 0)
+       u8 lpbk;
+       #define PORT_PHY_CFG_REQ_LPBK_NONE                         (0x0UL << 0)
+       #define PORT_PHY_CFG_REQ_LPBK_LOCAL                        (0x1UL << 0)
+       #define PORT_PHY_CFG_REQ_LPBK_REMOTE                       (0x2UL << 0)
+       u8 force_pause;
+       #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX             0x1UL
+       #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX             0x2UL
+       u8 unused_1;
+       __le32 preemphasis;
+       __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_phy_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
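The force and auto link-speed encodings above are the speed in units of 100 Mb/s: 100MB is 0x1, 1GB is 0xa (10), 10GB is 0x64 (100), 50GB is 0x1f4 (500). Converting from a Mb/s value is therefore a single division, as in this sketch (hypothetical transport as before):

extern int hwrm_send(void *req, int req_len, void *resp, int resp_len);

/* Wire encoding is speed / 100 Mb/s, e.g. 40000 Mb/s -> 0x190. */
static int force_link_speed(u16 port_id, u32 speed_mbps)
{
	struct hwrm_port_phy_cfg_input req = { 0 };
	struct hwrm_port_phy_cfg_output resp = { 0 };

	req.req_type = cpu_to_le16(HWRM_PORT_PHY_CFG);	/* elsewhere in header */
	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
	req.port_id = cpu_to_le16(port_id);
	req.force_link_speed = cpu_to_le16(speed_mbps / 100);

	return hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}
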
+/* hwrm_port_phy_qcfg */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_phy_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 link;
+       #define PORT_PHY_QCFG_RESP_LINK_NO_LINK            (0x0UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SIGNAL                     (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_LINK                       (0x2UL << 0)
+       u8 unused_0;
+       __le16 link_speed;
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB                (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB                  (0xaUL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB                  (0x14UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB                (0x19UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB                 (0x64UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB                 (0xc8UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB                 (0xfaUL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB                 (0x190UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB                 (0x1f4UL << 0)
+       u8 duplex;
+       #define PORT_PHY_QCFG_RESP_DUPLEX_HALF                     (0x0UL << 0)
+       #define PORT_PHY_QCFG_RESP_DUPLEX_FULL                     (0x1UL << 0)
+       u8 pause;
+       #define PORT_PHY_QCFG_RESP_PAUSE_TX                         0x1UL
+       #define PORT_PHY_QCFG_RESP_PAUSE_RX                         0x2UL
+       __le16 support_speeds;
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD           0x1UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB     0x2UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD     0x4UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB               0x8UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB               0x10UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB     0x20UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB              0x40UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB              0x80UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB              0x100UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB              0x200UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB              0x400UL
+       __le16 force_link_speed;
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB          (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB    (0xaUL << 0)
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB    (0x14UL << 0)
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB          (0x19UL << 0)
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB           (0x64UL << 0)
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB           (0xc8UL << 0)
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB           (0xfaUL << 0)
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB           (0x190UL << 0)
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB           (0x1f4UL << 0)
+       u8 auto_mode;
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE                  (0x0UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS    (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED             (0x2UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW          (0x3UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK                  (0x4UL << 0)
+       u8 auto_pause;
+       #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX                    0x1UL
+       #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX                    0x2UL
+       __le16 auto_link_speed;
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB           (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB             (0xaUL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB             (0x14UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB           (0x19UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB    (0x64UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB    (0xc8UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB    (0xfaUL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB    (0x190UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB    (0x1f4UL << 0)
+       __le16 auto_link_speed_mask;
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD    0x1UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB      0x2UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD      0x4UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB         0x8UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB         0x10UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB      0x20UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB       0x40UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB       0x80UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB       0x100UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB       0x200UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB       0x400UL
+       u8 wirespeed;
+       #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF                   (0x0UL << 0)
+       #define PORT_PHY_QCFG_RESP_WIRESPEED_ON            (0x1UL << 0)
+       u8 lpbk;
+       #define PORT_PHY_QCFG_RESP_LPBK_NONE                       (0x0UL << 0)
+       #define PORT_PHY_QCFG_RESP_LPBK_LOCAL                      (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_LPBK_REMOTE                     (0x2UL << 0)
+       u8 force_pause;
+       #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX                   0x1UL
+       #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX                   0x2UL
+       u8 duplex_setting;
+       #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF             (0x0UL << 0)
+       #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL             (0x1UL << 0)
+       __le32 preemphasis;
+       u8 phy_maj;
+       u8 phy_min;
+       u8 phy_bld;
+       u8 phy_type;
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4                (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4                (0x2UL << 0)
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4                (0x3UL << 0)
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4                (0x4UL << 0)
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2                (0x5UL << 0)
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4                (0x6UL << 0)
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR                 (0x7UL << 0)
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET                  (0x8UL << 0)
+       u8 media_type;
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP                   (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC                  (0x2UL << 0)
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE                (0x3UL << 0)
+       u8 transceiver_type;
+       #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+       u8 phy_addr;
+       #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK                    0x1fUL
+       #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT             0
+       u8 unused_2;
+       __le16 link_partner_adv_speeds;
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB   0x2UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD   0x4UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB     0x8UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB     0x10UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB   0x20UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB    0x40UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB    0x80UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB    0x100UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB    0x200UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB    0x400UL
+       u8 link_partner_adv_auto_mode;
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+       u8 link_partner_adv_pause;
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX       0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX       0x2UL
+       u8 unused_3;
+       u8 unused_4;
+       u8 unused_5;
+       u8 valid;
+};
+
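Reading link state back is the mirror image: issue hwrm_port_phy_qcfg and decode the output, where link_speed uses the same 100 Mb/s unit. A sketch, again over the hypothetical hwrm_send():

extern int hwrm_send(void *req, int req_len, void *resp, int resp_len);

static int query_link(u16 port_id, bool *up, u32 *speed_mbps,
		      bool *full_duplex)
{
	struct hwrm_port_phy_qcfg_input req = { 0 };
	struct hwrm_port_phy_qcfg_output resp = { 0 };
	int rc;

	req.req_type = cpu_to_le16(HWRM_PORT_PHY_QCFG);	/* elsewhere in header */
	req.port_id = cpu_to_le16(port_id);

	rc = hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
	if (rc)
		return rc;

	*up = resp.link == PORT_PHY_QCFG_RESP_LINK_LINK;
	*speed_mbps = le16_to_cpu(resp.link_speed) * 100; /* 100 Mb/s units */
	*full_duplex = resp.duplex == PORT_PHY_QCFG_RESP_DUPLEX_FULL;
	return 0;
}
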
+/* hwrm_port_mac_cfg */
+/* Input (32 bytes) */
+struct hwrm_port_mac_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK                   0x1UL
+       #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE       0x2UL
+       #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE       0x4UL
+       #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE           0x8UL
+       __le32 enables;
+       #define PORT_MAC_CFG_REQ_ENABLES_IPG                        0x1UL
+       #define PORT_MAC_CFG_REQ_ENABLES_LPBK                       0x2UL
+       #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI     0x4UL
+       #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI               0x8UL
+       #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI    0x10UL
+       #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI           0x20UL
+       __le16 port_id;
+       u8 ipg;
+       u8 lpbk;
+       #define PORT_MAC_CFG_REQ_LPBK_NONE                         (0x0UL << 0)
+       #define PORT_MAC_CFG_REQ_LPBK_LOCAL                        (0x1UL << 0)
+       #define PORT_MAC_CFG_REQ_LPBK_REMOTE                       (0x2UL << 0)
+       u8 ivlan_pri2cos_map_pri;
+       u8 lcos_map_pri;
+       u8 tunnel_pri2cos_map_pri;
+       u8 dscp2pri_map_pri;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_mac_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 mru;
+       __le16 mtu;
+       u8 ipg;
+       u8 lpbk;
+       #define PORT_MAC_CFG_RESP_LPBK_NONE                        (0x0UL << 0)
+       #define PORT_MAC_CFG_RESP_LPBK_LOCAL                       (0x1UL << 0)
+       #define PORT_MAC_CFG_RESP_LPBK_REMOTE                      (0x2UL << 0)
+       u8 unused_0;
+       u8 valid;
+};
+
+/* hwrm_port_enable */
+/* Input (24 bytes) */
+struct hwrm_port_enable_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC               0x1UL
+       __le16 port_id;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_enable_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_qstats */
+/* Input (40 bytes) */
+struct hwrm_port_qstats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2[3];
+       u8 unused_3;
+       __le64 tx_stat_host_addr;
+       __le64 rx_stat_host_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_qstats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_qstats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (64 bytes) */
+struct hwrm_port_lpbk_qstats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 lpbk_ucast_frames;
+       __le64 lpbk_mcast_frames;
+       __le64 lpbk_bcast_frames;
+       __le64 lpbk_ucast_bytes;
+       __le64 lpbk_mcast_bytes;
+       __le64 lpbk_bcast_bytes;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_port_clr_stats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_clr_stats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_blink_led */
+/* Input (24 bytes) */
+struct hwrm_port_blink_led_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 num_blinks;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_blink_led_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_queue_qportcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_qportcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH                       0x1UL
+       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX                   (0x0UL << 0)
+       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX                   (0x1UL << 0)
+       __le16 port_id;
+       __le16 unused_0;
+};
+
+/* Output (32 bytes) */
+struct hwrm_queue_qportcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 max_configurable_queues;
+       u8 max_configurable_lossless_queues;
+       u8 queue_cfg_allowed;
+       u8 queue_buffers_cfg_allowed;
+       u8 queue_pfcenable_cfg_allowed;
+       u8 queue_pri2cos_cfg_allowed;
+       u8 queue_cos2bw_cfg_allowed;
+       u8 queue_id0;
+       u8 queue_id0_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       u8 queue_id1;
+       u8 queue_id1_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       u8 queue_id2;
+       u8 queue_id2_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       u8 queue_id3;
+       u8 queue_id3_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       u8 queue_id4;
+       u8 queue_id4_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       u8 queue_id5;
+       u8 queue_id5_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       u8 queue_id6;
+       u8 queue_id6_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       u8 queue_id7;
+       u8 queue_id7_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       u8 valid;
+};
+
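The qportcfg output lays its eight (queue_id, service_profile) byte pairs out back to back, so instead of naming each field a driver can walk them as consecutive u8 pairs, bounded by max_configurable_queues. A sketch of that walk (the pair layout follows directly from the declaration above):

/* Walk the 8 consecutive (id, service_profile) u8 pairs that start at
 * queue_id0 in hwrm_queue_qportcfg_output. */
static void list_queues(struct hwrm_queue_qportcfg_output *resp)
{
	u8 *pair = &resp->queue_id0;
	int i, n = min_t(int, resp->max_configurable_queues, 8);

	for (i = 0; i < n; i++, pair += 2)
		pr_info("hw queue id %u, %s\n", pair[0],
			pair[1] ==
			QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS ?
			"lossless" : "lossy");
}
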
+/* hwrm_queue_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_CFG_REQ_FLAGS_PATH                            0x1UL
+       #define QUEUE_CFG_REQ_FLAGS_PATH_TX                        (0x0UL << 0)
+       #define QUEUE_CFG_REQ_FLAGS_PATH_RX                        (0x1UL << 0)
+       __le32 enables;
+       #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN                      0x1UL
+       #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE               0x2UL
+       __le32 queue_id;
+       __le32 dflt_len;
+       u8 service_profile;
+       #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY                (0x0UL << 0)
+       #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS             (0x1UL << 0)
+       #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN              (0xffUL << 0)
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_queue_buffers_cfg */
+/* Input (56 bytes) */
+struct hwrm_queue_buffers_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH                    0x1UL
+       #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX                (0x0UL << 0)
+       #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX                (0x1UL << 0)
+       __le32 enables;
+       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED              0x1UL
+       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED                0x2UL
+       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP                 0x4UL
+       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF                  0x8UL
+       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON                   0x10UL
+       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL                  0x20UL
+       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL               0x40UL
+       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX                   0x80UL
+       __le32 queue_id;
+       __le32 reserved;
+       __le32 shared;
+       __le32 xoff;
+       __le32 xon;
+       __le32 full;
+       __le32 notfull;
+       __le32 max;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_buffers_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pfcenable_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED   0x1UL
+       #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED   0x2UL
+       #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED   0x4UL
+       #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED   0x8UL
+       #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED   0x10UL
+       #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED   0x20UL
+       #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED   0x40UL
+       #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED   0x80UL
+       __le16 port_id;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pfcenable_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_pri2cos_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH                    0x1UL
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX                (0x0UL << 0)
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX                (0x1UL << 0)
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN                   0x2UL
+       __le32 enables;
+       u8 port_id;
+       u8 pri0_cos;
+       u8 pri1_cos;
+       u8 pri2_cos;
+       u8 pri3_cos;
+       u8 pri4_cos;
+       u8 pri5_cos;
+       u8 pri6_cos;
+       u8 pri7_cos;
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pri2cos_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_queue_cos2bw_cfg */
+/* Input (128 bytes) */
+struct hwrm_queue_cos2bw_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       __le32 enables;
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID   0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID   0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID   0x4UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID   0x8UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID   0x10UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID   0x20UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID   0x40UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID   0x80UL
+       __le16 port_id;
+       u8 queue_id0;
+       u8 unused_0;
+       __le32 queue_id0_min_bw;
+       __le32 queue_id0_max_bw;
+       u8 queue_id0_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP      (0x0UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS     (0x1UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+       u8 queue_id0_pri_lvl;
+       u8 queue_id0_bw_weight;
+       u8 queue_id1;
+       __le32 queue_id1_min_bw;
+       __le32 queue_id1_max_bw;
+       u8 queue_id1_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP      (0x0UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS     (0x1UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+       u8 queue_id1_pri_lvl;
+       u8 queue_id1_bw_weight;
+       u8 queue_id2;
+       __le32 queue_id2_min_bw;
+       __le32 queue_id2_max_bw;
+       u8 queue_id2_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP      (0x0UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS     (0x1UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+       u8 queue_id2_pri_lvl;
+       u8 queue_id2_bw_weight;
+       u8 queue_id3;
+       __le32 queue_id3_min_bw;
+       __le32 queue_id3_max_bw;
+       u8 queue_id3_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP      (0x0UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS     (0x1UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+       u8 queue_id3_pri_lvl;
+       u8 queue_id3_bw_weight;
+       u8 queue_id4;
+       __le32 queue_id4_min_bw;
+       __le32 queue_id4_max_bw;
+       u8 queue_id4_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP      (0x0UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS     (0x1UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+       u8 queue_id4_pri_lvl;
+       u8 queue_id4_bw_weight;
+       u8 queue_id5;
+       __le32 queue_id5_min_bw;
+       __le32 queue_id5_max_bw;
+       u8 queue_id5_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP      (0x0UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS     (0x1UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+       u8 queue_id5_pri_lvl;
+       u8 queue_id5_bw_weight;
+       u8 queue_id6;
+       __le32 queue_id6_min_bw;
+       __le32 queue_id6_max_bw;
+       u8 queue_id6_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP      (0x0UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS     (0x1UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+       u8 queue_id6_pri_lvl;
+       u8 queue_id6_bw_weight;
+       u8 queue_id7;
+       __le32 queue_id7_min_bw;
+       __le32 queue_id7_max_bw;
+       u8 queue_id7_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP      (0x0UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS     (0x1UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+       u8 queue_id7_pri_lvl;
+       u8 queue_id7_bw_weight;
+       u8 unused_1[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cos2bw_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
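hwrm_queue_cos2bw_cfg carries one group of fields per CoS queue (id, min_bw, max_bw, tsa_assign, pri_lvl, bw_weight), each applied only when its COS_QUEUE_IDn_VALID bit is set in enables. A sketch putting queue 0 into ETS arbitration with a bandwidth weight, over the hypothetical transport:

extern int hwrm_send(void *req, int req_len, void *resp, int resp_len);

static int ets_cfg_queue0(u16 port_id, u8 queue_id, u8 weight)
{
	struct hwrm_queue_cos2bw_cfg_input req = { 0 };
	struct hwrm_queue_cos2bw_cfg_output resp = { 0 };

	req.req_type = cpu_to_le16(HWRM_QUEUE_COS2BW_CFG); /* elsewhere in header */
	/* Apply only the queue-0 block. */
	req.enables =
		cpu_to_le32(QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID);
	req.port_id = cpu_to_le16(port_id);
	req.queue_id0 = queue_id;
	req.queue_id0_tsa_assign =
		QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS;
	req.queue_id0_bw_weight = weight;

	return hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}
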
+/* hwrm_vnic_alloc */
+/* Input (24 bytes) */
+struct hwrm_vnic_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define VNIC_ALLOC_REQ_FLAGS_DEFAULT                        0x1UL
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 vnic_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_vnic_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 vnic_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_vnic_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define VNIC_CFG_REQ_FLAGS_DEFAULT                          0x1UL
+       #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE                  0x2UL
+       __le32 enables;
+       #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP                  0x1UL
+       #define VNIC_CFG_REQ_ENABLES_RSS_RULE                       0x2UL
+       #define VNIC_CFG_REQ_ENABLES_COS_RULE                       0x4UL
+       #define VNIC_CFG_REQ_ENABLES_LB_RULE                        0x8UL
+       #define VNIC_CFG_REQ_ENABLES_MRU                            0x10UL
+       __le16 vnic_id;
+       __le16 dflt_ring_grp;
+       __le16 rss_rule;
+       __le16 cos_rule;
+       __le16 lb_rule;
+       __le16 mru;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
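VNICs follow an alloc/cfg/free lifecycle: hwrm_vnic_alloc returns a firmware vnic_id, hwrm_vnic_cfg then binds it to a default ring group, an RSS rule and an MRU (each gated by enables), and hwrm_vnic_free releases it. A condensed sketch of the first two steps; note the alloc output carries a 32-bit id while the cfg input takes a 16-bit one:

extern int hwrm_send(void *req, int req_len, void *resp, int resp_len);

static int vnic_setup(u16 ring_grp, u16 rss_rule, u16 mru, __le32 *vnic_id)
{
	struct hwrm_vnic_alloc_input areq = { 0 };
	struct hwrm_vnic_alloc_output aresp = { 0 };
	struct hwrm_vnic_cfg_input creq = { 0 };
	struct hwrm_vnic_cfg_output cresp = { 0 };
	int rc;

	areq.req_type = cpu_to_le16(HWRM_VNIC_ALLOC);	/* elsewhere in header */
	areq.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
	rc = hwrm_send(&areq, sizeof(areq), &aresp, sizeof(aresp));
	if (rc)
		return rc;
	*vnic_id = aresp.vnic_id;	/* keep the firmware's LE encoding */

	creq.req_type = cpu_to_le16(HWRM_VNIC_CFG);
	creq.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
				   VNIC_CFG_REQ_ENABLES_RSS_RULE |
				   VNIC_CFG_REQ_ENABLES_MRU);
	creq.vnic_id = cpu_to_le16((u16)le32_to_cpu(aresp.vnic_id));
	creq.dflt_ring_grp = cpu_to_le16(ring_grp);
	creq.rss_rule = cpu_to_le16(rss_rule);
	creq.mru = cpu_to_le16(mru);
	return hwrm_send(&creq, sizeof(creq), &cresp, sizeof(cresp));
}
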
+/* hwrm_vnic_tpa_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define VNIC_TPA_CFG_REQ_FLAGS_TPA                          0x1UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA                    0x2UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE               0x4UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_GRO                          0x8UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN                 0x10UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ       0x20UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK               0x40UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK                0x80UL
+       __le32 enables;
+       #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS               0x1UL
+       #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS                   0x2UL
+       #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER              0x4UL
+       #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN                0x8UL
+       __le16 vnic_id;
+       __le16 max_agg_segs;
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1            (0x0UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2            (0x1UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4            (0x2UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8            (0x3UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX                  (0x1fUL << 0)
+       __le16 max_aggs;
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_1                        (0x0UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_2                        (0x1UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_4                        (0x2UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_8                        (0x3UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_16                       (0x4UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX                      (0x7UL << 0)
+       u8 unused_0;
+       u8 unused_1;
+       __le32 max_agg_timer;
+       __le32 min_agg_len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg */
+/* Input (48 bytes) */
+struct hwrm_vnic_rss_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 hash_type;
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4             0x1UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4                 0x2UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4                 0x4UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6             0x8UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6                 0x10UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6                 0x20UL
+       __le32 unused_0;
+       __le64 ring_grp_tbl_addr;
+       __le64 hash_key_tbl_addr;
+       __le16 rss_ctx_idx;
+       __le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
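For RSS, hash_type is a bitmap of the tuple types folded into the hash, and both the ring-group indirection table and the hash key are passed by DMA address rather than inline. A sketch enabling 2-tuple and TCP 4-tuple hashing for IPv4 and IPv6, assuming caller-mapped tables and the hypothetical transport:

extern int hwrm_send(void *req, int req_len, void *resp, int resp_len);

static int rss_cfg(u16 ctx_id, dma_addr_t grp_tbl, dma_addr_t key_tbl)
{
	struct hwrm_vnic_rss_cfg_input req = { 0 };
	struct hwrm_vnic_rss_cfg_output resp = { 0 };

	req.req_type = cpu_to_le16(HWRM_VNIC_RSS_CFG);	/* elsewhere in header */
	req.hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6);
	req.ring_grp_tbl_addr = cpu_to_le64(grp_tbl);	/* indirection table */
	req.hash_key_tbl_addr = cpu_to_le64(key_tbl);	/* hash key */
	req.rss_ctx_idx = cpu_to_le16(ctx_id);

	return hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}
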
+/* hwrm_vnic_plcmodes_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_plcmodes_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT      0x1UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT         0x2UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4                0x4UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6                0x8UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE                0x10UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE                0x20UL
+       __le32 enables;
+       #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID   0x1UL
+       #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID     0x2UL
+       #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID  0x4UL
+       __le32 vnic_id;
+       __le16 jumbo_thresh;
+       __le16 hds_offset;
+       __le16 hds_threshold;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_plcmodes_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 rss_cos_lb_ctx_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 rss_cos_lb_ctx_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_ring_alloc */
+/* Input (80 bytes) */
+struct hwrm_ring_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID     0x1UL
+       #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID              0x2UL
+       #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID                 0x4UL
+       #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID            0x8UL
+       #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID                 0x10UL
+       #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID                 0x20UL
+       u8 ring_type;
+       #define RING_ALLOC_REQ_RING_TYPE_CMPL                      (0x0UL << 0)
+       #define RING_ALLOC_REQ_RING_TYPE_TX                        (0x1UL << 0)
+       #define RING_ALLOC_REQ_RING_TYPE_RX                        (0x2UL << 0)
+       #define RING_ALLOC_REQ_RING_TYPE_STATUS            (0x3UL << 0)
+       #define RING_ALLOC_REQ_RING_TYPE_CMD                       (0x4UL << 0)
+       u8 unused_0;
+       __le16 unused_1;
+       __le64 page_tbl_addr;
+       __le32 fbo;
+       u8 page_size;
+       u8 page_tbl_depth;
+       u8 unused_2;
+       u8 unused_3;
+       __le32 length;
+       __le16 logical_id;
+       __le16 cmpl_ring_id;
+       __le16 queue_id;
+       u8 unused_4;
+       u8 unused_5;
+       __le32 arb_grp_id;
+       __le16 input_number;
+       u8 unused_6;
+       u8 unused_7;
+       __le32 weight;
+       __le32 stat_ctx_id;
+       __le32 min_bw;
+       __le32 max_bw;
+       u8 int_mode;
+       #define RING_ALLOC_REQ_INT_MODE_LEGACY                     (0x0UL << 0)
+       #define RING_ALLOC_REQ_INT_MODE_MSI                        (0x1UL << 0)
+       #define RING_ALLOC_REQ_INT_MODE_MSIX                       (0x2UL << 0)
+       #define RING_ALLOC_REQ_INT_MODE_POLL                       (0x3UL << 0)
+       u8 unused_8[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 ring_id;
+       __le16 logical_ring_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
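hwrm_ring_alloc describes a ring to the firmware through a page table (page_tbl_addr, page_size, page_tbl_depth) rather than a flat buffer, and a TX ring is bound to its completion ring and CoS queue at allocation time; the returned 16-bit ring_id is the handle used in later free and reset calls. A sketch for a TX ring, with length taken as a descriptor count (an assumption; the unit is not spelled out in this excerpt):

extern int hwrm_send(void *req, int req_len, void *resp, int resp_len);

static int tx_ring_alloc(dma_addr_t pg_tbl, u32 entries, u16 cmpl_ring_id,
			 u16 queue_id, u16 *ring_id)
{
	struct hwrm_ring_alloc_input req = { 0 };
	struct hwrm_ring_alloc_output resp = { 0 };
	int rc;

	req.req_type = cpu_to_le16(HWRM_RING_ALLOC);	/* elsewhere in header */
	req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
	req.page_tbl_addr = cpu_to_le64(pg_tbl);
	req.page_tbl_depth = 1;			/* one level of indirection */
	req.length = cpu_to_le32(entries);	/* assumed: descriptor count */
	req.cmpl_ring_id = cpu_to_le16(cmpl_ring_id);	/* TX completions */
	req.queue_id = cpu_to_le16(queue_id);		/* CoS queue */

	rc = hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
	if (!rc)
		*ring_id = le16_to_cpu(resp.ring_id);
	return rc;
}
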
+/* hwrm_ring_free */
+/* Input (24 bytes) */
+struct hwrm_ring_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 ring_type;
+       #define RING_FREE_REQ_RING_TYPE_CMPL                       (0x0UL << 0)
+       #define RING_FREE_REQ_RING_TYPE_TX                         (0x1UL << 0)
+       #define RING_FREE_REQ_RING_TYPE_RX                         (0x2UL << 0)
+       #define RING_FREE_REQ_RING_TYPE_STATUS                     (0x3UL << 0)
+       #define RING_FREE_REQ_RING_TYPE_CMD                        (0x4UL << 0)
+       u8 unused_0;
+       __le16 ring_id;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params */
+/* Input (24 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 ring_id;
+       __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 flags;
+       #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+       #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+       __le16 num_cmpl_dma_aggr;
+       __le16 num_cmpl_dma_aggr_during_int;
+       __le16 cmpl_aggr_dma_tmr;
+       __le16 cmpl_aggr_dma_tmr_during_int;
+       __le16 int_lat_tmr_min;
+       __le16 int_lat_tmr_max;
+       __le16 num_cmpl_aggr_int;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params */
+/* Input (40 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 ring_id;
+       __le16 flags;
+       #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+       #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+       __le16 num_cmpl_dma_aggr;
+       __le16 num_cmpl_dma_aggr_during_int;
+       __le16 cmpl_aggr_dma_tmr;
+       __le16 cmpl_aggr_dma_tmr_during_int;
+       __le16 int_lat_tmr_min;
+       __le16 int_lat_tmr_max;
+       __le16 num_cmpl_aggr_int;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
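The cfg variant is the interrupt-coalescing knob for a completion ring: num_cmpl_dma_aggr bounds how many completions are batched per DMA, the *_tmr fields are timers, and int_lat_tmr_min/max bound interrupt latency; the query request above returns the current values in the same fields. A sketch with placeholder numbers, since the timer tick units are hardware-defined and not given in this excerpt:

extern int hwrm_send(void *req, int req_len, void *resp, int resp_len);

static int set_coal(u16 ring_id)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = { 0 };
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output resp = { 0 };

	req.req_type =
		cpu_to_le16(HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	req.ring_id = cpu_to_le16(ring_id);
	req.flags = cpu_to_le16(
		RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET);
	/* Placeholder values: the tick units are owned by the hardware,
	 * not by this header excerpt. */
	req.num_cmpl_dma_aggr = cpu_to_le16(4);
	req.int_lat_tmr_min = cpu_to_le16(1);
	req.int_lat_tmr_max = cpu_to_le16(8);
	req.num_cmpl_aggr_int = cpu_to_le16(4);

	return hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}
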
+/* hwrm_ring_reset */
+/* Input (24 bytes) */
+struct hwrm_ring_reset_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 ring_type;
+       #define RING_RESET_REQ_RING_TYPE_CMPL                      (0x0UL << 0)
+       #define RING_RESET_REQ_RING_TYPE_TX                        (0x1UL << 0)
+       #define RING_RESET_REQ_RING_TYPE_RX                        (0x2UL << 0)
+       #define RING_RESET_REQ_RING_TYPE_STATUS            (0x3UL << 0)
+       #define RING_RESET_REQ_RING_TYPE_CMD                       (0x4UL << 0)
+       u8 unused_0;
+       __le16 ring_id;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_reset_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_ring_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 cr;
+       __le16 rr;
+       __le16 ar;
+       __le16 sc;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 ring_group_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
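The terse cr, rr, ar and sc fields bundle the rings making up one RX ring group; as the driver consuming this header uses them, cr is the completion ring, rr the RX ring, ar the aggregation ring and sc the statistics context (an inference from that driver code, not spelled out here). A sketch:

extern int hwrm_send(void *req, int req_len, void *resp, int resp_len);

/* Field meanings inferred from the consuming driver:
 * cr = completion ring, rr = rx ring, ar = aggregation ring,
 * sc = statistics context. */
static int ring_grp_alloc(u16 cmpl_ring, u16 rx_ring, u16 agg_ring,
			  u16 stat_ctx, u32 *grp_id)
{
	struct hwrm_ring_grp_alloc_input req = { 0 };
	struct hwrm_ring_grp_alloc_output resp = { 0 };
	int rc;

	req.req_type = cpu_to_le16(HWRM_RING_GRP_ALLOC); /* elsewhere in header */
	req.cr = cpu_to_le16(cmpl_ring);
	req.rr = cpu_to_le16(rx_ring);
	req.ar = cpu_to_le16(agg_ring);
	req.sc = cpu_to_le16(stat_ctx);

	rc = hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
	if (!rc)
		*grp_id = le32_to_cpu(resp.ring_group_id);
	return rc;
}
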
+/* hwrm_ring_grp_free */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 ring_group_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_arb_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_arb_grp_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 input_number;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 arb_grp_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_arb_grp_cfg */
+/* Input (32 bytes) */
+struct hwrm_arb_grp_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 arb_grp_id;
+       __le16 input_number;
+       __le16 tx_ring;
+       __le32 weight;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc */
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH                  0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX              (0x0UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX              (0x1UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK              0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP                  0x4UL
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST     0x8UL
+       __le32 enables;
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR     0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK       0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN            0x4UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK      0x8UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN            0x10UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK      0x20UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR           0x40UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK     0x80UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN          0x100UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK    0x200UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN          0x400UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK    0x800UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE            0x1000UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID              0x2000UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE         0x4000UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID         0x8000UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x10000UL
+       u8 l2_addr[6];
+       u8 unused_0;
+       u8 unused_1;
+       u8 l2_addr_mask[6];
+       __le16 l2_ovlan;
+       __le16 l2_ovlan_mask;
+       __le16 l2_ivlan;
+       __le16 l2_ivlan_mask;
+       u8 unused_2;
+       u8 unused_3;
+       u8 t_l2_addr[6];
+       u8 unused_4;
+       u8 unused_5;
+       u8 t_l2_addr_mask[6];
+       __le16 t_l2_ovlan;
+       __le16 t_l2_ovlan_mask;
+       __le16 t_l2_ivlan;
+       __le16 t_l2_ivlan_mask;
+       u8 src_type;
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT             (0x0UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF                (0x1UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF                (0x2UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC              (0x3UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG              (0x4UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE               (0x5UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO              (0x6UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG              (0x7UL << 0)
+       u8 unused_6;
+       __le32 src_id;
+       u8 tunnel_type;
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL     (0x0UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN          (0x1UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE          (0x2UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE          (0x3UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP           (0x4UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE         (0x5UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS           (0x6UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT    (0x7UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE          (0x8UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL     (0xffUL << 0)
+       u8 unused_7;
+       __le16 dst_vnic_id;
+       __le16 mirror_vnic_id;
+       u8 pri_hint;
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER         (0x0UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER     (0x1UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER     (0x2UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX               (0x3UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN               (0x4UL << 0)
+       u8 unused_8;
+       __le32 unused_9;
+       __le64 l2_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 l2_filter_id;
+       __le32 flow_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
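+
+/* Usage sketch (illustrative only, not part of the interface): a caller
+ * could program a unicast RX filter roughly as follows, assuming the
+ * driver's usual HWRM helpers (bnxt_hwrm_cmd_hdr_init() and
+ * hwrm_send_message()) and that HWRM_CFA_L2_FILTER_ALLOC is the matching
+ * req_type constant:
+ *
+ *        struct hwrm_cfa_l2_filter_alloc_input req = {0};
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+ *        req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+ *        req.enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ *                                  CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK |
+ *                                  CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID);
+ *        memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+ *        memset(req.l2_addr_mask, 0xff, ETH_ALEN);
+ *        req.dst_vnic_id = cpu_to_le16(vnic_id);
+ *        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *
+ * On success the firmware returns l2_filter_id in the output structure,
+ * which is what hwrm_cfa_l2_filter_free later expects.
+ */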
+
+/* hwrm_cfa_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_l2_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 l2_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_filter_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH                    0x1UL
+       #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX                (0x0UL << 0)
+       #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX                (0x1UL << 0)
+       #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP                    0x2UL
+       __le32 enables;
+       #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID    0x1UL
+       __le64 l2_filter_id;
+       __le32 dst_vnic_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 dflt_vnic_id;
+       __le32 mask;
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST                 0x1UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST                   0x2UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST               0x4UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST                   0x8UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS     0x10UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST               0x20UL
+       __le64 mc_tbl_addr;
+       __le32 num_mc_entries;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
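+
+/* Usage sketch (illustrative only): accepting broadcast and all-multicast
+ * traffic on the default VNIC might look like the following, assuming the
+ * same HWRM helpers as above and an assumed HWRM_CFA_L2_SET_RX_MASK
+ * req_type constant:
+ *
+ *        struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
+ *        req.dflt_vnic_id = cpu_to_le32(vnic_id);
+ *        req.mask = cpu_to_le32(CFA_L2_SET_RX_MASK_REQ_MASK_BCAST |
+ *                               CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+ *        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *
+ * For a specific multicast list, mc_tbl_addr would instead point to a
+ * DMA table of MAC addresses, num_mc_entries would give its length, and
+ * CFA_L2_SET_RX_MASK_REQ_MASK_MCAST would be set instead of ALL_MCAST.
+ */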
+
+/* hwrm_cfa_l2_set_bcastmcast_mirroring */
+/* Input (32 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 dflt_vnic_id;
+       __le32 mirroring_flags;
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL
+       __le16 vlan_id;
+       u8 bcast_domain;
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0)
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0)
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+       u8 mcast_domain;
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0)
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0)
+       #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc */
+/* Input (88 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK          0x1UL
+       __le32 enables;
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR         0x2UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN       0x4UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR         0x8UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE   0x10UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR      0x40UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x80UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI     0x100UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID    0x200UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+       __le64 l2_filter_id;
+       u8 l2_addr[6];
+       __le16 l2_ivlan;
+       __le32 l3_addr[4];
+       __le32 t_l3_addr[4];
+       u8 l3_addr_type;
+       u8 t_l3_addr_type;
+       u8 tunnel_type;
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     (0x1UL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     (0x2UL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     (0x3UL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      (0x4UL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    (0x5UL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      (0x6UL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       (0x7UL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     (0x8UL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+       u8 unused_0;
+       __le32 vni;
+       __le32 dst_vnic_id;
+       __le32 mirror_vnic_id;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 tunnel_filter_id;
+       __le32 flow_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_tunnel_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 tunnel_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_tunnel_filter_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_encap_record_alloc */
+/* Input (32 bytes) */
+struct hwrm_cfa_encap_record_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK           0x1UL
+       u8 encap_type;
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN       (0x1UL << 0)
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE       (0x2UL << 0)
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE       (0x3UL << 0)
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP         (0x4UL << 0)
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE      (0x5UL << 0)
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS         (0x6UL << 0)
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN         (0x7UL << 0)
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE       (0x8UL << 0)
+       u8 unused_0;
+       __le16 unused_1;
+       __le32 encap_data[16];
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_encap_record_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 encap_record_id;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_encap_record_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 encap_record_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_encap_record_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc */
+/* Input (128 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK          0x1UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP              0x2UL
+       __le32 enables;
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE      0x2UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x4UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR    0x8UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE    0x10UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR     0x20UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR     0x80UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL    0x200UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT       0x400UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK  0x800UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT       0x1000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK  0x2000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT       0x4000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID    0x10000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+       __le64 l2_filter_id;
+       u8 src_macaddr[6];
+       __be16 ethertype;
+       u8 ipaddr_type;
+       u8 ip_protocol;
+       __le16 dst_vnic_id;
+       __le16 mirror_vnic_id;
+       u8 tunnel_type;
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     (0x1UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     (0x2UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     (0x3UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      (0x4UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    (0x5UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      (0x6UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       (0x7UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     (0x8UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+       u8 pri_hint;
+       __be32 src_ipaddr[4];
+       __be32 src_ipaddr_mask[4];
+       __be32 dst_ipaddr[4];
+       __be32 dst_ipaddr_mask[4];
+       __be16 src_port;
+       __be16 src_port_mask;
+       __be16 dst_port;
+       __be16 dst_port_mask;
+       __le64 ntuple_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 ntuple_filter_id;
+       __le32 flow_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
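+
+/* Usage sketch (illustrative only): an IPv4 TCP flow-steering filter,
+ * e.g. for aRFS, could be built on top of an existing L2 filter roughly
+ * like this (HWRM_CFA_NTUPLE_FILTER_ALLOC is an assumed req_type name):
+ *
+ *        struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
+ *        req.l2_filter_id = l2_filter_id;     // already little-endian
+ *        req.ethertype = htons(ETH_P_IP);
+ *        req.ip_protocol = IPPROTO_TCP;
+ *        req.src_ipaddr[0] = saddr;           // __be32, IPv4 uses word 0
+ *        req.dst_ipaddr[0] = daddr;
+ *        req.src_port = sport;                // __be16, network order
+ *        req.dst_port = dport;
+ *        req.dst_vnic_id = cpu_to_le16(vnic_id);
+ *        req.enables = cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |
+ *                                  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |
+ *                                  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |
+ *                                  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
+ *                                  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
+ *                                  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |
+ *                                  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |
+ *                                  CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID);
+ *        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ */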
+
+/* hwrm_cfa_ntuple_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_ntuple_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 ntuple_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL
+       #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL
+       __le32 unused_0;
+       __le64 ntuple_filter_id;
+       __le32 new_dst_vnic_id;
+       __le32 new_mirror_vnic_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_query_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 tunnel_type;
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL   (0x0UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN       (0x1UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE       (0x2UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE       (0x3UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP         (0x4UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE      (0x5UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS         (0x6UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT          (0x7UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE       (0x8UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL   (0xffUL << 0)
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_query_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 tunnel_dst_port_id;
+       __be16 tunnel_dst_port_val;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 tunnel_type;
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL   (0x0UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN       (0x1UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE       (0x2UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE       (0x3UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP         (0x4UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE      (0x5UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS         (0x6UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT          (0x7UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE       (0x8UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL   (0xffUL << 0)
+       u8 unused_0;
+       __be16 tunnel_dst_port_val;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 tunnel_dst_port_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
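+
+/* Usage sketch (illustrative only): advertising the IANA VXLAN UDP port
+ * (4789) to the firmware, assuming an HWRM_TUNNEL_DST_PORT_ALLOC
+ * req_type constant:
+ *
+ *        struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+ *        req.tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ *        req.tunnel_dst_port_val = htons(4789);   // field is big-endian
+ *        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *
+ * The returned tunnel_dst_port_id is what hwrm_tunnel_dst_port_free
+ * expects when the port is torn down again.
+ */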
+
+/* hwrm_tunnel_dst_port_free */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 tunnel_type;
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL    (0x0UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN         (0x1UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE         (0x2UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE         (0x3UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP          (0x4UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE       (0x5UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS          (0x6UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT           (0x7UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE         (0x8UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL    (0xffUL << 0)
+       u8 unused_0;
+       __le16 tunnel_dst_port_id;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc */
+/* Input (32 bytes) */
+struct hwrm_stat_ctx_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 stats_dma_addr;
+       __le32 update_period_ms;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 stat_ctx_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
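+
+/* Usage sketch (illustrative only): the driver DMA-maps a per-ring stats
+ * buffer and hands its bus address to the firmware, which then refreshes
+ * the buffer every update_period_ms (HWRM_STAT_CTX_ALLOC is an assumed
+ * req_type name):
+ *
+ *        struct hwrm_stat_ctx_alloc_input req = {0};
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+ *        req.stats_dma_addr = cpu_to_le64(stats_map);   // dma_addr_t
+ *        req.update_period_ms = cpu_to_le32(1000);
+ *        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *
+ * The stat_ctx_id from the output is then used by hwrm_stat_ctx_query,
+ * hwrm_stat_ctx_clr_stats and hwrm_stat_ctx_free below.
+ */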
+
+/* hwrm_stat_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 stat_ctx_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 stat_ctx_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_stat_ctx_query */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_query_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 stat_ctx_id;
+       __le32 unused_0;
+};
+
+/* Output (176 bytes) */
+struct hwrm_stat_ctx_query_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 tx_ucast_pkts;
+       __le64 tx_mcast_pkts;
+       __le64 tx_bcast_pkts;
+       __le64 tx_err_pkts;
+       __le64 tx_drop_pkts;
+       __le64 tx_ucast_bytes;
+       __le64 tx_mcast_bytes;
+       __le64 tx_bcast_bytes;
+       __le64 rx_ucast_pkts;
+       __le64 rx_mcast_pkts;
+       __le64 rx_bcast_pkts;
+       __le64 rx_err_pkts;
+       __le64 rx_drop_pkts;
+       __le64 rx_ucast_bytes;
+       __le64 rx_mcast_bytes;
+       __le64 rx_bcast_bytes;
+       __le64 rx_agg_pkts;
+       __le64 rx_agg_bytes;
+       __le64 rx_agg_events;
+       __le64 rx_agg_aborts;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 stat_ctx_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_clr_stats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_alloc */
+/* Input (56 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH                 0x1UL
+       #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX             (0x0UL << 0)
+       #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX             (0x1UL << 0)
+       __le32 enables;
+       #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS         0x1UL
+       #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN              0x2UL
+       #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN              0x4UL
+       #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID          0x8UL
+       u8 l2_address[6];
+       u8 unused_0;
+       u8 unused_1;
+       u8 l2_address_mask[6];
+       __le16 ovlan;
+       __le16 ovlan_mask;
+       __le16 ivlan;
+       __le16 ivlan_mask;
+       u8 unused_2;
+       u8 unused_3;
+       __le32 action_id;
+       u8 action_bypass;
+       #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS              0x1UL
+       u8 unused_5[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 mgmt_l2_filter_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_mgmt_l2_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 mgmt_l2_filter_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_write_blk_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_src_addr;
+       __le32 dest_addr;
+       __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_write_blk_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_dest_addr;
+       __le16 dir_idx;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 offset;
+       __le32 len;
+       __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_read_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_raw_dump */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_dump_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_dest_addr;
+       __le32 offset;
+       __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_dump_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries */
+/* Input (24 bytes) */
+struct hwrm_nvm_get_dir_entries_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_dest_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_dir_entries_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dir_info_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_get_dir_info_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 entries;
+       __le32 entry_length;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_write */
+/* Input (40 bytes) */
+struct hwrm_nvm_write_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_src_addr;
+       __le16 dir_type;
+       __le16 dir_ordinal;
+       __le16 dir_ext;
+       __le16 dir_attr;
+       __le32 dir_data_length;
+       __le16 option;
+       __le16 flags;
+       #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG            0x1UL
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_write_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_modify */
+/* Input (40 bytes) */
+struct hwrm_nvm_modify_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_src_addr;
+       __le16 dir_idx;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 offset;
+       __le32 len;
+       __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_modify_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_find_dir_entry_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID       0x1UL
+       __le16 dir_idx;
+       __le16 dir_type;
+       __le16 dir_ordinal;
+       __le16 dir_ext;
+       u8 opt_ordinal;
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK     0x3UL
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT              0
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ              (0x0UL << 0)
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE              (0x1UL << 0)
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT              (0x2UL << 0)
+       u8 unused_1[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_find_dir_entry_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 dir_item_length;
+       __le32 dir_data_length;
+       __le32 fw_ver;
+       __le16 dir_ordinal;
+       __le16 dir_idx;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
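+
+/* Usage sketch (illustrative only): locating the first directory entry of
+ * a given type, assuming an HWRM_NVM_FIND_DIR_ENTRY req_type constant and
+ * the BNX_DIR_* values from bnxt_nvm_defs.h:
+ *
+ *        struct hwrm_nvm_find_dir_entry_input req = {0};
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
+ *        req.dir_type = cpu_to_le16(BNX_DIR_TYPE_BOOTCODE);
+ *        req.dir_ordinal = cpu_to_le16(BNX_DIR_ORDINAL_FIRST);
+ *        req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE;
+ *        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *
+ * On success the output reports the entry's dir_idx plus its item and
+ * data lengths, which can then feed hwrm_nvm_read or hwrm_nvm_modify.
+ */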
+
+/* hwrm_nvm_erase_dir_entry */
+/* Input (24 bytes) */
+struct hwrm_nvm_erase_dir_entry_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 dir_idx;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_erase_dir_entry_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dev_info_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_get_dev_info_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 manufacturer_id;
+       __le16 device_id;
+       __le32 sector_size;
+       __le32 nvram_size;
+       __le32 reserved_size;
+       __le32 available_size;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_mod_dir_entry_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM              0x1UL
+       __le16 dir_idx;
+       __le16 dir_ordinal;
+       __le16 dir_ext;
+       __le16 dir_attr;
+       __le32 checksum;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_mod_dir_entry_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_verify_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_verify_update_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 dir_type;
+       __le16 dir_ordinal;
+       __le16 dir_ext;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_verify_update_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_exec_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_exec_fwd_resp_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 encap_request[24];
+       __le16 encap_resp_target_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_exec_fwd_resp_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_reject_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_reject_fwd_resp_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 encap_request[24];
+       __le16 encap_resp_target_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_reject_fwd_resp_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_fwd_resp */
+/* Input (40 bytes) */
+struct hwrm_fwd_resp_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 encap_resp_target_id;
+       __le16 encap_resp_cmpl_ring;
+       __le16 encap_resp_len;
+       u8 unused_0;
+       u8 unused_1;
+       __le64 encap_resp_addr;
+       __le32 encap_resp[24];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_resp_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl */
+/* Input (32 bytes) */
+struct hwrm_fwd_async_event_cmpl_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 encap_async_event_target_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2[3];
+       u8 unused_3;
+       __le32 encap_async_event_cmpl[4];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_async_event_cmpl_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_fw_reset */
+/* Input (24 bytes) */
+struct hwrm_fw_reset_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 embedded_proc_type;
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP              (0x0UL << 0)
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE                (0x1UL << 0)
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG               (0x2UL << 0)
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO               (0x3UL << 0)
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG               (0x4UL << 0)
+       u8 selfrst_status;
+       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE    (0x0UL << 0)
+       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP    (0x1UL << 0)
+       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST         (0x2UL << 0)
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_reset_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 selfrst_status;
+       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE           (0x0UL << 0)
+       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP           (0x1UL << 0)
+       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST       (0x2UL << 0)
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 embedded_proc_type;
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP    (0x0UL << 0)
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE              (0x1UL << 0)
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG             (0x2UL << 0)
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO             (0x3UL << 0)
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG             (0x4UL << 0)
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 selfrst_status;
+       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE         (0x0UL << 0)
+       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP         (0x1UL << 0)
+       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST     (0x2UL << 0)
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_temp_monitor_query */
+/* Input (16 bytes) */
+struct hwrm_temp_monitor_query_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_temp_monitor_query_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 temp;
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
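+
+/* Usage sketch (illustrative only): the query takes no parameters beyond
+ * the standard header, and temp in the output carries the sensor reading
+ * (degrees Celsius, as far as the published HWRM conventions go;
+ * HWRM_TEMP_MONITOR_QUERY is an assumed req_type name):
+ *
+ *        struct hwrm_temp_monitor_query_input req = {0};
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+ *        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ */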
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
new file mode 100644 (file)
index 0000000..3cf3e1b
--- /dev/null
@@ -0,0 +1,59 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+enum bnxt_nvm_directory_type {
+       BNX_DIR_TYPE_UNUSED = 0,
+       BNX_DIR_TYPE_PKG_LOG = 1,
+       BNX_DIR_TYPE_CHIMP_PATCH = 3,
+       BNX_DIR_TYPE_BOOTCODE = 4,
+       BNX_DIR_TYPE_VPD = 5,
+       BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+       BNX_DIR_TYPE_AVS = 7,
+       BNX_DIR_TYPE_PCIE = 8,
+       BNX_DIR_TYPE_PORT_MACRO = 9,
+       BNX_DIR_TYPE_APE_FW = 10,
+       BNX_DIR_TYPE_APE_PATCH = 11,
+       BNX_DIR_TYPE_KONG_FW = 12,
+       BNX_DIR_TYPE_KONG_PATCH = 13,
+       BNX_DIR_TYPE_BONO_FW = 14,
+       BNX_DIR_TYPE_BONO_PATCH = 15,
+       BNX_DIR_TYPE_TANG_FW = 16,
+       BNX_DIR_TYPE_TANG_PATCH = 17,
+       BNX_DIR_TYPE_BOOTCODE_2 = 18,
+       BNX_DIR_TYPE_CCM = 19,
+       BNX_DIR_TYPE_PCI_CFG = 20,
+       BNX_DIR_TYPE_TSCF_UCODE = 21,
+       BNX_DIR_TYPE_ISCSI_BOOT = 22,
+       BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+       BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+       BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+       BNX_DIR_TYPE_EXT_PHY = 27,
+       BNX_DIR_TYPE_SHARED_CFG = 40,
+       BNX_DIR_TYPE_PORT_CFG = 41,
+       BNX_DIR_TYPE_FUNC_CFG = 42,
+       BNX_DIR_TYPE_MGMT_CFG = 48,
+       BNX_DIR_TYPE_MGMT_DATA = 49,
+       BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+       BNX_DIR_TYPE_MGMT_WEB_META = 51,
+       BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+       BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+#define BNX_DIR_ORDINAL_FIRST                  0
+
+#define BNX_DIR_EXT_INACTIVE                   (1 << 0)
+#define BNX_DIR_EXT_UPDATE                     (1 << 1)
+
+#define BNX_DIR_ATTR_NO_CHKSUM                 (1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM               (1 << 1)
+
+#endif                         /* Don't add anything after this line */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
new file mode 100644 (file)
index 0000000..60989e7
--- /dev/null
@@ -0,0 +1,816 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#ifdef CONFIG_BNXT_SRIOV
+static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
+{
+       if (bp->state != BNXT_STATE_OPEN) {
+               netdev_err(bp->dev, "vf ndo called though PF is down\n");
+               return -EINVAL;
+       }
+       if (!bp->pf.active_vfs) {
+               netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
+               return -EINVAL;
+       }
+       if (vf_id >= bp->pf.max_vfs) {
+               netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       bool old_setting = false;
+       u32 func_flags;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       vf = &bp->pf.vf[vf_id];
+       if (vf->flags & BNXT_VF_SPOOFCHK)
+               old_setting = true;
+       if (old_setting == setting)
+               return 0;
+
+       func_flags = vf->func_flags;
+       if (setting)
+               func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+       else
+               func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+       /* TODO: if the driver supports VLAN filtering on the guest VLAN,
+        * the spoof check should also include VLAN anti-spoofing.
+        */
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.vf_id = cpu_to_le16(vf->fw_fid);
+       req.flags = cpu_to_le32(func_flags);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               vf->func_flags = func_flags;
+               if (setting)
+                       vf->flags |= BNXT_VF_SPOOFCHK;
+               else
+                       vf->flags &= ~BNXT_VF_SPOOFCHK;
+       }
+       return rc;
+}
+
+int bnxt_get_vf_config(struct net_device *dev, int vf_id,
+                      struct ifla_vf_info *ivi)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       ivi->vf = vf_id;
+       vf = &bp->pf.vf[vf_id];
+
+       memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+       ivi->max_tx_rate = vf->max_tx_rate;
+       ivi->min_tx_rate = vf->min_tx_rate;
+       ivi->vlan = vf->vlan;
+       ivi->qos = vf->flags & BNXT_VF_QOS;
+       ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+       if (!(vf->flags & BNXT_VF_LINK_FORCED))
+               ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+       else if (vf->flags & BNXT_VF_LINK_UP)
+               ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+       else
+               ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
+       return 0;
+}
+
+int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+       /* Reject broadcast or multicast MAC addresses; a zero MAC
+        * address means the VF is allowed to use its own MAC address.
+        */
+       if (is_multicast_ether_addr(mac)) {
+               netdev_err(dev, "Invalid VF ethernet address\n");
+               return -EINVAL;
+       }
+       vf = &bp->pf.vf[vf_id];
+
+       memcpy(vf->mac_addr, mac, ETH_ALEN);
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.vf_id = cpu_to_le16(vf->fw_fid);
+       req.flags = cpu_to_le32(vf->func_flags);
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+       memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       u16 vlan_tag;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       /* TODO: proper handling of the user priority still needs to be
+        * implemented; for now, fail the command if a priority is set.
+        */
+       if (vlan_id > 4095 || qos)
+               return -EINVAL;
+
+       vf = &bp->pf.vf[vf_id];
+       vlan_tag = vlan_id;
+       if (vlan_tag == vf->vlan)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.vf_id = cpu_to_le16(vf->fw_fid);
+       req.flags = cpu_to_le32(vf->func_flags);
+       req.dflt_vlan = cpu_to_le16(vlan_tag);
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               vf->vlan = vlan_tag;
+       return rc;
+}
+
+int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
+                  int max_tx_rate)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       u32 pf_link_speed;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       vf = &bp->pf.vf[vf_id];
+       pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+       if (max_tx_rate > pf_link_speed) {
+               netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
+                           max_tx_rate, vf_id);
+               return -EINVAL;
+       }
+
+       if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+               netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
+                           min_tx_rate, vf_id);
+               return -EINVAL;
+       }
+       if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
+               return 0;
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.vf_id = cpu_to_le16(vf->fw_fid);
+       req.flags = cpu_to_le32(vf->func_flags);
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+       req.max_bw = cpu_to_le32(max_tx_rate);
+       req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+       req.min_bw = cpu_to_le32(min_tx_rate);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               vf->min_tx_rate = min_tx_rate;
+               vf->max_tx_rate = max_tx_rate;
+       }
+       return rc;
+}
+
+int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       vf = &bp->pf.vf[vf_id];
+
+       vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
+       switch (link) {
+       case IFLA_VF_LINK_STATE_AUTO:
+               vf->flags |= BNXT_VF_LINK_UP;
+               break;
+       case IFLA_VF_LINK_STATE_DISABLE:
+               vf->flags |= BNXT_VF_LINK_FORCED;
+               break;
+       case IFLA_VF_LINK_STATE_ENABLE:
+               vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
+               break;
+       default:
+               netdev_err(bp->dev, "Invalid link option\n");
+               rc = -EINVAL;
+               break;
+       }
+       /* CHIMP TODO: send msg to VF to update new link state */
+
+       return rc;
+}
+
+static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
+{
+       int i;
+       struct bnxt_vf_info *vf;
+
+       for (i = 0; i < num_vfs; i++) {
+               vf = &bp->pf.vf[i];
+               memset(vf, 0, sizeof(*vf));
+               vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
+       }
+       return 0;
+}
+
+static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
+{
+       int i, rc = 0;
+       struct bnxt_pf_info *pf = &bp->pf;
+       struct hwrm_func_vf_resc_free_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) {
+               req.vf_id = cpu_to_le16(i);
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static void bnxt_free_vf_resources(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+       int i;
+
+       kfree(bp->pf.vf_event_bmap);
+       bp->pf.vf_event_bmap = NULL;
+
+       for (i = 0; i < 4; i++) {
+               if (bp->pf.hwrm_cmd_req_addr[i]) {
+                       dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+                                         bp->pf.hwrm_cmd_req_addr[i],
+                                         bp->pf.hwrm_cmd_req_dma_addr[i]);
+                       bp->pf.hwrm_cmd_req_addr[i] = NULL;
+               }
+       }
+
+       kfree(bp->pf.vf);
+       bp->pf.vf = NULL;
+}
+
+static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
+{
+       struct pci_dev *pdev = bp->pdev;
+       u32 nr_pages, size, i, j, k = 0;
+
+       bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
+       if (!bp->pf.vf)
+               return -ENOMEM;
+
+       bnxt_set_vf_attr(bp, num_vfs);
+
+       size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
+       nr_pages = size / BNXT_PAGE_SIZE;
+       if (size & (BNXT_PAGE_SIZE - 1))
+               nr_pages++;
+
+       for (i = 0; i < nr_pages; i++) {
+               bp->pf.hwrm_cmd_req_addr[i] =
+                       dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+                                          &bp->pf.hwrm_cmd_req_dma_addr[i],
+                                          GFP_KERNEL);
+
+               if (!bp->pf.hwrm_cmd_req_addr[i])
+                       return -ENOMEM;
+
+               for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
+                       struct bnxt_vf_info *vf = &bp->pf.vf[k];
+
+                       vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
+                                               j * BNXT_HWRM_REQ_MAX_SIZE;
+                       vf->hwrm_cmd_req_dma_addr =
+                               bp->pf.hwrm_cmd_req_dma_addr[i] + j *
+                               BNXT_HWRM_REQ_MAX_SIZE;
+                       k++;
+               }
+       }
+
+       /* Max 128 VFs */
+       bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
+       if (!bp->pf.vf_event_bmap)
+               return -ENOMEM;
+
+       bp->pf.hwrm_cmd_req_pages = nr_pages;
+       return 0;
+}
+
+static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+       struct hwrm_func_buf_rgtr_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+
+       req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+       req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+       req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+       req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+       req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+       req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+       req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Only called by the PF to reserve resources for the VFs */
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
+{
+       u32 rc = 0, mtu, i;
+       u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt_pf_info *pf = &bp->pf;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+
+       /* Remaining rings are distributed equally among the VFs for now */
+       /* TODO: the following workaround is needed so that the total number
+        * of vf_cp_rings does not exceed the number of HW ring groups. The
+        * workaround should be removed once the new HWRM advertises the HW
+        * ring group capability in hwrm_func_qcap.
+        */
+       vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
+       vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
+       /* TODO: restore this logic below once the WA above is removed */
+       /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
+       vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
+       if (bp->flags & BNXT_FLAG_AGG_RINGS)
+               vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
+                             *num_vfs;
+       else
+               vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
+                             *num_vfs;
+       vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
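+
+       /* Worked example with assumed numbers: if max_tx_rings = 64,
+        * tx_nr_rings = 8 and *num_vfs = 7, each VF is offered
+        * (64 - 8) / 7 = 8 TX rings; RX rings, completion rings and stat
+        * contexts are split the same way after the PF's share is deducted.
+        */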
+
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+                                 FUNC_CFG_REQ_ENABLES_MRU |
+                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_VNICS);
+
+       mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+       req.mru = cpu_to_le16(mtu);
+       req.mtu = cpu_to_le16(mtu);
+
+       req.num_rsscos_ctxs = cpu_to_le16(1);
+       req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+       req.num_tx_rings = cpu_to_le16(vf_tx_rings);
+       req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+       req.num_l2_ctxs = cpu_to_le16(4);
+       vf_vnics = 1;
+
+       req.num_vnics = cpu_to_le16(vf_vnics);
+       /* FIXME spec currently uses 1 bit for stats ctx */
+       req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < *num_vfs; i++) {
+               req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+               bp->pf.active_vfs = i + 1;
+               bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       if (!rc) {
+               bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
+               if (bp->flags & BNXT_FLAG_AGG_RINGS)
+                       bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
+               else
+                       bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
+       }
+       return rc;
+}
+
+static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+{
+       int rc = 0, vfs_supported;
+       int min_rx_rings, min_tx_rings, min_rss_ctxs;
+       int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+
+       /* Check if we can enable the requested number of VFs. At a minimum
+        * we require 1 RX and 1 TX ring for each VF. In this minimum
+        * configuration, features like TPA will not be available.
+        */
+       vfs_supported = *num_vfs;
+
+       while (vfs_supported) {
+               min_rx_rings = vfs_supported;
+               min_tx_rings = vfs_supported;
+               min_rss_ctxs = vfs_supported;
+
+               if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+                       if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+                           min_rx_rings)
+                               rx_ok = 1;
+               } else {
+                       if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+                           min_rx_rings)
+                               rx_ok = 1;
+               }
+
+               if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+                       tx_ok = 1;
+
+               if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+                       rss_ok = 1;
+
+               if (tx_ok && rx_ok && rss_ok)
+                       break;
+
+               vfs_supported--;
+       }
+
+       if (!vfs_supported) {
+               netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
+               return -EINVAL;
+       }
+
+       if (vfs_supported != *num_vfs) {
+               netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
+                           *num_vfs, vfs_supported);
+               *num_vfs = vfs_supported;
+       }
+
+       rc = bnxt_alloc_vf_resources(bp, *num_vfs);
+       if (rc)
+               goto err_out1;
+
+       /* Reserve resources for VFs */
+       rc = bnxt_hwrm_func_cfg(bp, num_vfs);
+       if (rc)
+               goto err_out2;
+
+       /* Register buffers for VFs */
+       rc = bnxt_hwrm_func_buf_rgtr(bp);
+       if (rc)
+               goto err_out2;
+
+       rc = pci_enable_sriov(bp->pdev, *num_vfs);
+       if (rc)
+               goto err_out2;
+
+       return 0;
+
+err_out2:
+       /* Free the resources reserved for various VFs */
+       bnxt_hwrm_func_vf_resource_free(bp);
+
+err_out1:
+       bnxt_free_vf_resources(bp);
+
+       return rc;
+}
+
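The loop above steps the requested count down until the spare rings and RSS contexts can cover one of each per VF. The same search, reduced to a standalone sketch (all parameters hypothetical):

#include <stdio.h>

static int max_supportable_vfs(int requested, int spare_rx, int spare_tx,
                               int spare_rss)
{
        int vfs;

        for (vfs = requested; vfs > 0; vfs--)
                if (spare_rx >= vfs && spare_tx >= vfs && spare_rss >= vfs)
                        return vfs;     /* largest count the leftovers can cover */
        return 0;                       /* nothing left over for VFs */
}

int main(void)
{
        /* hypothetical spare resources: RX rings are the bottleneck */
        printf("can enable %d VFs\n", max_supportable_vfs(8, 6, 10, 10));
        return 0;
}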
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+       if (!bp->pf.active_vfs)
+               return;
+
+       pci_disable_sriov(bp->pdev);
+
+       /* Free the resources reserved for various VFs */
+       bnxt_hwrm_func_vf_resource_free(bp);
+
+       bnxt_free_vf_resources(bp);
+
+       bp->pf.active_vfs = 0;
+       bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
+       bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
+}
+
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+               netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
+               return 0;
+       }
+
+       rtnl_lock();
+       if (!netif_running(dev)) {
+               netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+               rtnl_unlock();
+               return 0;
+       }
+       bp->sriov_cfg = true;
+       rtnl_unlock();
+       if (!num_vfs) {
+               bnxt_sriov_disable(bp);
+               return 0;
+       }
+
+       /* Check if the number of enabled VFs is the same as requested */
+       if (num_vfs == bp->pf.active_vfs)
+               return 0;
+
+       bnxt_sriov_enable(bp, &num_vfs);
+
+       bp->sriov_cfg = false;
+       wake_up(&bp->sriov_cfg_wait);
+
+       return num_vfs;
+}
+
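bnxt_sriov_configure() is the driver's .sriov_configure hook, which the PCI core invokes when user space writes the device's sriov_numvfs sysfs attribute. A minimal sketch of that user-space side (the device address is hypothetical):

#include <stdio.h>

int main(void)
{
        /* writing N requests N VFs; writing 0 disables SR-IOV again */
        FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/sriov_numvfs", "w");

        if (!f) {
                perror("sriov_numvfs");
                return 1;
        }
        fprintf(f, "4\n");
        fclose(f);
        return 0;
}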
+static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+                             void *encap_resp, __le64 encap_resp_addr,
+                             __le16 encap_resp_cpr, u32 msg_size)
+{
+       int rc = 0;
+       struct hwrm_fwd_resp_input req = {0};
+       struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
+
+       /* Set the new target id */
+       req.target_id = cpu_to_le16(vf->fw_fid);
+       req.encap_resp_len = cpu_to_le16(msg_size);
+       req.encap_resp_addr = encap_resp_addr;
+       req.encap_resp_cmpl_ring = encap_resp_cpr;
+       memcpy(req.encap_resp, encap_resp, msg_size);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
+               goto fwd_resp_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+fwd_resp_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+                                 u32 msg_size)
+{
+       int rc = 0;
+       struct hwrm_reject_fwd_resp_input req = {0};
+       struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
+       /* Set the new target id */
+       req.target_id = cpu_to_le16(vf->fw_fid);
+       memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
+               goto fwd_err_resp_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+fwd_err_resp_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+                                  u32 msg_size)
+{
+       int rc = 0;
+       struct hwrm_exec_fwd_resp_input req = {0};
+       struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
+       /* Set the new target id */
+       req.target_id = cpu_to_le16(vf->fw_fid);
+       memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
+               goto exec_fwd_resp_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+exec_fwd_resp_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+       u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
+       struct hwrm_cfa_l2_filter_alloc_input *req =
+               (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+
+       if (!is_valid_ether_addr(vf->mac_addr) ||
+           ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+               return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+       else
+               return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+}
+
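The rule above: a VF-originated filter request is forwarded to firmware when the PF has not assigned the VF a MAC address (anything goes), or when the requested address matches the assigned one; otherwise it is bounced via the reject path. Restated as a user-space sketch (the zero-check is a crude stand-in for is_valid_ether_addr(), which also rejects multicast addresses):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool vf_mac_allowed(const unsigned char assigned[6],
                           const unsigned char requested[6])
{
        static const unsigned char zero[6];
        bool pf_assigned = memcmp(assigned, zero, 6) != 0;

        return !pf_assigned || memcmp(assigned, requested, 6) == 0;
}

int main(void)
{
        unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };
        unsigned char req[6] = { 0x02, 0, 0, 0, 0, 2 };

        printf("forward? %s\n", vf_mac_allowed(mac, req) ? "yes" : "no"); /* no */
        return 0;
}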
+static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+       int rc = 0;
+
+       if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
+               /* real link */
+               rc = bnxt_hwrm_exec_fwd_resp(
+                       bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
+       } else {
+               struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+               struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
+
+               phy_qcfg_req =
+               (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
+               mutex_lock(&bp->hwrm_cmd_lock);
+               memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
+                      sizeof(phy_qcfg_resp));
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+
+               if (vf->flags & BNXT_VF_LINK_UP) {
+                       /* if physical link is down, force link up on VF */
+                       if (phy_qcfg_resp.link ==
+                           PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+                               phy_qcfg_resp.link =
+                                       PORT_PHY_QCFG_RESP_LINK_LINK;
+                               if (phy_qcfg_resp.auto_link_speed)
+                                       phy_qcfg_resp.link_speed =
+                                               phy_qcfg_resp.auto_link_speed;
+                               else
+                                       phy_qcfg_resp.link_speed =
+                                               phy_qcfg_resp.force_link_speed;
+                               phy_qcfg_resp.duplex =
+                                       PORT_PHY_QCFG_RESP_DUPLEX_FULL;
+                               phy_qcfg_resp.pause =
+                                       (PORT_PHY_QCFG_RESP_PAUSE_TX |
+                                        PORT_PHY_QCFG_RESP_PAUSE_RX);
+                       }
+               } else {
+                       /* force link down */
+                       phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
+                       phy_qcfg_resp.link_speed = 0;
+                       phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
+                       phy_qcfg_resp.pause = 0;
+               }
+               rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
+                                       phy_qcfg_req->resp_addr,
+                                       phy_qcfg_req->cmpl_ring,
+                                       sizeof(phy_qcfg_resp));
+       }
+       return rc;
+}
+
+static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+       int rc = 0;
+       struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
+       u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
+
+       switch (req_type) {
+       case HWRM_CFA_L2_FILTER_ALLOC:
+               rc = bnxt_vf_validate_set_mac(bp, vf);
+               break;
+       case HWRM_FUNC_CFG:
+               /* TODO: Validate whether the VF is allowed to change the MAC
+                * address, MTU, number of rings, etc.
+                */
+               rc = bnxt_hwrm_exec_fwd_resp(
+                       bp, vf, sizeof(struct hwrm_func_cfg_input));
+               break;
+       case HWRM_PORT_PHY_QCFG:
+               rc = bnxt_vf_set_link(bp, vf);
+               break;
+       default:
+               break;
+       }
+       return rc;
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+       u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
+
+       /* Scan through the VFs and process commands */
+       while (1) {
+               vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
+               if (vf_id >= active_vfs)
+                       break;
+
+               clear_bit(vf_id, bp->pf.vf_event_bmap);
+               bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
+               i = vf_id + 1;
+       }
+}
+
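The event loop above walks a bitmap of VFs with pending messages, clearing each bit before servicing it. In this sketch a plain unsigned long stands in for the kernel's find_next_bit()/clear_bit() helpers:

#include <stdio.h>

int main(void)
{
        unsigned long pending = 0x16;   /* bits 1, 2 and 4: three VFs have events */
        int active_vfs = 8;

        for (int vf = 0; vf < active_vfs; vf++) {
                if (!(pending & (1UL << vf)))
                        continue;
                pending &= ~(1UL << vf);        /* clear_bit() equivalent */
                printf("servicing VF %d\n", vf);
        }
        return 0;
}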
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+       struct hwrm_func_qcaps_input req = {0};
+       struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+       req.fid = cpu_to_le16(0xffff);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+               goto update_vf_mac_exit;
+
+       if (!is_valid_ether_addr(resp->perm_mac_address))
+               goto update_vf_mac_exit;
+
+       if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+               goto update_vf_mac_exit;
+
+       memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+       memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+update_vf_mac_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+#else
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+       netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
new file mode 100644 (file)
index 0000000..c151280
--- /dev/null
@@ -0,0 +1,23 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_SRIOV_H
+#define BNXT_SRIOV_H
+
+int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_bw(struct net_device *, int, int, int);
+int bnxt_set_vf_link_state(struct net_device *, int, int);
+int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void bnxt_sriov_disable(struct bnxt *);
+void bnxt_hwrm_exec_fwd_req(struct bnxt *);
+void bnxt_update_vf_mac(struct bnxt *);
+#endif
index 1a3988f51305859c8f17771dbe2f7de21f0481c3..50f63b7f3c3e4f97e9b0f456ad96c96c009e8300 100644 (file)
@@ -793,7 +793,6 @@ static void bcmgenet_get_drvinfo(struct net_device *dev,
 {
        strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
        strlcpy(info->version, "v2.0", sizeof(info->version));
-       info->n_stats = BCMGENET_STATS_LEN;
 }
 
 static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -1832,6 +1831,24 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
        bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 }
 
+static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
+{
+       u32 int0_enable = 0;
+
+       /* Monitor cable plug/unplug events for the internal PHY, external
+        * PHY and MoCA PHY
+        */
+       if (priv->internal_phy) {
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
+       } else if (priv->ext_phy) {
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
+       } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+                       int0_enable |= UMAC_IRQ_LINK_EVENT;
+       }
+       bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+}
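The helper writes the link-event bit into an interrupt MASK_CLEAR register, i.e. interrupts are enabled by clearing mask bits rather than by setting enable bits. A sketch of that MMIO idiom (bit position, offset and base are hypothetical, not GENET registers):

#include <stdint.h>

#define IRQ_LINK_EVENT  (1u << 4)       /* made-up bit position */
#define MASK_CLEAR      0x0c            /* made-up register offset */

static inline void intr_unmask(volatile uint32_t *base, uint32_t bits)
{
        /* writing 1s to MASK_CLEAR clears those mask bits, unmasking the IRQs */
        base[MASK_CLEAR / 4] = bits;
}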
+
 static int init_umac(struct bcmgenet_priv *priv)
 {
        struct device *kdev = &priv->pdev->dev;
@@ -1872,15 +1889,8 @@ static int init_umac(struct bcmgenet_priv *priv)
        /* Enable Tx default queue 16 interrupts */
        int0_enable |= UMAC_IRQ_TXDMA_DONE;
 
-       /* Monitor cable plug/unplugged event for internal PHY */
-       if (priv->internal_phy) {
-               int0_enable |= UMAC_IRQ_LINK_EVENT;
-       } else if (priv->ext_phy) {
-               int0_enable |= UMAC_IRQ_LINK_EVENT;
-       } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
-               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
-                       int0_enable |= UMAC_IRQ_LINK_EVENT;
-
+       /* Configure backpressure vectors for MoCA */
+       if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
                reg = bcmgenet_bp_mc_get(priv);
                reg |= BIT(priv->hw_params->bp_in_en_shift);
 
@@ -2794,6 +2804,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
 
        netif_tx_start_all_queues(dev);
 
+       /* Monitor link interrupts now */
+       bcmgenet_link_intr_enable(priv);
+
        phy_start(priv->phydev);
 }
 
index 9b35d142f47accfbaec039f0d8100fb45d68bbf8..8fb84e69c30ec8ddfdf6be710cbfab0a3cd1aa60 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config NET_VENDOR_CAVIUM
-       tristate "Cavium ethernet drivers"
+       bool "Cavium ethernet drivers"
        depends on PCI
        default y
        ---help---
index 29f33083178431ac3735094683663d1e4ab2b836..245c063ed4db0f5dcf70fce926cdd6d1108b01db 100644 (file)
@@ -153,7 +153,6 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
        strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
                ETHTOOL_FWVERS_LEN);
        strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
-       drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
 }
 
 static void
index 02e4e028a647e2ab63871b67709fad8da477891c..a077f9476daf3583ff7ca5a60c406c3b4d6e43c7 100644 (file)
@@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val)
 }
 
 static const char stats_strings[][ETH_GSTRING_LEN] = {
-       "TxOctetsOK         ",
-       "TxFramesOK         ",
-       "TxBroadcastFrames  ",
-       "TxMulticastFrames  ",
-       "TxUnicastFrames    ",
-       "TxErrorFrames      ",
-
-       "TxFrames64         ",
-       "TxFrames65To127    ",
-       "TxFrames128To255   ",
-       "TxFrames256To511   ",
-       "TxFrames512To1023  ",
-       "TxFrames1024To1518 ",
-       "TxFrames1519ToMax  ",
-
-       "TxFramesDropped    ",
-       "TxPauseFrames      ",
-       "TxPPP0Frames       ",
-       "TxPPP1Frames       ",
-       "TxPPP2Frames       ",
-       "TxPPP3Frames       ",
-       "TxPPP4Frames       ",
-       "TxPPP5Frames       ",
-       "TxPPP6Frames       ",
-       "TxPPP7Frames       ",
-
-       "RxOctetsOK         ",
-       "RxFramesOK         ",
-       "RxBroadcastFrames  ",
-       "RxMulticastFrames  ",
-       "RxUnicastFrames    ",
-
-       "RxFramesTooLong    ",
-       "RxJabberErrors     ",
-       "RxFCSErrors        ",
-       "RxLengthErrors     ",
-       "RxSymbolErrors     ",
-       "RxRuntFrames       ",
-
-       "RxFrames64         ",
-       "RxFrames65To127    ",
-       "RxFrames128To255   ",
-       "RxFrames256To511   ",
-       "RxFrames512To1023  ",
-       "RxFrames1024To1518 ",
-       "RxFrames1519ToMax  ",
-
-       "RxPauseFrames      ",
-       "RxPPP0Frames       ",
-       "RxPPP1Frames       ",
-       "RxPPP2Frames       ",
-       "RxPPP3Frames       ",
-       "RxPPP4Frames       ",
-       "RxPPP5Frames       ",
-       "RxPPP6Frames       ",
-       "RxPPP7Frames       ",
-
-       "RxBG0FramesDropped ",
-       "RxBG1FramesDropped ",
-       "RxBG2FramesDropped ",
-       "RxBG3FramesDropped ",
-       "RxBG0FramesTrunc   ",
-       "RxBG1FramesTrunc   ",
-       "RxBG2FramesTrunc   ",
-       "RxBG3FramesTrunc   ",
-
-       "TSO                ",
-       "TxCsumOffload      ",
-       "RxCsumGood         ",
-       "VLANextractions    ",
-       "VLANinsertions     ",
-       "GROpackets         ",
-       "GROmerged          ",
+       "tx_octets_ok           ",
+       "tx_frames_ok           ",
+       "tx_broadcast_frames    ",
+       "tx_multicast_frames    ",
+       "tx_unicast_frames      ",
+       "tx_error_frames        ",
+
+       "tx_frames_64           ",
+       "tx_frames_65_to_127    ",
+       "tx_frames_128_to_255   ",
+       "tx_frames_256_to_511   ",
+       "tx_frames_512_to_1023  ",
+       "tx_frames_1024_to_1518 ",
+       "tx_frames_1519_to_max  ",
+
+       "tx_frames_dropped      ",
+       "tx_pause_frames        ",
+       "tx_ppp0_frames         ",
+       "tx_ppp1_frames         ",
+       "tx_ppp2_frames         ",
+       "tx_ppp3_frames         ",
+       "tx_ppp4_frames         ",
+       "tx_ppp5_frames         ",
+       "tx_ppp6_frames         ",
+       "tx_ppp7_frames         ",
+
+       "rx_octets_ok           ",
+       "rx_frames_ok           ",
+       "rx_broadcast_frames    ",
+       "rx_multicast_frames    ",
+       "rx_unicast_frames      ",
+
+       "rx_frames_too_long     ",
+       "rx_jabber_errors       ",
+       "rx_fcs_errors          ",
+       "rx_length_errors       ",
+       "rx_symbol_errors       ",
+       "rx_runt_frames         ",
+
+       "rx_frames_64           ",
+       "rx_frames_65_to_127    ",
+       "rx_frames_128_to_255   ",
+       "rx_frames_256_to_511   ",
+       "rx_frames_512_to_1023  ",
+       "rx_frames_1024_to_1518 ",
+       "rx_frames_1519_to_max  ",
+
+       "rx_pause_frames        ",
+       "rx_ppp0_frames         ",
+       "rx_ppp1_frames         ",
+       "rx_ppp2_frames         ",
+       "rx_ppp3_frames         ",
+       "rx_ppp4_frames         ",
+       "rx_ppp5_frames         ",
+       "rx_ppp6_frames         ",
+       "rx_ppp7_frames         ",
+
+       "rx_bg0_frames_dropped  ",
+       "rx_bg1_frames_dropped  ",
+       "rx_bg2_frames_dropped  ",
+       "rx_bg3_frames_dropped  ",
+       "rx_bg0_frames_trunc    ",
+       "rx_bg1_frames_trunc    ",
+       "rx_bg2_frames_trunc    ",
+       "rx_bg3_frames_trunc    ",
+
+       "tso                    ",
+       "tx_csum_offload        ",
+       "rx_csum_good           ",
+       "vlan_extractions       ",
+       "vlan_insertions        ",
+       "gro_packets            ",
+       "gro_merged             ",
 };
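The rename above normalizes the names to the lower-case style used elsewhere; the trailing spaces are cosmetic padding, since ethtool ultimately copies each name into a fixed ETH_GSTRING_LEN-sized slot. A sketch of that fixed-width layout (GSTRING_LEN stands in for the kernel constant):

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32

int main(void)
{
        const char *names[] = { "tx_octets_ok", "rx_fcs_errors" };
        char table[2][GSTRING_LEN] = {{0}};

        for (int i = 0; i < 2; i++)
                strncpy(table[i], names[i], GSTRING_LEN - 1);

        printf("%zu bytes per entry\n", sizeof(table[0]));      /* always 32 */
        return 0;
}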
 
 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
@@ -211,8 +211,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
                sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
+       info->regdump_len = get_regs_len(dev);
 
-       if (adapter->params.fw_vers)
+       if (!adapter->params.fw_vers)
+               strcpy(info->fw_version, "N/A");
+       else
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
@@ -612,6 +615,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;
        u32 speed = ethtool_cmd_speed(cmd);
+       struct link_config old_lc;
+       int ret;
 
        if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
                return -EINVAL;
@@ -626,13 +631,11 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                return -EINVAL;
        }
 
+       old_lc = *lc;
        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(speed);
 
-               if (!(lc->supported & cap) ||
-                   (speed == 1000) ||
-                   (speed == 10000) ||
-                   (speed == 40000))
+               if (!(lc->supported & cap))
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
@@ -645,10 +648,14 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        }
        lc->autoneg = cmd->autoneg;
 
-       if (netif_running(dev))
-               return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
-                                    lc);
-       return 0;
+       /* If the firmware rejects the Link Configuration request, back out
+        * the changes and report the error.
+        */
+       ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
+       if (ret)
+               *lc = old_lc;
+
+       return ret;
 }
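The reworked set_settings() now snapshots the link configuration before touching it and restores the snapshot when the firmware rejects the request, so a failed ethtool call no longer leaves stale state behind. The pattern in isolation (struct cfg and apply_to_fw are stand-ins, not driver types):

struct cfg { int speed; int autoneg; };

static int apply_cfg(struct cfg *live, const struct cfg *wanted,
                     int (*apply_to_fw)(const struct cfg *))
{
        struct cfg old = *live;         /* snapshot before mutating */
        int ret;

        *live = *wanted;
        ret = apply_to_fw(live);
        if (ret)
                *live = old;            /* rejected: back out the changes */
        return ret;
}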
 
 static void get_pauseparam(struct net_device *dev,
@@ -847,7 +854,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
 {
        int i, err = 0;
        struct adapter *adapter = netdev2adap(dev);
-       u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+       u8 *buf = t4_alloc_mem(EEPROMSIZE);
 
        if (!buf)
                return -ENOMEM;
@@ -858,7 +865,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
 
        if (!err)
                memcpy(data, buf + e->offset, e->len);
-       kfree(buf);
+       t4_free_mem(buf);
        return err;
 }
 
@@ -887,7 +894,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                /* RMW possibly needed for first or last words.
                 */
-               buf = kmalloc(aligned_len, GFP_KERNEL);
+               buf = t4_alloc_mem(aligned_len);
                if (!buf)
                        return -ENOMEM;
                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
@@ -915,7 +922,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                err = t4_seeprom_wp(adapter, true);
 out:
        if (buf != data)
-               kfree(buf);
+               t4_free_mem(buf);
        return err;
 }
 
@@ -1011,11 +1018,15 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
        if (!p)
                return 0;
 
-       for (i = 0; i < pi->rss_size; i++)
-               pi->rss[i] = p[i];
-       if (pi->adapter->flags & FULL_INIT_DONE)
+       /* Interface must be brought up at least once */
+       if (pi->adapter->flags & FULL_INIT_DONE) {
+               for (i = 0; i < pi->rss_size; i++)
+                       pi->rss[i] = p[i];
+
                return cxgb4_write_rss(pi, pi->rss);
-       return 0;
+       }
+
+       return -EPERM;
 }
 
 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
index c29227ee9ee8953b4d1bdb27b44373b02e9d93c5..2cf81857a2971b280005715992c0842e4f21385f 100644 (file)
@@ -83,7 +83,7 @@ char cxgb4_driver_name[] = KBUILD_MODNAME;
 #endif
 #define DRV_VERSION "2.0.0-ko"
 const char cxgb4_driver_version[] = DRV_VERSION;
-#define DRV_DESC "Chelsio T4/T5 Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
 
 /* Host shadow copy of ingress filter entry.  This is in host native format
  * and doesn't match the ordering or bit order, etc. of the hardware of the
@@ -151,6 +151,7 @@ MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
 MODULE_FIRMWARE(FW4_FNAME);
 MODULE_FIRMWARE(FW5_FNAME);
+MODULE_FIRMWARE(FW6_FNAME);
 
 /*
  * Normally we're willing to become the firmware's Master PF but will be happy
@@ -4485,6 +4486,10 @@ static int enable_msix(struct adapter *adap)
        }
        for (i = 0; i < allocated; ++i)
                adap->msix_info[i].vec = entries[i].vector;
+       dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
+                "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
+                allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs,
+                s->rdmaciqs);
 
        kfree(entries);
        return 0;
index b2b5e5bbe04c5f3b307a79ba9371bd017d9b1147..0cfa5d72cafd4027a1f06593bfc3f63c0703ba49 100644 (file)
@@ -56,7 +56,7 @@
  * Generic information about the driver.
  */
 #define DRV_VERSION "2.0.0-ko"
-#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
 
 /*
  * Module Parameters.
index a02ecc4f90022e70026e648e2ae3ad1cfc2b53e3..cadcee645f74e8fe45a6f34e2b7e0bf949ea14e7 100644 (file)
@@ -1597,7 +1597,6 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
-       info->eedump_len = DE_EEPROM_SIZE;
 }
 
 static int de_get_regs_len(struct net_device *dev)
index 821540913343db10f59ae3a03835a084ca82063d..d463563e1f7039ee5176ca36abfdc6bae3f2ed46 100644 (file)
@@ -592,6 +592,7 @@ struct be_adapter {
        int be_get_temp_freq;
        struct be_hwmon hwmon_info;
        u8 pf_number;
+       u8 pci_func_num;
        struct rss_info rss_info;
        /* Filters for packets that need to be sent to BMC */
        u32 bmc_filt_mask;
index eb323913cd39fb981a8c0cc02140c0c7205ee4f8..1795c935ff023fcf795008a49a9a4cd0fce63d9c 100644 (file)
@@ -851,8 +851,10 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
                return status;
 
        dest_wrb = be_cmd_copy(adapter, wrb);
-       if (!dest_wrb)
-               return -EBUSY;
+       if (!dest_wrb) {
+               status = -EBUSY;
+               goto unlock;
+       }
 
        if (use_mcc(adapter))
                status = be_mcc_notify_wait(adapter);
@@ -862,6 +864,7 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
        if (!status)
                memcpy(wrb, dest_wrb, sizeof(*wrb));
 
+unlock:
        be_cmd_unlock(adapter);
        return status;
 }
@@ -1984,6 +1987,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
                         be_if_cap_flags(adapter));
        }
        flags &= be_if_cap_flags(adapter);
+       if (!flags)
+               return -ENOTSUPP;
 
        return __be_cmd_rx_filter(adapter, flags, value);
 }
@@ -2887,6 +2892,7 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        if (!status) {
                attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
                adapter->hba_port_num = attribs->hba_attribs.phy_port;
+               adapter->pci_func_num = attribs->pci_func_num;
                serial_num = attribs->hba_attribs.controller_serial_number;
                for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
                        adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
@@ -3709,7 +3715,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
                        status = -EINVAL;
                        goto err;
                }
-
                adapter->pf_number = desc->pf_num;
                be_copy_nic_desc(res, desc);
        }
@@ -3721,7 +3726,10 @@ err:
        return status;
 }
 
-/* Will use MBOX only if MCCQ has not been created */
+/* Will use MBOX only if MCCQ has not been created
+ * non-zero domain => a PF is querying this on behalf of a VF
+ * zero domain => a PF or a VF is querying this for itself
+ */
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 query, u8 domain)
 {
@@ -3748,10 +3756,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                               OPCODE_COMMON_GET_PROFILE_CONFIG,
                               cmd.size, &wrb, &cmd);
 
-       req->hdr.domain = domain;
        if (!lancer_chip(adapter))
                req->hdr.version = 1;
        req->type = ACTIVE_PROFILE_TYPE;
+       /* When a function is querying profile information relating to
+        * itself, hdr.pf_num must be set to its pci_func_num + 1
+        */
+       req->hdr.domain = domain;
+       if (domain == 0)
+               req->hdr.pf_num = adapter->pci_func_num + 1;
 
        /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
         * descriptors with all bits set to "1" for the fields which can be
@@ -3921,12 +3934,16 @@ static void be_fill_vf_res_template(struct be_adapter *adapter,
                        vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
                                             BE_IF_FLAGS_DEFQ_RSS);
                }
-
-               nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
        } else {
                num_vf_qs = 1;
        }
 
+       if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+               nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+               vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+       }
+
+       nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
        nic_vft->rq_count = cpu_to_le16(num_vf_qs);
        nic_vft->txq_count = cpu_to_le16(num_vf_qs);
        nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
index 7d178bdb112eb7d14d5a62d74ea21ecaa30ba0e9..91155ea74f342e2663f18848c9fe5546635e7c05 100644 (file)
@@ -289,7 +289,9 @@ struct be_cmd_req_hdr {
        u32 timeout;            /* dword 1 */
        u32 request_length;     /* dword 2 */
        u8 version;             /* dword 3 */
-       u8 rsvd[3];             /* dword 3 */
+       u8 rsvd1;               /* dword 3 */
+       u8 pf_num;              /* dword 3 */
+       u8 rsvd2;               /* dword 3 */
 };
 
 #define RESP_HDR_INFO_OPCODE_SHIFT     0       /* bits 0 - 7 */
@@ -1652,7 +1654,11 @@ struct mgmt_hba_attribs {
 
 struct mgmt_controller_attrib {
        struct mgmt_hba_attribs hba_attribs;
-       u32 rsvd0[10];
+       u32 rsvd0[2];
+       u16 rsvd1;
+       u8 pci_func_num;
+       u8 rsvd2;
+       u32 rsvd3[7];
 } __packed;
 
 struct be_cmd_req_cntl_attribs {
index 2c9ed1710ba6f4c16d8a3d800602b6d2337abb59..f4cb8e425853a3fd85151d3b6df5e7651b95f81a 100644 (file)
@@ -234,9 +234,6 @@ static void be_get_drvinfo(struct net_device *netdev,
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->testinfo_len = 0;
-       drvinfo->regdump_len = 0;
-       drvinfo->eedump_len = 0;
 }
 
 static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
index 7bf51a1a0a77e12a4812662767dd21767813dc1b..eb48a977f8daabe78d6d5b6a94f6607bf19287b8 100644 (file)
@@ -1123,11 +1123,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           struct be_wrb_params *wrb_params)
 {
-       /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
-        * less may cause a transmit stall on that port. So the work-around is
-        * to pad short packets (<= 32 bytes) to a 36-byte length.
+       /* Lancer, SH and BE3 in SR-IOV mode have a bug wherein packets
+        * that are 32 bytes or less may cause a transmit stall on that
+        * port. The workaround is to pad such packets (len <= 32 bytes) to
+        * a minimum length of 36 bytes.
+        */
         */
-       if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
+       if (skb->len <= 32) {
                if (skb_put_padto(skb, 36))
                        return NULL;
        }
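With the BE3 SR-IOV case added, the check can no longer be keyed off the chip family alone, so every frame of 32 bytes or less now gets padded. The padding itself, as a buffer-level sketch (plain buffers stand in for sk_buffs; skb_put_padto() likewise zeroes the added tail):

#include <stddef.h>
#include <string.h>

static size_t pad_short_frame(unsigned char *buf, size_t len, size_t cap)
{
        const size_t min_len = 36;      /* safe length per the erratum above */

        if (len > 32 || cap < min_len)
                return len;             /* long enough already, or no room to pad */
        memset(buf + len, 0, min_len - len);
        return min_len;
}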
@@ -4205,10 +4206,6 @@ static int be_get_config(struct be_adapter *adapter)
        int status, level;
        u16 profile_id;
 
-       status = be_cmd_get_cntl_attributes(adapter);
-       if (status)
-               return status;
-
        status = be_cmd_query_fw_cfg(adapter);
        if (status)
                return status;
@@ -4407,6 +4404,11 @@ static int be_setup(struct be_adapter *adapter)
        if (!lancer_chip(adapter))
                be_cmd_req_native_mode(adapter);
 
+       /* Need to invoke this cmd first to get the PCI Function Number */
+       status = be_cmd_get_cntl_attributes(adapter);
+       if (status)
+               return status;
+
        if (!BE2_chip(adapter) && be_physfn(adapter))
                be_alloc_sriov_res(adapter);
 
@@ -4999,7 +5001,15 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
                return false;
        }
 
-       return (fhdr->asic_type_rev >= adapter->asic_rev);
+       /* In BE3 FW images the "asic_type_rev" field doesn't track the
+        * asic_rev of the chips it is compatible with.
+        * When asic_type_rev is 0 the image is compatible only with
+        * pre-BE3-R chips (asic_rev < 0x10)
+        */
+       if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
+               return adapter->asic_rev < 0x10;
+       else
+               return (fhdr->asic_type_rev >= adapter->asic_rev);
 }
 
 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
index 3c40f6b9922436a32d255aa627e9d94db45f8477..55c36230e17634c3e063bdb20f4bb6a896bce4a6 100644 (file)
@@ -198,17 +198,28 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
 
 #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 /*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MDIO registers (struct gfar)
  * This is mildly evil, but so is our hardware for doing this.
  * Also, we have to cast back to struct gfar because of
  * definition weirdness done in gianfar.h.
  */
-static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
+static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
 {
        struct gfar __iomem *enet_regs = p;
 
        return &enet_regs->tbipa;
 }
 
+/*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
+ */
+static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
+{
+       return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
+}
+
 /*
  * Return the TBIPAR address for an eTSEC2 node
  */
@@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
 
 #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 /*
- * Return the TBIPAR address for a QE MDIO node
+ * Return the TBIPAR address for a QE MDIO node, starting from the address
+ * of the mapped MII registers (struct fsl_pq_mii)
  */
 static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
 {
-       struct fsl_pq_mdio __iomem *mdio = p;
+       struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
 
        return &mdio->utbipar;
 }
@@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
                .compatible = "fsl,gianfar-tbi",
                .data = &(struct fsl_pq_mdio_data) {
                        .mii_offset = 0,
-                       .get_tbipa = get_gfar_tbipa,
+                       .get_tbipa = get_gfar_tbipa_from_mii,
                },
        },
        {
                .compatible = "fsl,gianfar-mdio",
                .data = &(struct fsl_pq_mdio_data) {
                        .mii_offset = 0,
-                       .get_tbipa = get_gfar_tbipa,
+                       .get_tbipa = get_gfar_tbipa_from_mii,
                },
        },
        {
@@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
                .compatible = "gianfar",
                .data = &(struct fsl_pq_mdio_data) {
                        .mii_offset = offsetof(struct fsl_pq_mdio, mii),
-                       .get_tbipa = get_gfar_tbipa,
+                       .get_tbipa = get_gfar_tbipa_from_mdio,
                },
        },
        {
@@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 
                        tbipa = data->get_tbipa(priv->map);
 
+                       /*
+                        * Add consistency check to make sure TBI is contained
+                        * within the mapped range (not because we would get a
+                        * segfault, rather to catch bugs in computing TBI
+                        * address). Print error message but continue anyway.
+                        */
+                       if ((void *)tbipa > priv->map + resource_size(&res) - 4)
+                               dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n",
+                                       ((void *)tbipa - priv->map) + 4);
+
                        iowrite32be(be32_to_cpup(prop), tbipa);
                }
        }
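The two new TBIPA helpers differ only in where the mapped pointer lands inside struct gfar, which is why one of them can recover the enclosing structure with container_of(). That idiom, self-contained (struct regs is a stand-in):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct regs { int ctrl; int mii; int tbipa; };

int main(void)
{
        struct regs r = { 0, 0, 42 };
        int *mii_ptr = &r.mii;          /* all we were handed */
        struct regs *whole = container_of(mii_ptr, struct regs, mii);

        printf("tbipa = %d\n", whole->tbipa);   /* prints 42 */
        return 0;
}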
index 7f5389c3c0cf62dd19cc89f90f98784ecfb8999d..47f0400ad02060e603be521ba60a5fa81accff02 100644 (file)
 
 #include "gianfar.h"
 
-#define TX_TIMEOUT      (1*HZ)
+#define TX_TIMEOUT      (5*HZ)
 
 const char gfar_driver_version[] = "2.0";
 
index fb7f8d67aef43f401b3bf78369b3c07ff8a478ec..928ca2bdd238e46e735da4c4a068e7f01e630fed 100644 (file)
@@ -182,8 +182,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
-       drvinfo->regdump_len = 0;
-       drvinfo->eedump_len = 0;
 }
 
 
index cc83350d56ba1c05aa7eb619b607635f76de6400..89714f5e0dfc57b2246affedf002e19da0d17c3c 100644 (file)
@@ -351,8 +351,6 @@ uec_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
-       drvinfo->eedump_len = 0;
-       drvinfo->regdump_len = uec_get_regs_len(netdev);
 }
 
 #ifdef CONFIG_PM
index 8d12b587809eecbeb67f953993f1895a6abb6c34..f250dec488fd2a2b908a0cb67bb2df61c53ac7ee 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_HISILICON
        bool "Hisilicon devices"
        default y
-       depends on ARM || ARM64
+       depends on OF && (ARM || ARM64 || COMPILE_TEST)
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
 
index a5e077eac99a3b8292fd5355c142733dccbf8f89..e51892d518ff3ccca8aaed43838d7343b4844dc3 100644 (file)
@@ -371,7 +371,7 @@ static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
 
 static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
 {
-       writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
+       writel_relaxed(~(u32)(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
        writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
 }
 
index f52e99acf46342afcb657af10e59df5ae6d18fe4..b3645297477e53309918e6762c44a6bfde031880 100644 (file)
@@ -436,60 +436,10 @@ void hnae_ae_unregister(struct hnae_ae_dev *hdev)
 }
 EXPORT_SYMBOL(hnae_ae_unregister);
 
-static ssize_t handles_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
-{
-       ssize_t s = 0;
-       struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
-       struct hnae_handle *h;
-       int i = 0, j;
-
-       list_for_each_entry_rcu(h, &hdev->handle_list, node) {
-               s += sprintf(buf + s, "handle %d (eport_id=%u from %s):\n",
-                           i++, h->eport_id, h->dev->name);
-               for (j = 0; j < h->q_num; j++) {
-                       s += sprintf(buf + s, "\tqueue[%d] on %p\n",
-                                    j, h->qs[i]->io_base);
-#define HANDEL_TX_MSG "\t\ttx_ring on %p:%u,%u,%u,%u,%u,%llu,%llu\n"
-                       s += sprintf(buf + s,
-                                    HANDEL_TX_MSG,
-                                    h->qs[i]->tx_ring.io_base,
-                                    h->qs[i]->tx_ring.buf_size,
-                                    h->qs[i]->tx_ring.desc_num,
-                                    h->qs[i]->tx_ring.max_desc_num_per_pkt,
-                                    h->qs[i]->tx_ring.max_raw_data_sz_per_desc,
-                                    h->qs[i]->tx_ring.max_pkt_size,
-                                h->qs[i]->tx_ring.stats.sw_err_cnt,
-                                h->qs[i]->tx_ring.stats.io_err_cnt);
-                       s += sprintf(buf + s,
-                               "\t\trx_ring on %p:%u,%u,%llu,%llu,%llu\n",
-                               h->qs[i]->rx_ring.io_base,
-                               h->qs[i]->rx_ring.buf_size,
-                               h->qs[i]->rx_ring.desc_num,
-                               h->qs[i]->rx_ring.stats.sw_err_cnt,
-                               h->qs[i]->rx_ring.stats.io_err_cnt,
-                               h->qs[i]->rx_ring.stats.seg_pkt_cnt);
-               }
-       }
-
-       return s;
-}
-
-static DEVICE_ATTR_RO(handles);
-static struct attribute *hnae_class_attrs[] = {
-       &dev_attr_handles.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(hnae_class);
-
 static int __init hnae_init(void)
 {
        hnae_class = class_create(THIS_MODULE, "hnae");
-       if (IS_ERR(hnae_class))
-               return PTR_ERR(hnae_class);
-
-       hnae_class->dev_groups = hnae_class_groups;
-       return 0;
+       return PTR_ERR_OR_ZERO(hnae_class);
 }
 
 static void __exit hnae_exit(void)
index d4a1eb1b8e54c559dd51d12c9b347651a5d71029..cec95ac8687df423227e870584500dc7bfd0baa9 100644 (file)
@@ -430,6 +430,7 @@ struct hnae_ae_ops {
        void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
        int (*set_coalesce_frames)(struct hnae_handle *handle,
                                   u32 coalesce_frames);
+       void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
        int (*get_mac_addr)(struct hnae_handle *handle, void **p);
        int (*set_mac_addr)(struct hnae_handle *handle, void *p);
        int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
index a2c72f84e397b29f5eeb470cb7bbae5291209f70..1a16c0307b475bdaaf4a525562b7fb2d474c382b 100644 (file)
@@ -392,6 +392,11 @@ static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
        return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
 }
 
+static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
+{
+       hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
+}
+
 static int hns_ae_get_autoneg(struct hnae_handle *handle)
 {
        u32     auto_neg;
@@ -748,6 +753,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
        .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
        .set_coalesce_usecs = hns_ae_set_coalesce_usecs,
        .set_coalesce_frames = hns_ae_set_coalesce_frames,
+       .set_promisc_mode = hns_ae_set_promisc_mode,
        .set_mac_addr = hns_ae_set_mac_address,
        .set_mc_addr = hns_ae_set_multicast_one,
        .set_mtu = hns_ae_set_mtu,
index 95bf42aae24ce44f90c1a511e8e4582fc987311c..026b38676cbaf208615549859c61d1ff5042474e 100644 (file)
@@ -179,7 +179,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
                        return -EINVAL;
                }
        } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
-               if (mac_cb->mac_id <= DSAF_MAX_PORT_NUM_PER_CHIP) {
+               if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) {
                        dev_err(mac_cb->dev,
                                "input invalid,%s mac%d vmid%d!\n",
                                mac_cb->dsaf_dev->ae_dev.name,
@@ -744,9 +744,11 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
        mac_cb->serdes_vaddr = dsaf_dev->sds_base;
 
        if (dsaf_dev->cpld_base &&
-           mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+           mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
                mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
                        mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
+               cpld_led_reset(mac_cb);
+       }
        mac_cb->sfp_prsnt = 0;
        mac_cb->txpkt_for_led = 0;
        mac_cb->rxpkt_for_led = 0;
index 26ae6c64d74cd5bd550700a379f2f6d6c136e685..2a98eba660c06a4dc3736bf84686a20d87597e20 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/device.h>
+#include <linux/vmalloc.h>
+
 #include "hns_dsaf_main.h"
 #include "hns_dsaf_rcb.h"
 #include "hns_dsaf_ppe.h"
@@ -217,6 +219,25 @@ hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg)
        }
 }
 
+static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+       u16 max_q_per_vf, max_vfn;
+       u32 q_id, q_num_per_port;
+       u32 i;
+
+       hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
+                              HNS_DSAF_COMM_SERVICE_NW_IDX,
+                              &max_vfn, &max_q_per_vf);
+       q_num_per_port = max_vfn * max_q_per_vf;
+
+       for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
+               dsaf_set_dev_field(dsaf_dev,
+                                  DSAF_MIX_DEF_QID_0_REG + 0x0004 * i,
+                                  0xff, 0, q_id);
+               q_id += q_num_per_port;
+       }
+}
+
 /**
  * hns_dsaf_sw_port_type_cfg - cfg sw type
  * @dsaf_id: dsa fabric id
@@ -592,6 +613,11 @@ static void hns_dsaf_tbl_tcam_data_ucast_pul(
        dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
 }
 
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
+{
+       dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
+}
+
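The !!en above collapses any non-zero enable value to exactly 1 before it is written into a single register bit. The same normalization outside the dsaf register helpers (a sketch, not driver code):

#include <stdint.h>

static uint32_t write_flag_bit(uint32_t reg, unsigned int bit, int en)
{
        uint32_t mask = UINT32_C(1) << bit;

        /* !!en would do the same job: any non-zero input selects the set branch */
        return en ? (reg | mask) : (reg & ~mask);
}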
 /**
  * hns_dsaf_tbl_stat_en - tbl
  * @dsaf_id: dsa fabric id
@@ -920,6 +946,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
        /* set 22 queue per tx ppe engine, only used in switch mode */
        hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE);
 
+       /* set promisc def queue id */
+       hns_dsaf_mix_def_qid_cfg(dsaf_dev);
+
        /* in non switch mode, set all port to access mode */
        hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
 
index 315b07ecd2916c8b14012482dde922fe04d4c22d..b2b93484995ca776057bd1736ab5f34af55a00a2 100644 (file)
@@ -423,5 +423,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port);
 
 void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
 int hns_dsaf_get_regs_count(void);
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
index d611388aecef17f3721cfd67c11194398f47c429..523e9b83d30429c7e39884c4503d55c4bca38210 100644 (file)
@@ -64,17 +64,10 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
        switch (status) {
        case HNAE_LED_ACTIVE:
                mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
-               return 2;
-       case HNAE_LED_ON:
                dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
                             CPLD_LED_ON_VALUE);
                dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
-               break;
-       case HNAE_LED_OFF:
-               dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
-                            CPLD_LED_DEFAULT_VALUE);
-               dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
-               break;
+               return 2;
        case HNAE_LED_INACTIVE:
                dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
                             CPLD_LED_DEFAULT_VALUE);
index 05ea244d999c3bd946355acbe7361d95b108dff4..4db32c62f06242c2c80a199fe5a1c0565da70836 100644 (file)
@@ -575,8 +575,8 @@ int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
  *@max_vfn : max vfn number
  *@max_q_per_vf:max ring number per vm
  */
-static void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
-                                  u16 *max_vfn, u16 *max_q_per_vf)
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+                           u16 *max_vfn, u16 *max_q_per_vf)
 {
        if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
                switch (dsaf_mode) {
index c7db6130a3cfae4230c7c5ecb22fdac51f58d53e..3a2afe2dd8bba944bd3a05ec9f285fce15015d57 100644 (file)
@@ -107,6 +107,8 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
 void hns_rcb_start(struct hnae_queue *q, u32 val);
 void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
 void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+                           u16 *max_vfn, u16 *max_q_per_vf);
 
 void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
 void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
index dab5ecf382a0515cb387300ff42f9a2949f3cfea..802d55457f19ff7c31d6c01e0bbe6a86942ad945 100644 (file)
@@ -51,9 +51,9 @@ static const struct mac_stats_string g_xgmac_stats_string[] = {
        {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)},
        {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)},
 
-       {"xgmac_rx_not_well_pkt", MAC_STATS_FIELD_OFF(rx_fragment_err)},
-       {"xgmac_rx_good_well_pkt", MAC_STATS_FIELD_OFF(rx_undersize)},
-       {"xgmac_rx_total_pkt", MAC_STATS_FIELD_OFF(rx_under_min)},
+       {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)},
+       {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)},
+       {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)},
        {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)},
        {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
        {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
index ce7f2e0e3fd104e131aab71e6c093bbf97a0b795..302d3ae8e9e594a48da1791564cade416837128e 100644 (file)
@@ -1161,6 +1161,21 @@ void hns_set_multicast_list(struct net_device *ndev)
        }
 }
 
+void hns_nic_set_rx_mode(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+
+       if (h->dev->ops->set_promisc_mode) {
+               if (ndev->flags & IFF_PROMISC)
+                       h->dev->ops->set_promisc_mode(h, 1);
+               else
+                       h->dev->ops->set_promisc_mode(h, 0);
+       }
+
+       hns_set_multicast_list(ndev);
+}
+
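The new ndo_set_rx_mode handler derives the hardware promiscuous setting from IFF_PROMISC before falling through to the existing multicast handling. The flag test on its own (0x100 matches the kernel's IFF_PROMISC value, but the rest is a sketch):

#include <stdio.h>

#define IFF_PROMISC_FLAG 0x100

static void set_rx_mode(unsigned int flags)
{
        printf("promiscuous mode %s\n",
               (flags & IFF_PROMISC_FLAG) ? "on" : "off");
}

int main(void)
{
        set_rx_mode(0x100);     /* on */
        set_rx_mode(0x000);     /* off */
        return 0;
}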
 struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
                                              struct rtnl_link_stats64 *stats)
 {
@@ -1220,7 +1235,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = hns_nic_poll_controller,
 #endif
-       .ndo_set_rx_mode = hns_set_multicast_list,
+       .ndo_set_rx_mode = hns_nic_set_rx_mode,
 };
 
 static void hns_nic_update_link_status(struct net_device *netdev)
@@ -1300,16 +1315,15 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
                return;
 
        hns_nic_dump(priv);
-       netdev_err(priv->netdev, "Reset %s port\n",
-                  (type == HNAE_PORT_DEBUG ? "debug" : "business"));
+       netdev_info(priv->netdev, "Reset %s port\n",
+                   (type == HNAE_PORT_DEBUG ? "debug" : "business"));
 
        rtnl_lock();
-       if (type == HNAE_PORT_DEBUG) {
+       /* put off any impending NetWatchDogTimeout */
+       priv->netdev->trans_start = jiffies;
+
+       if (type == HNAE_PORT_DEBUG)
                hns_nic_net_reinit(priv->netdev);
-       } else {
-               hns_nic_net_down(priv->netdev);
-               hns_nic_net_reset(priv->netdev);
-       }
        rtnl_unlock();
 }
 
index 2550208cb22eaf516eec7b6ed65f6bf6b3b2fbf5..a0332129970ba56deb16cee881a7853d274a1d6e 100644 (file)
@@ -194,9 +194,7 @@ static int hns_nic_set_settings(struct net_device *net_dev,
 {
        struct hns_nic_priv *priv = netdev_priv(net_dev);
        struct hnae_handle *h;
-       int link_stat;
        u32 speed;
-       u8 duplex, autoneg;
 
        if (!netif_running(net_dev))
                return -ESRCH;
@@ -206,48 +204,35 @@ static int hns_nic_set_settings(struct net_device *net_dev,
                return -ENODEV;
 
        h = priv->ae_handle;
-       link_stat = hns_nic_get_link(net_dev);
-       duplex = cmd->duplex;
        speed = ethtool_cmd_speed(cmd);
-       autoneg = cmd->autoneg;
-
-       if (!link_stat) {
-               if (duplex != (u8)DUPLEX_UNKNOWN || speed != (u32)SPEED_UNKNOWN)
-                       return -EINVAL;
-
-               if (h->phy_if == PHY_INTERFACE_MODE_SGMII && h->phy_node) {
-                       priv->phy->autoneg = autoneg;
-                       return phy_start_aneg(priv->phy);
-               }
-       }
 
        if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
-               if (autoneg != AUTONEG_DISABLE)
-                       return -EINVAL;
-
-               if (speed != SPEED_10000 || duplex != DUPLEX_FULL)
+               if (cmd->autoneg == AUTONEG_ENABLE || speed != SPEED_10000 ||
+                   cmd->duplex != DUPLEX_FULL)
                        return -EINVAL;
        } else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
-               if (!h->phy_node && autoneg != AUTONEG_DISABLE)
+               if (!priv->phy && cmd->autoneg == AUTONEG_ENABLE)
                        return -EINVAL;
 
-               if (speed == SPEED_1000 && duplex == DUPLEX_HALF)
+               if (speed == SPEED_1000 && cmd->duplex == DUPLEX_HALF)
                        return -EINVAL;
+               if (priv->phy)
+                       return phy_ethtool_sset(priv->phy, cmd);
 
-               if (speed != SPEED_10 && speed != SPEED_100 &&
-                   speed != SPEED_1000)
+               if ((speed != SPEED_10 && speed != SPEED_100 &&
+                    speed != SPEED_1000) || (cmd->duplex != DUPLEX_HALF &&
+                    cmd->duplex != DUPLEX_FULL))
                        return -EINVAL;
        } else {
                netdev_err(net_dev, "Not supported!");
                return -ENOTSUPP;
        }
 
-       if (priv->phy) {
-               return phy_ethtool_sset(priv->phy, cmd);
-       } else if (h->dev->ops->adjust_link && link_stat) {
-               h->dev->ops->adjust_link(h, speed, duplex);
+       if (h->dev->ops->adjust_link) {
+               h->dev->ops->adjust_link(h, (int)speed, cmd->duplex);
                return 0;
        }
+
        netdev_err(net_dev, "Not supported!\n");
        return -ENOTSUPP;
 }
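
The rewritten hns_nic_set_settings() validates speed/duplex per interface type and then delegates: requests go to phylib when a PHY device is attached, otherwise straight to the MAC's adjust_link hook. A compact sketch of that dispatch, with hypothetical my_priv/my_ops types standing in for the driver's structures:

    #include <linux/errno.h>
    #include <linux/ethtool.h>
    #include <linux/phy.h>

    struct my_ops {
        void (*adjust_link)(void *handle, int speed, int duplex);
    };

    struct my_priv {
        struct phy_device *phy;     /* NULL when no PHY is attached */
        struct my_ops *ops;
        void *handle;
    };

    static int my_set_settings(struct my_priv *priv, struct ethtool_cmd *cmd)
    {
        u32 speed = ethtool_cmd_speed(cmd);

        if (priv->phy)      /* let phylib negotiate or force the link */
            return phy_ethtool_sset(priv->phy, cmd);

        if (priv->ops->adjust_link) {
            priv->ops->adjust_link(priv->handle, (int)speed, cmd->duplex);
            return 0;
        }
        return -EOPNOTSUPP;
    }
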
@@ -682,7 +667,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
        drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
 
        strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
-       drvinfo->eedump_len = 0;
 }
 
 /**
index e4ec52ae61ffdc7aa40fc0db8eea6ebd83406578..37491c85bc422a18a9086e29b6e8d555333c1045 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/etherdevice.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
@@ -20,6 +21,7 @@
 #include <linux/of_platform.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/spinlock_types.h>
 
 #define MDIO_DRV_NAME "Hi-HNS_MDIO"
@@ -36,7 +38,7 @@
 
 struct hns_mdio_device {
        void *vbase;            /* mdio reg base address */
-       void *sys_vbase;
+       struct regmap *subctrl_vbase;
 };
 
 /* mdio reg */
@@ -155,10 +157,10 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
        u32 time_cnt;
        u32 reg_value;
 
-       mdio_write_reg((void *)mdio_dev->sys_vbase, cfg_reg, set_val);
+       regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
 
        for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
-               reg_value = mdio_read_reg((void *)mdio_dev->sys_vbase, st_reg);
+               regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
                reg_value &= st_msk;
                if ((!!check_st) == (!!reg_value))
                        break;
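
This hunk swaps the driver's raw MMIO helpers for regmap accessors on the shared subctrl block: write the config register through the regmap, then poll the status register until the expected bit state appears or the retry budget runs out. The same pattern in isolation (TIMEOUT_TRIES is a stand-in for the driver's MDIO_TIMEOUT):

    #include <linux/errno.h>
    #include <linux/regmap.h>
    #include <linux/types.h>

    #define TIMEOUT_TRIES 1000

    static int my_write_and_wait(struct regmap *map, unsigned int cfg_reg,
                                 unsigned int val, unsigned int st_reg,
                                 unsigned int st_msk, bool check_st)
    {
        unsigned int status, tries;
        int ret;

        ret = regmap_write(map, cfg_reg, val);
        if (ret)
            return ret;

        for (tries = TIMEOUT_TRIES; tries; tries--) {
            ret = regmap_read(map, st_reg, &status);
            if (ret)
                return ret;
            if ((!!check_st) == (!!(status & st_msk)))
                return 0;
        }
        return -ETIMEDOUT;
    }
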
@@ -352,7 +354,7 @@ static int hns_mdio_reset(struct mii_bus *bus)
        struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
        int ret;
 
-       if (!mdio_dev->sys_vbase) {
+       if (!mdio_dev->subctrl_vbase) {
                dev_err(&bus->dev, "mdio sys ctl reg has not been mapped\n");
                return -ENODEV;
        }
@@ -455,13 +457,12 @@ static int hns_mdio_probe(struct platform_device *pdev)
                return ret;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       mdio_dev->sys_vbase = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(mdio_dev->sys_vbase)) {
-               ret = PTR_ERR(mdio_dev->sys_vbase);
-               return ret;
+       mdio_dev->subctrl_vbase =
+               syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0));
+       if (IS_ERR(mdio_dev->subctrl_vbase)) {
+               dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
+               mdio_dev->subctrl_vbase = NULL;
        }
-
        new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR,
                                    sizeof(int), GFP_KERNEL);
        if (!new_bus->irq)
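
Instead of ioremapping a second memory resource, the probe now resolves a phandle to the Hisilicon peripheral subctrl syscon and borrows its shared regmap; a missing syscon only produces a warning, and the reset path above refuses to run without it. A minimal sketch of the lookup ("my,syscon" is a hypothetical phandle property; a fuller version would also of_node_put() the parsed node):

    #include <linux/mfd/syscon.h>
    #include <linux/of.h>
    #include <linux/regmap.h>

    static struct regmap *my_get_syscon(struct device_node *np)
    {
        struct regmap *map;

        map = syscon_node_to_regmap(of_parse_phandle(np, "my,syscon", 0));
        if (IS_ERR(map))
            return NULL;    /* treat the syscon as optional */
        return map;
    }
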
index b60a34d982a90f6eff42601d8ee0e29a7d356c5d..5d7db6c01c46c04adcf0b12b49e67610c0d67ca7 100644 (file)
@@ -2204,7 +2204,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
                 dev->cell_index, dev->ofdev->dev.of_node->full_name);
-       info->regdump_len = emac_ethtool_get_regs_len(ndev);
 }
 
 static const struct ethtool_ops emac_ethtool_ops = {
index 4270ad2d4ddfa91f9e98a759959f67441b743a9f..83e557c7f2796874771a534a640670a5fd5b8f1a 100644 (file)
@@ -559,8 +559,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->regdump_len = e1000_get_regs_len(netdev);
-       drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
 
 static void e1000_get_ringparam(struct net_device *netdev,
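
This drvinfo hunk, and the emac, e1000e, fm10k and hns ones like it, deletes the manual regdump_len/eedump_len/n_stats assignments: the ethtool core now derives those fields itself from the driver's get_regs_len, get_eeprom_len and get_sset_count callbacks, so the per-driver assignments were redundant. All get_drvinfo still owes the core is the identity strings, roughly:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    static void my_get_drvinfo(struct net_device *ndev,
                               struct ethtool_drvinfo *info)
    {
        /* strings only; the ethtool core fills regdump_len,
         * eedump_len and n_stats from the other ops callbacks
         */
        strlcpy(info->driver, "my_driver", sizeof(info->driver));
        strlcpy(info->version, "1.0", sizeof(info->version));
        strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
    }
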
index 74dc150559711f2dd8f3b9b707288cb5bd77319a..fd7be860c20131e7db9a557dcdd40db348d9edf5 100644 (file)
@@ -3820,7 +3820,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
        if (work_done < budget) {
                if (likely(adapter->itr_setting & 3))
                        e1000_set_itr(adapter);
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                if (!test_bit(__E1000_DOWN, &adapter->flags))
                        e1000_irq_enable(adapter);
        }
index ad6daa656d3e9f8b21a9bd3e43e40346711238b6..6cab1f30d41e93957551ae68b031679b458b9da7 100644 (file)
@@ -648,8 +648,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->regdump_len = e1000_get_regs_len(netdev);
-       drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
 
 static void e1000_get_ringparam(struct net_device *netdev,
index 2e2ddec04a50cca071353f2a75ca281e98e60eb5..0a854a47d31a77ef0723231dd75e3aceddc5d6b0 100644 (file)
@@ -2693,7 +2693,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
        if (work_done < weight) {
                if (adapter->itr_setting & 3)
                        e1000_set_itr(adapter);
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                if (!test_bit(__E1000_DOWN, &adapter->state)) {
                        if (adapter->msix_entries)
                                ew32(IMS, adapter->rx_ring->ims_val);
index 08ecf43dffc77babaa86a9c3c5c98c950ce7423b..5304bc1fbecd4e218497ea7b7680b7b81d964411 100644 (file)
@@ -176,7 +176,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
                return;
 
        /* Generate a folder for each q_vector */
-       sprintf(name, "q_vector.%03d", q_vector->v_idx);
+       snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
 
        q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
        if (!q_vector->dbg_q_vector)
@@ -186,7 +186,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
        for (i = 0; i < q_vector->tx.count; i++) {
                struct fm10k_ring *ring = &q_vector->tx.ring[i];
 
-               sprintf(name, "tx_ring.%03d", ring->queue_index);
+               snprintf(name, sizeof(name), "tx_ring.%03d", ring->queue_index);
 
                debugfs_create_file(name, 0600,
                                    q_vector->dbg_q_vector, ring,
@@ -197,7 +197,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
        for (i = 0; i < q_vector->rx.count; i++) {
                struct fm10k_ring *ring = &q_vector->rx.ring[i];
 
-               sprintf(name, "rx_ring.%03d", ring->queue_index);
+               snprintf(name, sizeof(name), "rx_ring.%03d", ring->queue_index);
 
                debugfs_create_file(name, 0600,
                                    q_vector->dbg_q_vector, ring,
index 4ef2fbd229119578ca8681294556c5d906335814..2ce0eba5e04034a4cfa6dc3c7ca8dbbc3282ec49 100644 (file)
@@ -206,13 +206,13 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
        }
 
        for (i = 0; i < interface->hw.mac.max_queues; i++) {
-               sprintf(p, "tx_queue_%u_packets", i);
+               snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
                p += ETH_GSTRING_LEN;
-               sprintf(p, "tx_queue_%u_bytes", i);
+               snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
                p += ETH_GSTRING_LEN;
-               sprintf(p, "rx_queue_%u_packets", i);
+               snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
                p += ETH_GSTRING_LEN;
-               sprintf(p, "rx_queue_%u_bytes", i);
+               snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
                p += ETH_GSTRING_LEN;
        }
 }
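
Each stat name lands in a fixed ETH_GSTRING_LEN slot of one flat buffer, so the sprintf to snprintf change guarantees a formatted name can truncate but never spill into the neighbouring slot. The bound in standalone, userspace form (ETH_GSTRING_LEN is 32 in the ethtool ABI):

    #include <stdio.h>

    #define ETH_GSTRING_LEN 32      /* ethtool string slot size */

    int main(void)
    {
        char strings[4][ETH_GSTRING_LEN];
        unsigned int i;

        for (i = 0; i < 4; i++)     /* truncates instead of overflowing */
            snprintf(strings[i], ETH_GSTRING_LEN,
                     "tx_queue_%u_packets", i);

        for (i = 0; i < 4; i++)
            printf("%s\n", strings[i]);
        return 0;
    }
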
@@ -515,10 +515,6 @@ static void fm10k_get_drvinfo(struct net_device *dev,
                sizeof(info->version) - 1);
        strncpy(info->bus_info, pci_name(interface->pdev),
                sizeof(info->bus_info) - 1);
-
-       info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS);
-
-       info->regdump_len = fm10k_get_regs_len(dev);
 }
 
 static void fm10k_get_pauseparam(struct net_device *dev,
index 2f47bfe6cc9084807d82d6bac60a68990226b553..e76a44cf330cd47d57084a05fd69611985a79e15 100644 (file)
@@ -593,9 +593,9 @@ static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
        napi_gro_receive(&q_vector->napi, skb);
 }
 
-static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
-                              struct fm10k_ring *rx_ring,
-                              int budget)
+static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
+                             struct fm10k_ring *rx_ring,
+                             int budget)
 {
        struct sk_buff *skb = rx_ring->skb;
        unsigned int total_bytes = 0, total_packets = 0;
@@ -662,7 +662,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
        q_vector->rx.total_packets += total_packets;
        q_vector->rx.total_bytes += total_bytes;
 
-       return total_packets < budget;
+       return total_packets;
 }
 
 #define VXLAN_HLEN (sizeof(struct udphdr) + 8)
@@ -1422,7 +1422,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
        struct fm10k_q_vector *q_vector =
                               container_of(napi, struct fm10k_q_vector, napi);
        struct fm10k_ring *ring;
-       int per_ring_budget;
+       int per_ring_budget, work_done = 0;
        bool clean_complete = true;
 
        fm10k_for_each_ring(ring, q_vector->tx)
@@ -1436,16 +1436,19 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
        else
                per_ring_budget = budget;
 
-       fm10k_for_each_ring(ring, q_vector->rx)
-               clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
-                                                    per_ring_budget);
+       fm10k_for_each_ring(ring, q_vector->rx) {
+               int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
+
+               work_done += work;
+               clean_complete &= !!(work < per_ring_budget);
+       }
 
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
 
        /* all work done, exit the polling mode */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
 
        /* re-enable the q_vector */
        fm10k_qv_enable(q_vector);
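
fm10k_clean_rx_irq() now reports how many packets it processed instead of a bare completion flag, so the poll loop can sum per-ring work and hand the total to napi_complete_done(), which feeds the kernel's interrupt-moderation heuristics; polling still continues whenever any ring consumed its full slice of the budget. The accounting pattern on its own, with a hypothetical clean_ring callback:

    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    static int my_poll(struct napi_struct *napi, int budget,
                       int (*clean_ring)(int ring, int slice), int nrings)
    {
        int slice = max(budget / nrings, 1);
        int ring, work, work_done = 0;
        bool clean_complete = true;

        for (ring = 0; ring < nrings; ring++) {
            work = clean_ring(ring, slice);     /* packets cleaned */
            work_done += work;
            clean_complete &= (work < slice);
        }

        if (!clean_complete)
            return budget;      /* not done: stay in polling mode */

        napi_complete_done(napi, work_done);
        return work_done;
    }
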
@@ -1905,7 +1908,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
        u32 reta, base;
 
        /* If the netdev is initialized we have to maintain table if possible */
-       if (interface->netdev->reg_state) {
+       if (interface->netdev->reg_state != NETREG_UNINITIALIZED) {
                for (i = FM10K_RETA_SIZE; i--;) {
                        reta = interface->reta[i];
                        if ((((reta << 24) >> 24) < rss_i) &&
index f26dcb23ebf91aa8a92641f0d4f36a3bcfdfdd88..4dd3e26129b44657cdb05e2e9335ab4f554bcd76 100644 (file)
@@ -93,7 +93,7 @@
 #endif /* I40E_FCOE */
 #define I40E_MAX_AQ_BUF_SIZE          4096
 #define I40E_AQ_LEN                   256
-#define I40E_AQ_WORK_LIMIT            32
+#define I40E_AQ_WORK_LIMIT            66 /* max number of VFs + a little */
 #define I40E_MAX_USER_PRIORITY        8
 #define I40E_DEFAULT_MSG_ENABLE       4
 #define I40E_QUEUE_WAIT_RETRY_LIMIT   10
 /* Ethtool Private Flags */
 #define I40E_PRIV_FLAGS_NPAR_FLAG      BIT(0)
 #define I40E_PRIV_FLAGS_LINKPOLL_FLAG  BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR         BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS      BIT(3)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
 #define I40E_NVM_VERSION_HI_SHIFT  12
 #define I40E_NVM_VERSION_HI_MASK   (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK    0xffff
+#define I40E_OEM_VER_PATCH_MASK    0xff
+#define I40E_OEM_VER_BUILD_SHIFT   8
+#define I40E_OEM_VER_SHIFT         24
 
 /* The values in here are decimal coded as hex as is the case in the NVM map*/
 #define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -304,7 +310,6 @@ struct i40e_pf {
 #ifdef I40E_FCOE
 #define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL                   BIT_ULL(12)
 #define I40E_FLAG_16BYTE_RX_DESC_ENABLED       BIT_ULL(13)
 #define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
 #define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
@@ -330,6 +335,7 @@ struct i40e_pf {
 #define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE  BIT_ULL(38)
 #define I40E_FLAG_LINK_POLLING_ENABLED         BIT_ULL(39)
 #define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
+#define I40E_FLAG_NO_PCI_LINK_CHECK            BIT_ULL(42)
 
        /* tracks features that get auto disabled by errors */
        u64 auto_disable_flags;
@@ -464,6 +470,8 @@ struct i40e_vsi {
 #define I40E_VSI_FLAG_VEB_OWNER                BIT(1)
        unsigned long flags;
 
+       /* Per VSI lock to protect elements/list (MAC filter) */
+       spinlock_t mac_filter_list_lock;
        struct list_head mac_filter_list;
 
        /* VSI stats */
@@ -494,6 +502,7 @@ struct i40e_vsi {
         */
        u16 rx_itr_setting;
        u16 tx_itr_setting;
+       u16 int_rate_limit;  /* value in usecs */
 
        u16 rss_table_size;
        u16 rss_size;
@@ -570,6 +579,8 @@ struct i40e_q_vector {
        struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[I40E_INT_NAME_STR_LEN];
        bool arm_wb_state;
+#define ITR_COUNTDOWN_START 100
+       u8 itr_countdown;       /* when 0 should adjust ITR */
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
@@ -579,22 +590,29 @@ struct i40e_device {
 };
 
 /**
- * i40e_fw_version_str - format the FW and NVM version strings
+ * i40e_nvm_version_str - format the NVM version strings
  * @hw: ptr to the hardware info
  **/
-static inline char *i40e_fw_version_str(struct i40e_hw *hw)
+static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
 {
        static char buf[32];
+       u32 full_ver;
+       u8 ver, patch;
+       u16 build;
+
+       full_ver = hw->nvm.oem_ver;
+       ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+       build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT)
+                & I40E_OEM_VER_BUILD_MASK);
+       patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
 
        snprintf(buf, sizeof(buf),
-                "f%d.%d.%05d a%d.%d n%x.%02x e%x",
-                hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
-                hw->aq.api_maj_ver, hw->aq.api_min_ver,
+                "%x.%02x 0x%x %d.%d.%d",
                 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
                        I40E_NVM_VERSION_HI_SHIFT,
                 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
                        I40E_NVM_VERSION_LO_SHIFT,
-                (hw->nvm.eetrack & 0xffffff));
+                hw->nvm.eetrack, ver, build, patch);
 
        return buf;
 }
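
The new i40e_nvm_version_str() unpacks hw->nvm.oem_ver as an 8-bit version, a 16-bit build and an 8-bit patch, using the masks and shifts added to i40e.h above. The unpacking, checked standalone:

    #include <stdint.h>
    #include <stdio.h>

    #define OEM_VER_SHIFT       24      /* mirrors I40E_OEM_VER_SHIFT */
    #define OEM_VER_BUILD_SHIFT 8       /* mirrors I40E_OEM_VER_BUILD_SHIFT */
    #define OEM_VER_BUILD_MASK  0xffff
    #define OEM_VER_PATCH_MASK  0xff

    int main(void)
    {
        uint32_t oem_ver = 0x0104D203;  /* packed 1.1234.3 */
        uint8_t ver = oem_ver >> OEM_VER_SHIFT;
        uint16_t build = (oem_ver >> OEM_VER_BUILD_SHIFT) &
                         OEM_VER_BUILD_MASK;
        uint8_t patch = oem_ver & OEM_VER_PATCH_MASK;

        printf("%u.%u.%u\n", ver, build, patch);    /* prints 1.1234.3 */
        return 0;
    }
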
index fa2e916b23daa4fd954a952753fb1136ee577f5b..0ff8f01e57ee5a4a7de890485e5987fd9eec7f8a 100644 (file)
@@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
 
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
-       hw->aq.asq.count = hw->aq.num_asq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
 
        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
-       hw->aq.arq.count = hw->aq.num_arq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
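
Both adminq hunks move the count assignment below the allocations: count is what the shutdown paths use to decide whether a queue is live, so setting it before the ring and buffer allocations could leave a failed, half-built queue looking initialized. The ordering rule in miniature:

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_queue {
        void *ring;
        unsigned int count;     /* non-zero means "live" */
    };

    static int my_queue_init(struct my_queue *q, unsigned int entries,
                             size_t esize)
    {
        q->ring = kcalloc(entries, esize, GFP_KERNEL);
        if (!q->ring)
            return -ENOMEM;     /* q->count stays 0: never marked live */

        q->count = entries;     /* mark live only after success */
        return 0;
    }
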
@@ -553,8 +553,9 @@ shutdown_arq_out:
  **/
 i40e_status i40e_init_adminq(struct i40e_hw *hw)
 {
-       i40e_status ret_code;
+       u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
+       i40e_status ret_code;
        int retry = 0;
 
        /* verify input for valid configuration */
@@ -613,6 +614,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+       i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+       i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+                          &oem_hi);
+       i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+                          &oem_lo);
+       hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
 
        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
index 785b3dbd22ca721f989abf3f994975cb105ea754..6584b6cd73fd30187c0c2ca9d37dc3cbff51ab81 100644 (file)
@@ -1722,11 +1722,13 @@ struct i40e_aqc_get_link_status {
        u8      phy_type;    /* i40e_aq_phy_type   */
        u8      link_speed;  /* i40e_aq_link_speed */
        u8      link_info;
-#define I40E_AQ_LINK_UP                        0x01
+#define I40E_AQ_LINK_UP                        0x01    /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION       0x01
 #define I40E_AQ_LINK_FAULT             0x02
 #define I40E_AQ_LINK_FAULT_TX          0x04
 #define I40E_AQ_LINK_FAULT_RX          0x08
 #define I40E_AQ_LINK_FAULT_REMOTE      0x10
+#define I40E_AQ_LINK_UP_PORT           0x20
 #define I40E_AQ_MEDIA_AVAILABLE                0x40
 #define I40E_AQ_SIGNAL_DETECT          0x80
        u8      an_info;
@@ -2130,7 +2132,13 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
 struct i40e_aqc_lldp_set_local_mib {
 #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT       0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK        (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK        (1 << \
+                                       SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB        0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT   (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK    (1 << \
+                               SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS         0x1
        u8      type;
        u8      reserved0;
        __le16  length;
index 2d012d9c22ada1ea46b5b7471ad489786ab20060..2d74c6e4d7b618fe3c5dd44d3ae93c2a433d3491 100644 (file)
@@ -87,7 +87,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
  * @hw: pointer to the HW structure
  * @aq_err: the AQ error code to convert
  **/
-char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
 {
        switch (aq_err) {
        case I40E_AQ_RC_OK:
@@ -147,7 +147,7 @@ char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
  * @hw: pointer to the HW structure
  * @stat_err: the status error code to convert
  **/
-char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 {
        switch (stat_err) {
        case 0:
@@ -331,25 +331,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
                        len = buf_len;
                /* write the full 16-byte chunks */
                for (i = 0; i < (len - 16); i += 16)
-                       i40e_debug(hw, mask,
-                                  "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
-                                  i, buf[i], buf[i + 1], buf[i + 2],
-                                  buf[i + 3], buf[i + 4], buf[i + 5],
-                                  buf[i + 6], buf[i + 7], buf[i + 8],
-                                  buf[i + 9], buf[i + 10], buf[i + 11],
-                                  buf[i + 12], buf[i + 13], buf[i + 14],
-                                  buf[i + 15]);
+                       i40e_debug(hw, mask, "\t0x%04X  %16ph\n", i, buf + i);
                /* write whatever's left over without overrunning the buffer */
-               if (i < len) {
-                       char d_buf[80];
-                       int j = 0;
-
-                       memset(d_buf, 0, sizeof(d_buf));
-                       j += sprintf(d_buf, "\t0x%04X ", i);
-                       while (i < len)
-                               j += sprintf(&d_buf[j], " %02X", buf[i++]);
-                       i40e_debug(hw, mask, "%s\n", d_buf);
-               }
+               if (i < len)
+                       i40e_debug(hw, mask, "\t0x%04X  %*ph\n",
+                                            i, len - i, buf + i);
        }
 }
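
The open-coded hex formatting collapses onto the kernel's %ph printk extension: "%16ph" dumps exactly 16 bytes from a pointer, and "%*ph" takes the byte count (capped at 64) as a preceding argument. The same dump routine in isolation:

    #include <linux/printk.h>
    #include <linux/types.h>

    static void my_hexdump(const u8 *buf, int len)
    {
        int i;

        for (i = 0; i + 16 <= len; i += 16)     /* full 16-byte rows */
            printk(KERN_DEBUG "\t0x%04X  %16ph\n", i, buf + i);

        if (i < len)                            /* the remainder, if any */
            printk(KERN_DEBUG "\t0x%04X  %*ph\n", i, len - i, buf + i);
    }
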
 
@@ -958,6 +944,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
        else
                hw->pf_id = (u8)(func_rid & 0x7);
 
+       if (hw->mac.type == I40E_MAC_X722)
+               hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+
        status = i40e_init_nvm(hw);
        return status;
 }
@@ -1617,6 +1606,9 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
        if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
                status = I40E_ERR_UNKNOWN_PHY;
 
+       if (report_init)
+               hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
+
        return status;
 }
 
@@ -1717,14 +1709,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
                        *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
        }
        /* Update the link info */
-       status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+       status = i40e_update_link_info(hw);
        if (status) {
                /* Wait a little bit (on 40G cards it sometimes takes a really
                 * long time for link to come back from the atomic reset)
                 * and try once more
                 */
                msleep(1000);
-               status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+               status = i40e_update_link_info(hw);
        }
        if (status)
                *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -2247,7 +2239,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
        i40e_status status = 0;
 
        if (hw->phy.get_link_info) {
-               status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+               status = i40e_update_link_info(hw);
 
                if (status)
                        i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
@@ -2259,6 +2251,32 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
        return status;
 }
 
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+i40e_status i40e_update_link_info(struct i40e_hw *hw)
+{
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       i40e_status status = 0;
+
+       status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+       if (status)
+               return status;
+
+       if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+               status = i40e_aq_get_phy_capabilities(hw, false, false,
+                                                     &abilities, NULL);
+               if (status)
+                       return status;
+
+               memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+                      sizeof(hw->phy.link_info.module_type));
+       }
+
+       return status;
+}
+
 /**
  * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
  * @hw: pointer to the hw struct
@@ -3796,6 +3814,28 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
        return status;
 }
 
+/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to add ethertype filter from
+ **/
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+                                                   u16 seid)
+{
+       u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+                  I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+                  I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+       u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+       i40e_status status;
+
+       status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+                                                      seid, 0, true, NULL,
+                                                      NULL);
+       if (status)
+               hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
+
 /**
  * i40e_aq_alternate_read
  * @hw: pointer to the hardware structure
index 6fa07ef1651d8d3aa7d93ef89b8db79ffbb6a85c..2691277c0055d2572f2994e24c120066a7bec28b 100644 (file)
@@ -380,7 +380,7 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
 {
        u16 length, typelength, offset = 0;
        struct i40e_cee_app_prio *app;
-       u8 i, up;
+       u8 i, up, selector;
 
        typelength = ntohs(tlv->hdr.typelen);
        length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
@@ -393,13 +393,21 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
        for (i = 0; i < dcbcfg->numapps; i++) {
                app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
                for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
-                       if (app->prio_map & (1 << up))
+                       if (app->prio_map & BIT(up))
                                break;
                }
                dcbcfg->app[i].priority = up;
-               /* Get Selector from lower 2 bits */
-               dcbcfg->app[i].selector = (app->upper_oui_sel &
-                                          I40E_CEE_APP_SELECTOR_MASK);
+
+               /* Get Selector from lower 2 bits, and convert to IEEE */
+               selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+               if (selector == I40E_CEE_APP_SEL_ETHTYPE)
+                       dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+               else if (selector == I40E_CEE_APP_SEL_TCPIP)
+                       dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+               else
+                       /* Keep selector as it is for unknown types */
+                       dcbcfg->app[i].selector = selector;
+
                dcbcfg->app[i].protocolid = ntohs(app->protocol);
                /* Move to next app */
                offset += sizeof(*app);
index 7c42d1340de6719865fb3dcb158321edef1025a7..886e667f2f1c8a54c4a48586e10626f7ce3accf5 100644 (file)
@@ -240,10 +240,9 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v] && pf->vsi[v]->netdev) {
                        err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
-                       if (err)
-                               dev_info(&pf->pdev->dev, "Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
-                                        pf->vsi[v]->seid, err, app->selector,
-                                        app->protocolid, app->priority);
+                       dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+                               pf->vsi[v]->seid, err, app->selector,
+                               app->protocolid, app->priority);
                }
        }
 }
index c1dd2483e26258598e2a6c1e660e828f81999458..d4b7af9a2fc82f8b55878188a0c53a5911d6ddf1 100644 (file)
@@ -953,24 +953,6 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
        }
 }
 
-/**
- * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
- * @pf: the PF that would be altered
- * @flag: flag that needs enabling or disabling
- * @enable: Enable/disable FD SD/ATR
- **/
-static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
-{
-       if (enable) {
-               pf->flags |= flag;
-       } else {
-               pf->flags &= ~flag;
-               pf->auto_disable_flags |= flag;
-       }
-       dev_info(&pf->pdev->dev, "requesting a PF reset\n");
-       i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
-}
-
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
 /**
  * i40e_dbg_command_write - write into command datum
@@ -1155,7 +1137,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        goto command_write_done;
                }
 
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                f = i40e_add_filter(vsi, ma, vlan, false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
                ret = i40e_sync_vsi_filters(vsi, true);
                if (f && !ret)
                        dev_info(&pf->pdev->dev,
@@ -1192,7 +1176,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        goto command_write_done;
                }
 
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                i40e_del_filter(vsi, ma, vlan, false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
                ret = i40e_sync_vsi_filters(vsi, true);
                if (!ret)
                        dev_info(&pf->pdev->dev,
@@ -1759,10 +1745,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                raw_packet = NULL;
                kfree(asc_packet);
                asc_packet = NULL;
-       } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
-               i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
-       } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
-               i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
        } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
                dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
                         i40e_get_current_fd_count(pf));
@@ -1989,8 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
                dev_info(&pf->pdev->dev, "  add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
                dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
-               dev_info(&pf->pdev->dev, "  fd-atr off\n");
-               dev_info(&pf->pdev->dev, "  fd-atr on\n");
                dev_info(&pf->pdev->dev, "  fd current cnt");
                dev_info(&pf->pdev->dev, "  lldp start\n");
                dev_info(&pf->pdev->dev, "  lldp stop\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
new file mode 100644 (file)
index 0000000..c601ca4
--- /dev/null
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710          0x1572
+#define I40E_DEV_ID_QEMU               0x1574
+#define I40E_DEV_ID_KX_A               0x157F
+#define I40E_DEV_ID_KX_B               0x1580
+#define I40E_DEV_ID_KX_C               0x1581
+#define I40E_DEV_ID_QSFP_A             0x1583
+#define I40E_DEV_ID_QSFP_B             0x1584
+#define I40E_DEV_ID_QSFP_C             0x1585
+#define I40E_DEV_ID_10G_BASE_T         0x1586
+#define I40E_DEV_ID_20G_KR2            0x1587
+#define I40E_DEV_ID_20G_KR2_A          0x1588
+#define I40E_DEV_ID_10G_BASE_T4                0x1589
+#define I40E_DEV_ID_VF                 0x154C
+#define I40E_DEV_ID_VF_HV              0x1571
+#define I40E_DEV_ID_SFP_X722           0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
+#define I40E_DEV_ID_X722_VF            0x37CD
+#define I40E_DEV_ID_X722_VF_HV         0x37D9
+
+#define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
+                                        (d) == I40E_DEV_ID_QSFP_B  || \
+                                        (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
index 148f61461076ecb933140a41086f52cfd7812862..3f385ffe420f712abbda79b14c46e2b6196d8dc8 100644 (file)
@@ -90,9 +90,6 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
        I40E_VSI_STAT("tx_linearize", tx_linearize),
 };
 
-static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
-                                struct ethtool_rxnfc *cmd);
-
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
  * but they are separate.  This device supports Virtualization, and
  * as such might have several netdevs supporting VMDq and FCoE going
@@ -231,6 +228,8 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
        "NPAR",
        "LinkPolling",
+       "flow-director-atr",
+       "veb-stats",
 };
 
 #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
@@ -254,7 +253,8 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
  **/
 static void i40e_get_settings_link_up(struct i40e_hw *hw,
                                      struct ethtool_cmd *ecmd,
-                                     struct net_device *netdev)
+                                     struct net_device *netdev,
+                                     struct i40e_pf *pf)
 {
        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
        u32 link_speed = hw_link_info->link_speed;
@@ -273,65 +273,49 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        case I40E_PHY_TYPE_40GBASE_AOC:
                ecmd->supported = SUPPORTED_40000baseCR4_Full;
                break;
-       case I40E_PHY_TYPE_40GBASE_KR4:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_40000baseKR4_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_40000baseKR4_Full;
-               break;
        case I40E_PHY_TYPE_40GBASE_SR4:
                ecmd->supported = SUPPORTED_40000baseSR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_LR4:
                ecmd->supported = SUPPORTED_40000baseLR4_Full;
                break;
-       case I40E_PHY_TYPE_20GBASE_KR2:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_20000baseKR2_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_20000baseKR2_Full;
-               break;
-       case I40E_PHY_TYPE_10GBASE_KX4:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_10000baseKX4_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_10000baseKX4_Full;
-               break;
-       case I40E_PHY_TYPE_10GBASE_KR:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_10000baseKR_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_10000baseKR_Full;
-               break;
        case I40E_PHY_TYPE_10GBASE_SR:
        case I40E_PHY_TYPE_10GBASE_LR:
        case I40E_PHY_TYPE_1000BASE_SX:
        case I40E_PHY_TYPE_1000BASE_LX:
-               ecmd->supported = SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full;
+               ecmd->supported = SUPPORTED_10000baseT_Full;
+               if (hw_link_info->module_type[2] &
+                   I40E_MODULE_TYPE_1000BASE_SX ||
+                   hw_link_info->module_type[2] &
+                   I40E_MODULE_TYPE_1000BASE_LX) {
+                       ecmd->supported |= SUPPORTED_1000baseT_Full;
+                       if (hw_link_info->requested_speeds &
+                           I40E_LINK_SPEED_1GB)
+                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               }
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               break;
-       case I40E_PHY_TYPE_1000BASE_KX:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_1000baseKX_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_1000baseKX_Full;
                break;
        case I40E_PHY_TYPE_10GBASE_T:
        case I40E_PHY_TYPE_1000BASE_T:
-       case I40E_PHY_TYPE_100BASE_TX:
                ecmd->supported = SUPPORTED_Autoneg |
                                  SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_100baseT_Full;
+                                 SUPPORTED_1000baseT_Full;
                ecmd->advertising = ADVERTISED_Autoneg;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_1000baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_1000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_100BASE_TX:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_100baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
                        ecmd->advertising |= ADVERTISED_100baseT_Full;
                break;
@@ -351,12 +335,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                break;
        case I40E_PHY_TYPE_SGMII:
                ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_100baseT_Full;
+                                 SUPPORTED_1000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
-                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+               if (pf->hw.mac.type == I40E_MAC_X722) {
+                       ecmd->supported |= SUPPORTED_100baseT_Full;
+                       if (hw_link_info->requested_speeds &
+                           I40E_LINK_SPEED_100MB)
+                               ecmd->advertising |= ADVERTISED_100baseT_Full;
+               }
+               break;
+       /* Backplane is set based on supported phy types in get_settings
+        * so don't set anything here but don't warn either
+        */
+       case I40E_PHY_TYPE_40GBASE_KR4:
+       case I40E_PHY_TYPE_20GBASE_KR2:
+       case I40E_PHY_TYPE_10GBASE_KR:
+       case I40E_PHY_TYPE_10GBASE_KX4:
+       case I40E_PHY_TYPE_1000BASE_KX:
                break;
        default:
                /* if we got here and link is up something bad is afoot */
@@ -395,66 +391,73 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
  * Reports link settings that can be determined when link is down
  **/
 static void i40e_get_settings_link_down(struct i40e_hw *hw,
-                                       struct ethtool_cmd *ecmd)
+                                       struct ethtool_cmd *ecmd,
+                                       struct i40e_pf *pf)
 {
-       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+       enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
 
        /* link is down and the driver needs to fall back on
-        * device ID to determine what kinds of info to display,
-        * it's mostly a guess that may change when link is up
+        * supported phy types to figure out what info to display
         */
-       switch (hw->device_id) {
-       case I40E_DEV_ID_QSFP_A:
-       case I40E_DEV_ID_QSFP_B:
-       case I40E_DEV_ID_QSFP_C:
-               /* pluggable QSFP */
-               ecmd->supported = SUPPORTED_40000baseSR4_Full |
-                                 SUPPORTED_40000baseCR4_Full |
-                                 SUPPORTED_40000baseLR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseSR4_Full |
-                                   ADVERTISED_40000baseCR4_Full |
-                                   ADVERTISED_40000baseLR4_Full;
-               break;
-       case I40E_DEV_ID_KX_B:
-               /* backplane 40G */
-               ecmd->supported = SUPPORTED_40000baseKR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseKR4_Full;
-               break;
-       case I40E_DEV_ID_KX_C:
-               /* backplane 10G */
-               ecmd->supported = SUPPORTED_10000baseKR_Full;
-               ecmd->advertising = ADVERTISED_10000baseKR_Full;
-               break;
-       case I40E_DEV_ID_10G_BASE_T:
-       case I40E_DEV_ID_10G_BASE_T4:
-               ecmd->supported = SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_100baseT_Full;
-               /* Figure out what has been requested */
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+       ecmd->supported = 0x0;
+       ecmd->advertising = 0x0;
+       if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_1000baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_1000baseT_Full;
+               if (pf->hw.mac.type == I40E_MAC_X722) {
+                       ecmd->supported |= SUPPORTED_100baseT_Full;
                        ecmd->advertising |= ADVERTISED_100baseT_Full;
-               break;
-       case I40E_DEV_ID_20G_KR2:
-       case I40E_DEV_ID_20G_KR2_A:
-               /* backplane 20G */
-               ecmd->supported = SUPPORTED_20000baseKR2_Full;
-               ecmd->advertising = ADVERTISED_20000baseKR2_Full;
-               break;
-       default:
-               /* all the rest are 10G/1G */
-               ecmd->supported = SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full;
-               /* Figure out what has been requested */
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               break;
+               }
        }
+       if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+           phy_types & I40E_CAP_PHY_TYPE_XFI ||
+           phy_types & I40E_CAP_PHY_TYPE_SFI ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+               ecmd->supported |= SUPPORTED_10000baseT_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_10000baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_10000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+           phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+           phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+               ecmd->supported |= SUPPORTED_40000baseCR4_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                 SUPPORTED_40000baseCR4_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                   ADVERTISED_40000baseCR4_Full;
+       }
+       if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+           !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_100baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_100baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_1000baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_1000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+               ecmd->supported |= SUPPORTED_40000baseSR4_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+               ecmd->supported |= SUPPORTED_40000baseLR4_Full;
 
        /* With no link, speed and duplex are unknown */
        ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
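
With the link down, supported/advertised modes are now derived from the NVM's supported-PHY-type bitmap (hw->phy.phy_types, captured by the report_init hunk in i40e_common.c) rather than guessed from the device ID. The long if-chain amounts to a table walk; a data-driven rendering of it, with illustrative bit names only:

    #include <stdint.h>
    #include <stdio.h>

    struct mode_map {
        uint64_t phy_bits;      /* any of these PHY-type bits... */
        uint32_t modes;         /* ...enable these ethtool modes */
    };

    #define CAP_10G_T   (1u << 0)   /* illustrative capability bits */
    #define CAP_10G_SR  (1u << 1)
    #define SUP_ANEG    (1u << 0)   /* illustrative ethtool bits */
    #define SUP_10G     (1u << 1)

    static const struct mode_map tbl[] = {
        { CAP_10G_T | CAP_10G_SR, SUP_ANEG | SUP_10G },
    };

    int main(void)
    {
        uint64_t phy_types = CAP_10G_SR;    /* as read from the NVM */
        uint32_t modes = 0;
        unsigned int i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
            if (phy_types & tbl[i].phy_bits)
                modes |= tbl[i].modes;
        printf("0x%x\n", modes);    /* prints 0x3 */
        return 0;
    }
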
@@ -478,12 +481,43 @@ static int i40e_get_settings(struct net_device *netdev,
        bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
 
        if (link_up)
-               i40e_get_settings_link_up(hw, ecmd, netdev);
+               i40e_get_settings_link_up(hw, ecmd, netdev, pf);
        else
-               i40e_get_settings_link_down(hw, ecmd);
+               i40e_get_settings_link_down(hw, ecmd, pf);
 
        /* Now set the settings that don't rely on link being up/down */
 
+       /* For backplane, supported and advertised are only reliant on the
+        * phy types the NVM specifies are supported.
+        */
+       if (hw->device_id == I40E_DEV_ID_KX_B ||
+           hw->device_id == I40E_DEV_ID_KX_C ||
+           hw->device_id == I40E_DEV_ID_20G_KR2 ||
+           hw->device_id ==  I40E_DEV_ID_20G_KR2_A) {
+               ecmd->supported = SUPPORTED_Autoneg;
+               ecmd->advertising = ADVERTISED_Autoneg;
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+                       ecmd->supported |= SUPPORTED_40000baseKR4_Full;
+                       ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+                       ecmd->supported |= SUPPORTED_20000baseKR2_Full;
+                       ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+                       ecmd->supported |= SUPPORTED_10000baseKR_Full;
+                       ecmd->advertising |= ADVERTISED_10000baseKR_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+                       ecmd->supported |= SUPPORTED_10000baseKX4_Full;
+                       ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+                       ecmd->supported |= SUPPORTED_1000baseKX_Full;
+                       ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+               }
+       }
+
        /* Set autoneg settings */
        ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
                          AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -583,6 +617,14 @@ static int i40e_set_settings(struct net_device *netdev,
            hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
                return -EOPNOTSUPP;
 
+       if (hw->device_id == I40E_DEV_ID_KX_B ||
+           hw->device_id == I40E_DEV_ID_KX_C ||
+           hw->device_id == I40E_DEV_ID_20G_KR2 ||
+           hw->device_id == I40E_DEV_ID_20G_KR2_A) {
+               netdev_info(netdev, "Changing settings is not supported on backplane.\n");
+               return -EOPNOTSUPP;
+       }
+
        /* get our own copy of the bits to check against */
        memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
        i40e_get_settings(netdev, &safe_ecmd);
@@ -619,28 +661,31 @@ static int i40e_set_settings(struct net_device *netdev,
 
        /* Check autoneg */
        if (autoneg == AUTONEG_ENABLE) {
-               /* If autoneg is not supported, return error */
-               if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
-                       netdev_info(netdev, "Autoneg not supported on this phy\n");
-                       return -EINVAL;
-               }
                /* If autoneg was not already enabled */
                if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+                       /* If autoneg is not supported, return error */
+                       if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+                               netdev_info(netdev, "Autoneg not supported on this phy\n");
+                               return -EINVAL;
+                       }
+                       /* Autoneg is allowed to change */
                        config.abilities = abilities.abilities |
                                           I40E_AQ_PHY_ENABLE_AN;
                        change = true;
                }
        } else {
-               /* If autoneg is supported 10GBASE_T is the only phy that
-                * can disable it, so otherwise return error
-                */
-               if (safe_ecmd.supported & SUPPORTED_Autoneg &&
-                   hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
-                       netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
-                       return -EINVAL;
-               }
                /* If autoneg is currently enabled */
                if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+                       /* If autoneg is supported 10GBASE_T is the only PHY
+                        * that can disable it, so otherwise return error
+                        */
+                       if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+                           hw->phy.link_info.phy_type !=
+                           I40E_PHY_TYPE_10GBASE_T) {
+                               netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+                               return -EINVAL;
+                       }
+                       /* Autoneg is allowed to change */
                        config.abilities = abilities.abilities &
                                           ~I40E_AQ_PHY_ENABLE_AN;
                        change = true;
@@ -704,11 +749,11 @@ static int i40e_set_settings(struct net_device *netdev,
                        return -EAGAIN;
                }
 
-               status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+               status = i40e_update_link_info(hw);
                if (status)
-                       netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
-                                   i40e_stat_str(hw, status),
-                                   i40e_aq_str(hw, hw->aq.asq_last_status));
+                       netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
+                                  i40e_stat_str(hw, status),
+                                  i40e_aq_str(hw, hw->aq.asq_last_status));
 
        } else {
                netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -958,9 +1003,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
 
                cmd = (struct i40e_nvm_access *)eeprom;
                ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-               if (ret_val &&
-                   ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
-                    (hw->debug_mask & I40E_DEBUG_NVM)))
+               if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
                        dev_info(&pf->pdev->dev,
                                 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                                 ret_val, hw->aq.asq_last_status, errno,
@@ -1064,10 +1107,7 @@ static int i40e_set_eeprom(struct net_device *netdev,
 
        cmd = (struct i40e_nvm_access *)eeprom;
        ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-       if (ret_val &&
-           ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
-             hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
-            (hw->debug_mask & I40E_DEBUG_NVM)))
+       if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
                dev_info(&pf->pdev->dev,
                         "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                         ret_val, hw->aq.asq_last_status, errno,
@@ -1087,11 +1127,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, i40e_driver_version_str,
                sizeof(drvinfo->version));
-       strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
+       strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
                sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
 }
 
 static void i40e_get_ringparam(struct net_device *netdev,
@@ -1367,6 +1406,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                        data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
                                     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
                }
+               for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
+                       data[i++] = veb->tc_stats.tc_tx_packets[j];
+                       data[i++] = veb->tc_stats.tc_tx_bytes[j];
+                       data[i++] = veb->tc_stats.tc_rx_packets[j];
+                       data[i++] = veb->tc_stats.tc_rx_bytes[j];
+               }
        }
        for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
                p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -1815,6 +1860,14 @@ static int i40e_get_coalesce(struct net_device *netdev,
 
        ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
        ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+       /* we use the _usecs_high to store/set the interrupt rate limit
+        * that the hardware supports; it almost, but not quite, fits
+        * the original intent of the ethtool variable:
+        * rx_coalesce_usecs_high limits the total interrupts per
+        * second from both tx/rx sources.
+        */
+       ec->rx_coalesce_usecs_high = vsi->int_rate_limit;
+       ec->tx_coalesce_usecs_high = vsi->int_rate_limit;
 
        return 0;
 }
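
The get/set pair above reuses ethtool's rx_coalesce_usecs_high to carry a per-VSI interrupt rate limit. A minimal standalone sketch of the microsecond-to-register conversion follows; the 4 usec granularity and the enable bit are illustrative assumptions (they would explain why i40e_set_coalesce accepts 0-235 usec), not values taken from this diff.

#include <stdint.h>
#include <stdio.h>

#define X_INTRL_ENA	(1u << 6)	/* assumed enable bit */

/* 0 disables rate limiting entirely; otherwise store usecs / 4 */
static uint16_t x_intrl_usec_to_reg(unsigned int usecs)
{
	return usecs ? (uint16_t)((usecs >> 2) | X_INTRL_ENA) : 0;
}

static unsigned int x_intrl_reg_to_usec(uint16_t reg)
{
	return (reg & ~X_INTRL_ENA) << 2;
}

int main(void)
{
	uint16_t reg = x_intrl_usec_to_reg(100);

	printf("reg=0x%x usecs=%u\n", reg, x_intrl_reg_to_usec(reg));
	return 0;
}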
@@ -1833,6 +1886,17 @@ static int i40e_set_coalesce(struct net_device *netdev,
        if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
                vsi->work_limit = ec->tx_max_coalesced_frames_irq;
 
+       /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */
+       if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
+               netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
+               return -EINVAL;
+       }
+
+       if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+               netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+               return -EINVAL;
+       }
+
        vector = vsi->base_vector;
        if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
            (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
@@ -1846,6 +1910,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
                return -EINVAL;
        }
 
+       vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+
        if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
            (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
                vsi->tx_itr_setting = ec->tx_coalesce_usecs;
@@ -1870,11 +1936,14 @@ static int i40e_set_coalesce(struct net_device *netdev,
                vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+
                q_vector = vsi->q_vectors[i];
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
                q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
                wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+               wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
                i40e_flush(hw);
        }
 
@@ -2639,6 +2708,10 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
                I40E_PRIV_FLAGS_NPAR_FLAG : 0;
        ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
                I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
+       ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
+               I40E_PRIV_FLAGS_FD_ATR : 0;
+       ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
+               I40E_PRIV_FLAGS_VEB_STATS : 0;
 
        return ret_flags;
 }
@@ -2659,6 +2732,22 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
        else
                pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
 
+       /* allow the user to control the state of the Flow
+        * Director ATR (Application Targeted Routing) feature
+        * of the driver
+        */
+       if (flags & I40E_PRIV_FLAGS_FD_ATR) {
+               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+       } else {
+               pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+               pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+       }
+
+       if (flags & I40E_PRIV_FLAGS_VEB_STATS)
+               pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
+       else
+               pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
        return 0;
 }
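
The two functions above map driver feature bits onto ethtool private flags. A standalone sketch of the translation pattern; the x_ names and bit positions are illustrative, not the driver's real symbols.

#include <stdint.h>
#include <stdio.h>

#define X_DRV_FD_ATR	(1u << 7)	/* driver-internal feature bit */
#define X_PRIV_FD_ATR	(1u << 2)	/* ethtool-visible priv flag   */

static uint32_t x_get_priv_flags(uint32_t drv_flags)
{
	uint32_t ret = 0;

	ret |= (drv_flags & X_DRV_FD_ATR) ? X_PRIV_FD_ATR : 0;
	return ret;
}

int main(void)
{
	printf("priv flags: 0x%x\n", x_get_priv_flags(X_DRV_FD_ATR));
	return 0;
}

On a live system the exported flag names come from "ethtool --show-priv-flags <dev>", and toggling one looks like "ethtool --set-priv-flags <dev> <flag-name> off" (the exact flag strings depend on the driver's i40e_gstrings_priv_flags table).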
 
index eaedc1f4c25760f90f000bb268ed3c4060275647..fe5d9bf3ed6d80801f5881d47309af3b69ec01e4 100644 (file)
@@ -284,7 +284,7 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf)
        pf->fcoe_hmc_filt_num = 0;
 
        if (!pf->hw.func_caps.fcoe) {
-               dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
+               dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n");
                return;
        }
 
@@ -1516,10 +1516,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
         * same PCI function.
         */
        netdev->dev_port = 1;
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
        i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
        i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
        i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* use san mac */
        ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
index a484f2265524ec8755b4e5fd913ff597829f0bfb..b825f978d441d1987581b249694298bb5996538d 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 21
+#define DRV_VERSION_BUILD 46
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -1355,6 +1355,9 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
  * @is_netdev: make sure it's a netdev filter, else doesn't matter
  *
  * Returns ptr to the filter object or NULL when no memory available.
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
  **/
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        u8 *macaddr, s16 vlan,
@@ -1413,6 +1416,9 @@ add_filter_out:
  * @vlan: the vlan
  * @is_vf: make sure it's a VF filter, else doesn't matter
  * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
  **/
 void i40e_del_filter(struct i40e_vsi *vsi,
                     u8 *macaddr, s16 vlan,
@@ -1519,8 +1525,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
                i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
        } else {
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
                                false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
        }
 
        if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
@@ -1531,10 +1539,12 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
                i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
        } else {
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
                                    false, false);
                if (f)
                        f->is_laa = true;
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
        }
 
        i40e_sync_vsi_filters(vsi, false);
@@ -1707,6 +1717,8 @@ static void i40e_set_rx_mode(struct net_device *netdev)
        struct netdev_hw_addr *mca;
        struct netdev_hw_addr *ha;
 
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
        /* add addr if not already in the filter list */
        netdev_for_each_uc_addr(uca, netdev) {
                if (!i40e_find_mac(vsi, uca->addr, false, true)) {
@@ -1754,6 +1766,7 @@ static void i40e_set_rx_mode(struct net_device *netdev)
 bottom_of_search_loop:
                continue;
        }
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* check for other flag changes */
        if (vsi->current_netdev_flags != vsi->netdev->flags) {
@@ -1762,6 +1775,79 @@ bottom_of_search_loop:
        }
 }
 
+/**
+ * i40e_mac_filter_entry_clone - Clones a MAC filter entry
+ * @src: source MAC filter entry to be cloned
+ *
+ * Returns the pointer to the newly cloned MAC filter entry or NULL
+ * in case of error
+ **/
+static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
+                                       struct i40e_mac_filter *src)
+{
+       struct i40e_mac_filter *f;
+
+       f = kzalloc(sizeof(*f), GFP_ATOMIC);
+       if (!f)
+               return NULL;
+       *f = *src;
+
+       INIT_LIST_HEAD(&f->list);
+
+       return f;
+}
+
+/**
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ *        those entries need to be undone.
+ *
+ * MAC filter entries from this list were slated to be removed from the device.
+ **/
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+                                        struct list_head *from)
+{
+       struct i40e_mac_filter *f, *ftmp;
+
+       list_for_each_entry_safe(f, ftmp, from, list) {
+               f->changed = true;
+               /* Move the element back into the MAC filter list */
+               list_move_tail(&f->list, &vsi->mac_filter_list);
+       }
+}
+
+/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ *
+ * MAC filter entries from this list were slated to be added to the device.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+{
+       struct i40e_mac_filter *f, *ftmp;
+
+       list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+               if (!f->changed && f->counter)
+                       f->changed = true;
+       }
+}
+
+/**
+ * i40e_cleanup_add_list - Deletes the elements from add list and releases
+ *                     memory
+ * @add_list: Pointer to list which contains MAC filter entries
+ **/
+static void i40e_cleanup_add_list(struct list_head *add_list)
+{
+       struct i40e_mac_filter *f, *ftmp;
+
+       list_for_each_entry_safe(f, ftmp, add_list, list) {
+               list_del(&f->list);
+               kfree(f);
+       }
+}
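
Taken together, these helpers support a two-phase update: while mac_filter_list_lock is held, entries are only moved onto a temporary delete list or cloned onto a temporary add list; the slow admin-queue calls then run with the lock dropped, and the undo/cleanup helpers restore the VSI list if anything fails along the way. A standalone sketch of that pattern, with all x_ names illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct x_filter {
	char mac[18];
	int changed;
	struct x_filter *next;
};

static void x_push(struct x_filter **head, struct x_filter *f)
{
	f->next = *head;
	*head = f;
}

/* phase 1: under the lock, only unlink changed entries onto a
 * private list -- no slow or sleeping work here
 */
static struct x_filter *x_collect_changed(struct x_filter **vsi_list)
{
	struct x_filter *del = NULL, **pp = vsi_list;

	/* spin_lock_bh(&lock); */
	while (*pp) {
		if ((*pp)->changed) {
			struct x_filter *f = *pp;

			*pp = f->next;
			x_push(&del, f);
		} else {
			pp = &(*pp)->next;
		}
	}
	/* spin_unlock_bh(&lock); */
	return del;
}

/* phase 2: lock dropped, issue the slow "firmware" calls and free
 * entries that were applied; a real driver would splice the list
 * back (the undo helpers above) if a call failed
 */
static void x_apply_deletions(struct x_filter *del)
{
	while (del) {
		struct x_filter *next = del->next;

		printf("remove %s via admin queue\n", del->mac);
		free(del);
		del = next;
	}
}

int main(void)
{
	struct x_filter *list = NULL;
	struct x_filter *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	strcpy(f->mac, "02:00:00:00:00:01");
	f->changed = 1;
	x_push(&list, f);
	x_apply_deletions(x_collect_changed(&list));
	return 0;
}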
+
 /**
  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
@@ -1773,11 +1859,13 @@ bottom_of_search_loop:
  **/
 int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 {
-       struct i40e_mac_filter *f, *ftmp;
+       struct list_head tmp_del_list, tmp_add_list;
+       struct i40e_mac_filter *f, *ftmp, *fclone;
        bool promisc_forced_on = false;
        bool add_happened = false;
        int filter_list_len = 0;
        u32 changed_flags = 0;
+       bool err_cond = false;
        i40e_status ret = 0;
        struct i40e_pf *pf;
        int num_add = 0;
@@ -1798,17 +1886,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                vsi->current_netdev_flags = vsi->netdev->flags;
        }
 
+       INIT_LIST_HEAD(&tmp_del_list);
+       INIT_LIST_HEAD(&tmp_add_list);
+
        if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
                vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
 
-               filter_list_len = pf->hw.aq.asq_buf_size /
-                           sizeof(struct i40e_aqc_remove_macvlan_element_data);
-               del_list = kcalloc(filter_list_len,
-                           sizeof(struct i40e_aqc_remove_macvlan_element_data),
-                           GFP_KERNEL);
-               if (!del_list)
-                       return -ENOMEM;
-
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                        if (!f->changed)
                                continue;
@@ -1816,6 +1900,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                        if (f->counter != 0)
                                continue;
                        f->changed = false;
+
+                       /* Move the element into temporary del_list */
+                       list_move_tail(&f->list, &tmp_del_list);
+               }
+
+               list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+                       if (!f->changed)
+                               continue;
+
+                       if (f->counter == 0)
+                               continue;
+                       f->changed = false;
+
+                       /* Clone MAC filter entry and add into temporary list */
+                       fclone = i40e_mac_filter_entry_clone(f);
+                       if (!fclone) {
+                               err_cond = true;
+                               break;
+                       }
+                       list_add_tail(&fclone->list, &tmp_add_list);
+               }
+
+               /* if failed to clone MAC filter entry - undo */
+               if (err_cond) {
+                       i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+                       i40e_undo_add_filter_entries(vsi);
+               }
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+               if (err_cond)
+                       i40e_cleanup_add_list(&tmp_add_list);
+       }
+
+       /* Now process 'tmp_del_list' outside the lock */
+       if (!list_empty(&tmp_del_list)) {
+               filter_list_len = pf->hw.aq.asq_buf_size /
+                           sizeof(struct i40e_aqc_remove_macvlan_element_data);
+               del_list = kcalloc(filter_list_len,
+                           sizeof(struct i40e_aqc_remove_macvlan_element_data),
+                           GFP_KERNEL);
+               if (!del_list) {
+                       i40e_cleanup_add_list(&tmp_add_list);
+
+                       /* Undo VSI's MAC filter entry element updates */
+                       spin_lock_bh(&vsi->mac_filter_list_lock);
+                       i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+                       i40e_undo_add_filter_entries(vsi);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       return -ENOMEM;
+               }
+
+               list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
                        cmd_flags = 0;
 
                        /* add to delete list */
@@ -1828,10 +1964,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                        del_list[num_del].flags = cmd_flags;
                        num_del++;
 
-                       /* unlink from filter list */
-                       list_del(&f->list);
-                       kfree(f);
-
                        /* flush a full buffer */
                        if (num_del == filter_list_len) {
                                ret = i40e_aq_remove_macvlan(&pf->hw,
@@ -1842,12 +1974,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                                memset(del_list, 0, sizeof(*del_list));
 
                                if (ret && aq_err != I40E_AQ_RC_ENOENT)
-                                       dev_info(&pf->pdev->dev,
-                                                "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
-                                                i40e_stat_str(&pf->hw, ret),
-                                                i40e_aq_str(&pf->hw, aq_err));
+                                       dev_err(&pf->pdev->dev,
+                                               "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+                                               i40e_stat_str(&pf->hw, ret),
+                                               i40e_aq_str(&pf->hw, aq_err));
                        }
+                       /* Release memory for MAC filter entries which were
+                        * synced up with HW.
+                        */
+                       list_del(&f->list);
+                       kfree(f);
                }
+
                if (num_del) {
                        ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
                                                     del_list, num_del, NULL);
@@ -1863,6 +2001,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 
                kfree(del_list);
                del_list = NULL;
+       }
+
+       if (!list_empty(&tmp_add_list)) {
 
                /* do all the adds now */
                filter_list_len = pf->hw.aq.asq_buf_size /
@@ -1870,16 +2011,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                add_list = kcalloc(filter_list_len,
                               sizeof(struct i40e_aqc_add_macvlan_element_data),
                               GFP_KERNEL);
-               if (!add_list)
+               if (!add_list) {
+                       /* Purge element from temporary lists */
+                       i40e_cleanup_add_list(&tmp_add_list);
+
+                       /* Undo add filter entries from VSI MAC filter list */
+                       spin_lock_bh(&vsi->mac_filter_list_lock);
+                       i40e_undo_add_filter_entries(vsi);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
                        return -ENOMEM;
+               }
 
-               list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
-                       if (!f->changed)
-                               continue;
+               list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
 
-                       if (f->counter == 0)
-                               continue;
-                       f->changed = false;
                        add_happened = true;
                        cmd_flags = 0;
 
@@ -1906,7 +2050,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                                        break;
                                memset(add_list, 0, sizeof(*add_list));
                        }
+                       /* Entries from tmp_add_list were cloned from the
+                        * MAC filter list, so free those cloned entries here
+                        */
+                       list_del(&f->list);
+                       kfree(f);
                }
+
                if (num_add) {
                        ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
                                                  add_list, num_add, NULL);
@@ -2158,6 +2308,9 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
        is_vf = (vsi->type == I40E_VSI_SRIOV);
        is_netdev = !!(vsi->netdev);
 
+       /* Lock once because all functions invoked below iterate the list */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
        if (is_netdev) {
                add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
                                        is_vf, is_netdev);
@@ -2165,6 +2318,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, vsi->netdev->dev_addr);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
                        return -ENOMEM;
                }
        }
@@ -2175,6 +2329,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, f->macaddr);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
                        return -ENOMEM;
                }
        }
@@ -2196,6 +2351,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
                                dev_info(&vsi->back->pdev->dev,
                                         "Could not add filter 0 for %pM\n",
                                         vsi->netdev->dev_addr);
+                               spin_unlock_bh(&vsi->mac_filter_list_lock);
                                return -ENOMEM;
                        }
                }
@@ -2204,22 +2360,28 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
        /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
        if (vid > 0 && !vsi->info.pvid) {
                list_for_each_entry(f, &vsi->mac_filter_list, list) {
-                       if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
-                                            is_vf, is_netdev)) {
-                               i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
-                                               is_vf, is_netdev);
-                               add_f = i40e_add_filter(vsi, f->macaddr,
-                                                       0, is_vf, is_netdev);
-                               if (!add_f) {
-                                       dev_info(&vsi->back->pdev->dev,
-                                                "Could not add filter 0 for %pM\n",
-                                                f->macaddr);
-                                       return -ENOMEM;
-                               }
+                       if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+                                             is_vf, is_netdev))
+                               continue;
+                       i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+                                       is_vf, is_netdev);
+                       add_f = i40e_add_filter(vsi, f->macaddr,
+                                               0, is_vf, is_netdev);
+                       if (!add_f) {
+                               dev_info(&vsi->back->pdev->dev,
+                                        "Could not add filter 0 for %pM\n",
+                                       f->macaddr);
+                               spin_unlock_bh(&vsi->mac_filter_list_lock);
+                               return -ENOMEM;
                        }
                }
        }
 
+       /* Make sure to release before sync_vsi_filter because that
+        * function will lock/unlock as necessary
+        */
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return 0;
@@ -2244,6 +2406,9 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
        is_vf = (vsi->type == I40E_VSI_SRIOV);
        is_netdev = !!(netdev);
 
+       /* Lock once because all functions invoked below iterate the list */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
        if (is_netdev)
                i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
 
@@ -2274,6 +2439,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add filter %d for %pM\n",
                                 I40E_VLAN_ANY, netdev->dev_addr);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
                        return -ENOMEM;
                }
        }
@@ -2282,16 +2448,22 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
                list_for_each_entry(f, &vsi->mac_filter_list, list) {
                        i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
                        add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
-                                           is_vf, is_netdev);
+                                               is_vf, is_netdev);
                        if (!add_f) {
                                dev_info(&vsi->back->pdev->dev,
                                         "Could not add filter %d for %pM\n",
                                         I40E_VLAN_ANY, f->macaddr);
+                               spin_unlock_bh(&vsi->mac_filter_list_lock);
                                return -ENOMEM;
                        }
                }
        }
 
+       /* Make sure to release before sync_vsi_filter because that
+        * function will lock/unlock as necessary
+        */
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return 0;
@@ -2901,11 +3073,9 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi)
 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
-       struct i40e_q_vector *q_vector;
        struct i40e_hw *hw = &pf->hw;
        u16 vector;
        int i, q;
-       u32 val;
        u32 qp;
 
        /* The interrupt indexing is offset by 1 in the PFINT_ITRn
@@ -2915,7 +3085,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
        qp = vsi->base_queue;
        vector = vsi->base_vector;
        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
-               q_vector = vsi->q_vectors[i];
+               struct i40e_q_vector *q_vector = vsi->q_vectors[i];
+
+               q_vector->itr_countdown = ITR_COUNTDOWN_START;
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                q_vector->rx.latency_range = I40E_LOW_LATENCY;
                wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2924,10 +3096,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
                q_vector->tx.latency_range = I40E_LOW_LATENCY;
                wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
                     q_vector->tx.itr);
+               wr32(hw, I40E_PFINT_RATEN(vector - 1),
+                    INTRL_USEC_TO_REG(vsi->int_rate_limit));
 
                /* Linked list for the queuepairs assigned to this vector */
                wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
                for (q = 0; q < q_vector->num_ringpairs; q++) {
+                       u32 val;
+
                        val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
                              (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
                              (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
@@ -3007,6 +3183,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
        u32 val;
 
        /* set the ITR configuration */
+       q_vector->itr_countdown = ITR_COUNTDOWN_START;
        q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
        q_vector->rx.latency_range = I40E_LOW_LATENCY;
        wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
@@ -3092,7 +3269,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
        if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;
 
-       napi_schedule(&q_vector->napi);
+       napi_schedule_irqoff(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
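
The swap to napi_schedule_irqoff() is safe because an MSI-X vector handler runs in hardirq context with interrupts already disabled, so the cheaper variant can skip the local_irq_save()/local_irq_restore() pair that plain napi_schedule() pays for. A standalone sketch of the contract, with stub x_ types standing in for the kernel's:

#include <stdio.h>

struct x_napi {
	int scheduled;
};

/* kernel analogue: may only be called with hard IRQs disabled */
static void x_napi_schedule_irqoff(struct x_napi *n)
{
	n->scheduled = 1;	/* no IRQ-flag save/restore needed */
}

/* an MSI-X vector handler: hard IRQs are off on entry */
static int x_msix_clean_rings(struct x_napi *n)
{
	x_napi_schedule_irqoff(n);
	return 1;		/* IRQ_HANDLED */
}

int main(void)
{
	struct x_napi n = { 0 };

	x_msix_clean_rings(&n);
	printf("napi scheduled: %d\n", n.scheduled);
	return 0;
}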
@@ -3261,6 +3438,8 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
        /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
        if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+               struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+               struct i40e_q_vector *q_vector = vsi->q_vectors[0];
 
                /* temporarily disable queue cause for NAPI processing */
                u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
@@ -3273,7 +3452,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
                wr32(hw, I40E_QINT_TQCTL(0), qval);
 
                if (!test_bit(__I40E_DOWN, &pf->state))
-                       napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
+                       napi_schedule_irqoff(&q_vector->napi);
        }
 
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -3574,14 +3753,12 @@ static void i40e_netpoll(struct net_device *netdev)
        if (test_bit(__I40E_DOWN, &vsi->state))
                return;
 
-       pf->flags |= I40E_FLAG_IN_NETPOLL;
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                for (i = 0; i < vsi->num_q_vectors; i++)
                        i40e_msix_clean_rings(0, vsi->q_vectors[i]);
        } else {
                i40e_intr(pf->pdev->irq, netdev);
        }
-       pf->flags &= ~I40E_FLAG_IN_NETPOLL;
 }
 #endif
 
@@ -4844,8 +5021,8 @@ out:
  */
 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 {
-       char speed[SPEED_SIZE] = "Unknown";
-       char fc[FC_SIZE] = "RX/TX";
+       char *speed = "Unknown";
+       char *fc = "Unknown";
 
        if (vsi->current_isup == isup)
                return;
@@ -4866,19 +5043,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 
        switch (vsi->back->hw.phy.link_info.link_speed) {
        case I40E_LINK_SPEED_40GB:
-               strlcpy(speed, "40 Gbps", SPEED_SIZE);
+               speed = "40 G";
                break;
        case I40E_LINK_SPEED_20GB:
-               strncpy(speed, "20 Gbps", SPEED_SIZE);
+               speed = "20 G";
                break;
        case I40E_LINK_SPEED_10GB:
-               strlcpy(speed, "10 Gbps", SPEED_SIZE);
+               speed = "10 G";
                break;
        case I40E_LINK_SPEED_1GB:
-               strlcpy(speed, "1000 Mbps", SPEED_SIZE);
+               speed = "1000 M";
                break;
        case I40E_LINK_SPEED_100MB:
-               strncpy(speed, "100 Mbps", SPEED_SIZE);
+               speed = "100 M";
                break;
        default:
                break;
@@ -4886,20 +5063,20 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 
        switch (vsi->back->hw.fc.current_mode) {
        case I40E_FC_FULL:
-               strlcpy(fc, "RX/TX", FC_SIZE);
+               fc = "RX/TX";
                break;
        case I40E_FC_TX_PAUSE:
-               strlcpy(fc, "TX", FC_SIZE);
+               fc = "TX";
                break;
        case I40E_FC_RX_PAUSE:
-               strlcpy(fc, "RX", FC_SIZE);
+               fc = "RX";
                break;
        default:
-               strlcpy(fc, "None", FC_SIZE);
+               fc = "None";
                break;
        }
 
-       netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+       netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
                    speed, fc);
 }
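
The rewrite above drops the fixed-size stack buffers that were filled with a mix of strlcpy() and strncpy() in favor of plain pointers to string literals, which removes the truncation concern entirely. A standalone sketch of the idiom, using const char * as the stricter form:

#include <stdio.h>

static const char *x_speed_str(unsigned int mbps)
{
	switch (mbps) {
	case 40000:
		return "40 G";
	case 10000:
		return "10 G";
	case 1000:
		return "1000 M";
	default:
		return "Unknown";
	}
}

int main(void)
{
	printf("NIC Link is Up %sbps Full Duplex\n", x_speed_str(40000));
	return 0;
}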
 
@@ -6220,8 +6397,9 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb)
 {
        struct i40e_pf *pf = veb->pf;
 
-       dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
-                veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+       if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+               dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+                        veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
        if (veb->bridge_mode & BRIDGE_MODE_VEPA)
                i40e_disable_pf_switch_lb(pf);
        else
@@ -6353,12 +6531,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
                }
        } while (err);
 
-       if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
-           (pf->hw.aq.fw_maj_ver < 2)) {
-               pf->hw.func_caps.num_msix_vectors++;
-               pf->hw.func_caps.num_msix_vectors_vf++;
-       }
-
        if (pf->hw.debug_mask & I40E_DEBUG_USER)
                dev_info(&pf->pdev->dev,
                         "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -6593,9 +6765,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        /* make sure our flow control settings are restored */
        ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
        if (ret)
-               dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
-                        i40e_stat_str(&pf->hw, ret),
-                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+                       i40e_stat_str(&pf->hw, ret),
+                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        /* Rebuild the VSIs and VEBs that existed before reset.
         * They are still in our local switch element arrays, so only
@@ -6665,6 +6837,15 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                ret = i40e_setup_misc_vector(pf);
 
+       /* Add a filter to drop all Flow control frames transmitted from
+        * any VSI. By doing so we stop a malicious VF from sending out
+        * PAUSE or PFC frames and potentially controlling traffic for
+        * other PF/VF VSIs.
+        * The FW can still send Flow control frames if enabled.
+        */
+       i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+                                                      pf->main_vsi_seid);
+
        /* restart the VSIs that were rebuilt and running before the reset */
        i40e_pf_unquiesce_all_vsi(pf);
 
@@ -7047,6 +7228,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        vsi->idx = vsi_idx;
        vsi->rx_itr_setting = pf->rx_itr_default;
        vsi->tx_itr_setting = pf->tx_itr_default;
+       vsi->int_rate_limit = 0;
        vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
                                pf->rss_table_size : 64;
        vsi->netdev_registered = false;
@@ -7065,6 +7247,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        /* Setup default MSIX irq handler for VSI */
        i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
 
+       /* Initialize VSI lock */
+       spin_lock_init(&vsi->mac_filter_list_lock);
        pf->vsi[vsi_idx] = vsi;
        ret = vsi_idx;
        goto unlock_pf;
@@ -7955,12 +8139,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
            (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-               if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
-                       pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-               } else {
+               if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+                   pf->hw.num_partitions > 1)
                        dev_info(&pf->pdev->dev,
                                 "Flow Director Sideband mode Disabled in MFP mode\n");
-               }
+               else
+                       pf->flags |= I40E_FLAG_FD_SB_ENABLED;
                pf->fdir_pf_filter_count =
                                 pf->hw.func_caps.fd_filters_guaranteed;
                pf->hw.fdir_shared_filter_count =
@@ -7970,6 +8154,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
        if (pf->hw.func_caps.vmdq) {
                pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
                pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+               pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
        }
 
 #ifdef I40E_FCOE
@@ -8330,13 +8515,15 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
  * @seq: RTNL message seq #
  * @dev: the netdev being configured
  * @filter_mask: unused
+ * @nlflags: netlink flags passed in
  *
  * Return the mode in which the hardware bridge is operating in
  * i.e VEB or VEPA.
  **/
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                   struct net_device *dev,
-                                  u32 filter_mask, int nlflags)
+                                  u32 __always_unused filter_mask,
+                                  int nlflags)
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
@@ -8365,7 +8552,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 /**
  * i40e_features_check - Validate encapsulated packet conforms to limits
  * @skb: skb buff
- * @netdev: This physical port's netdev
+ * @dev: This physical port's netdev
  * @features: Offload features that the stack believes apply
  **/
 static netdev_features_t i40e_features_check(struct sk_buff *skb,
@@ -8446,6 +8633,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 
        netdev->hw_enc_features |= NETIF_F_IP_CSUM       |
                                  NETIF_F_GSO_UDP_TUNNEL |
+                                 NETIF_F_GSO_GRE        |
                                  NETIF_F_TSO;
 
        netdev->features = NETIF_F_SG                  |
@@ -8453,6 +8641,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                           NETIF_F_SCTP_CSUM           |
                           NETIF_F_HIGHDMA             |
                           NETIF_F_GSO_UDP_TUNNEL      |
+                          NETIF_F_GSO_GRE             |
                           NETIF_F_HW_VLAN_CTAG_TX     |
                           NETIF_F_HW_VLAN_CTAG_RX     |
                           NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -8478,17 +8667,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                 * default a MAC-VLAN filter that accepts any tagged packet
                 * which must be replaced by a normal filter.
                 */
-               if (!i40e_rm_default_mac_filter(vsi, mac_addr))
+               if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
+                       spin_lock_bh(&vsi->mac_filter_list_lock);
                        i40e_add_filter(vsi, mac_addr,
                                        I40E_VLAN_ANY, false, true);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+               }
        } else {
                /* relate the VSI_VMDQ name to the VSI_MAIN name */
                snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
                         pf->vsi[pf->lan_vsi]->netdev->name);
                random_ether_addr(mac_addr);
+
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
        }
+
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        ether_addr_copy(netdev->dev_addr, mac_addr);
        ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -8544,12 +8742,22 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
                return 1;
 
        veb = pf->veb[vsi->veb_idx];
+       if (!veb) {
+               dev_info(&pf->pdev->dev,
+                        "There is no veb associated with the bridge\n");
+               return -ENOENT;
+       }
+
        /* Uplink is a bridge in VEPA mode */
-       if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
+       if (veb->bridge_mode & BRIDGE_MODE_VEPA)
                return 0;
 
        /* Uplink is a bridge in VEB mode */
        return 1;
 }
 
 /**
@@ -8562,10 +8770,13 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 static int i40e_add_vsi(struct i40e_vsi *vsi)
 {
        int ret = -ENODEV;
-       struct i40e_mac_filter *f, *ftmp;
+       u8 laa_macaddr[ETH_ALEN];
+       bool found_laa_mac_filter = false;
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vsi_context ctxt;
+       struct i40e_mac_filter *f, *ftmp;
        u8 enabled_tc = 0x1; /* TC0 enabled */
        int f_count = 0;
 
@@ -8737,32 +8948,41 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                vsi->id = ctxt.vsi_number;
        }
 
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        /* If macvlan filters already exist, force them to get loaded */
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                f->changed = true;
                f_count++;
 
+               /* Expected to have only one MAC filter entry for LAA in list */
                if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
-                       struct i40e_aqc_remove_macvlan_element_data element;
+                       ether_addr_copy(laa_macaddr, f->macaddr);
+                       found_laa_mac_filter = true;
+               }
+       }
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
-                       memset(&element, 0, sizeof(element));
-                       ether_addr_copy(element.mac_addr, f->macaddr);
-                       element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
-                       ret = i40e_aq_remove_macvlan(hw, vsi->seid,
-                                                    &element, 1, NULL);
-                       if (ret) {
-                               /* some older FW has a different default */
-                               element.flags |=
-                                              I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
-                               i40e_aq_remove_macvlan(hw, vsi->seid,
-                                                      &element, 1, NULL);
-                       }
+       if (found_laa_mac_filter) {
+               struct i40e_aqc_remove_macvlan_element_data element;
 
-                       i40e_aq_mac_address_write(hw,
-                                                 I40E_AQC_WRITE_TYPE_LAA_WOL,
-                                                 f->macaddr, NULL);
+               memset(&element, 0, sizeof(element));
+               ether_addr_copy(element.mac_addr, laa_macaddr);
+               element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+               ret = i40e_aq_remove_macvlan(hw, vsi->seid,
+                                            &element, 1, NULL);
+               if (ret) {
+                       /* some older FW has a different default */
+                       element.flags |=
+                                      I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+                       i40e_aq_remove_macvlan(hw, vsi->seid,
+                                              &element, 1, NULL);
                }
+
+               i40e_aq_mac_address_write(hw,
+                                         I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                         laa_macaddr, NULL);
        }
+
        if (f_count) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                pf->flags |= I40E_FLAG_FILTER_SYNC;
@@ -8825,9 +9045,12 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                i40e_vsi_disable_irq(vsi);
        }
 
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
                i40e_del_filter(vsi, f->macaddr, f->vlan,
                                f->is_vf, f->is_netdev);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
        i40e_sync_vsi_filters(vsi, false);
 
        i40e_vsi_delete(vsi);
@@ -9727,7 +9950,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
                i40e_config_rss(pf);
 
        /* fill in link information and enable LSE reporting */
-       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+       i40e_update_link_info(&pf->hw);
        i40e_link_event(pf);
 
        /* Initialize user-specific link properties */
@@ -9845,8 +10068,14 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
        }
 
        pf->queues_left = queues_left;
+       dev_dbg(&pf->pdev->dev,
+               "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
+               pf->hw.func_caps.num_tx_qp,
+               !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+               pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
+               pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
 #ifdef I40E_FCOE
-       dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
+       dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
 #endif
 }
 
@@ -9923,6 +10152,10 @@ static void i40e_print_features(struct i40e_pf *pf)
        if (pf->flags & I40E_FLAG_FCOE_ENABLED)
                buf += sprintf(buf, "FCOE ");
 #endif
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               buf += sprintf(buf, "VEB ");
+       else
+               buf += sprintf(buf, "VEPA ");
 
        BUG_ON(buf > (string + INFO_STRING_LEN));
        dev_info(&pf->pdev->dev, "%s\n", string);
@@ -9948,9 +10181,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        static u16 pfs_found;
        u16 wol_nvm_bits;
        u16 link_status;
-       int err = 0;
+       int err;
        u32 len;
        u32 i;
+       u8 set_fc_aq_fail;
 
        err = pci_enable_device_mem(pdev);
        if (err)
@@ -10062,7 +10296,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pf->hw.fc.requested_mode = I40E_FC_NONE;
 
        err = i40e_init_adminq(hw);
-       dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+
+       /* provide nvm, fw, api versions */
+       dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
+                hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
+                hw->aq.api_maj_ver, hw->aq.api_min_ver,
+                i40e_nvm_version_str(hw));
+
        if (err) {
                dev_info(&pdev->dev,
                         "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
@@ -10209,6 +10449,25 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
                goto err_vsis;
        }
+
+       /* Make sure flow control is set according to current settings */
+       err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+       if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+               dev_dbg(&pf->pdev->dev,
+                       "Set fc with err %s aq_err %s on get_phy_cap\n",
+                       i40e_stat_str(hw, err),
+                       i40e_aq_str(hw, hw->aq.asq_last_status));
+       if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+               dev_dbg(&pf->pdev->dev,
+                       "Set fc with err %s aq_err %s on set_phy_config\n",
+                       i40e_stat_str(hw, err),
+                       i40e_aq_str(hw, hw->aq.asq_last_status));
+       if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+               dev_dbg(&pf->pdev->dev,
+                       "Set fc with err %s aq_err %s on get_link_info\n",
+                       i40e_stat_str(hw, err),
+                       i40e_aq_str(hw, hw->aq.asq_last_status));
+
        /* if FDIR VSI was set up, start it now */
        for (i = 0; i < pf->num_alloc_vsi; i++) {
                if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
@@ -10299,37 +10558,82 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        i40e_fcoe_vsi_setup(pf);
 
 #endif
-       /* Get the negotiated link width and speed from PCI config space */
-       pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+#define PCI_SPEED_SIZE 8
+#define PCI_WIDTH_SIZE 8
+       /* Devices on the IOSF bus do not have this information
+        * and will report PCI Gen 1 x 1 by default so don't bother
+        * checking them.
+        */
+       if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
+               char speed[PCI_SPEED_SIZE] = "Unknown";
+               char width[PCI_WIDTH_SIZE] = "Unknown";
 
-       i40e_set_pci_config_data(hw, link_status);
+               /* Get the negotiated link width and speed from PCI config
+                * space
+                */
+               pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
+                                         &link_status);
+
+               i40e_set_pci_config_data(hw, link_status);
+
+               switch (hw->bus.speed) {
+               case i40e_bus_speed_8000:
+                       strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+               case i40e_bus_speed_5000:
+                       strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+               case i40e_bus_speed_2500:
+                       strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+               default:
+                       break;
+               }
+               switch (hw->bus.width) {
+               case i40e_bus_width_pcie_x8:
+                       strncpy(width, "8", PCI_WIDTH_SIZE); break;
+               case i40e_bus_width_pcie_x4:
+                       strncpy(width, "4", PCI_WIDTH_SIZE); break;
+               case i40e_bus_width_pcie_x2:
+                       strncpy(width, "2", PCI_WIDTH_SIZE); break;
+               case i40e_bus_width_pcie_x1:
+                       strncpy(width, "1", PCI_WIDTH_SIZE); break;
+               default:
+                       break;
+               }
 
-       dev_info(&pdev->dev, "PCI-Express: %s %s\n",
-               (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
-                hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
-                hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
-                "Unknown"),
-               (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
-                hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
-                hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
-                hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
-                "Unknown"));
+               dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
+                        speed, width);
 
-       if (hw->bus.width < i40e_bus_width_pcie_x8 ||
-           hw->bus.speed < i40e_bus_speed_8000) {
-               dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
-               dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+               if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+                   hw->bus.speed < i40e_bus_speed_8000) {
+                       dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+                       dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+               }
        }
 
        /* get the requested speeds from the fw */
        err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
        if (err)
-               dev_info(&pf->pdev->dev,
-                        "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
-                        i40e_stat_str(&pf->hw, err),
-                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
+                       i40e_stat_str(&pf->hw, err),
+                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
+       /* get the supported phy types from the fw */
+       err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+       if (err)
+               dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
+                       i40e_stat_str(&pf->hw, err),
+                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+       pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
+
+       /* Add a filter to drop all Flow control frames transmitted from
+        * any VSI. By doing so we stop a malicious VF from sending out
+        * PAUSE or PFC frames and potentially controlling traffic for
+        * other PF/VF VSIs.
+        * The FW can still send Flow control frames if enabled.
+        */
+       i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+                                                      pf->main_vsi_seid);
+
        /* print a string summarizing features */
        i40e_print_features(pf);
 
@@ -10377,6 +10681,7 @@ err_dma:
 static void i40e_remove(struct pci_dev *pdev)
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
+       struct i40e_hw *hw = &pf->hw;
        i40e_status ret_code;
        int i;
 
@@ -10384,6 +10689,10 @@ static void i40e_remove(struct pci_dev *pdev)
 
        i40e_ptp_stop(pf);
 
+       /* Disable RSS in hw */
+       wr32(hw, I40E_PFQF_HENA(0), 0);
+       wr32(hw, I40E_PFQF_HENA(1), 0);
+
        /* no more scheduling of any task */
        set_bit(__I40E_DOWN, &pf->state);
        del_timer_sync(&pf->service_timer);
index 2142e1004a2f41de2bf3cdaee8d91ba756c41ed0..6100cdd9ad13b420f20a640f8dc2572b89afb92d 100644 (file)
@@ -290,9 +290,18 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                               u16 *data)
 {
-       if (hw->mac.type == I40E_MAC_X722)
-               return i40e_read_nvm_word_aq(hw, offset, data);
-       return i40e_read_nvm_word_srctl(hw, offset, data);
+       enum i40e_status_code ret_code = 0;
+
+       if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+               ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+               if (!ret_code) {
+                       ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+                       i40e_release_nvm(hw);
+               }
+       } else {
+               ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+       }
+       return ret_code;
 }
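
The reworked read takes the NVM semaphore only for the admin-queue path and releases it whether or not the read itself succeeds. A standalone sketch of that acquire/do/release shape, with x_ stubs in place of the real firmware calls:

#include <stdio.h>

static int x_acquire_nvm(void)
{
	return 0;	/* 0 == acquired */
}

static void x_release_nvm(void)
{
}

static int x_read_word_aq(unsigned int offset, unsigned short *data)
{
	(void)offset;
	*data = 0xBEEF;	/* pretend firmware returned a word */
	return 0;
}

static int x_read_nvm_word(int use_aq, unsigned int offset,
			   unsigned short *data)
{
	int ret = 0;

	if (use_aq) {
		ret = x_acquire_nvm();
		if (!ret) {
			ret = x_read_word_aq(offset, data);
			x_release_nvm();	/* release on both outcomes */
		}
	} else {
		*data = 0;	/* register-based path stubbed out */
	}
	return ret;
}

int main(void)
{
	unsigned short word;

	if (!x_read_nvm_word(1, 0x10, &word))
		printf("word: 0x%04x\n", word);
	return 0;
}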
 
 /**
@@ -397,9 +406,19 @@ read_nvm_buffer_aq_exit:
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                 u16 *words, u16 *data)
 {
-       if (hw->mac.type == I40E_MAC_X722)
-               return i40e_read_nvm_buffer_aq(hw, offset, words, data);
-       return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+       enum i40e_status_code ret_code = 0;
+
+       if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+               ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+               if (!ret_code) {
+                       ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+                                                          data);
+                       i40e_release_nvm(hw);
+               }
+       } else {
+               ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+       }
+       return ret_code;
 }
 
 /**
@@ -465,7 +484,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
 static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
                                                    u16 *checksum)
 {
-       i40e_status ret_code = 0;
+       i40e_status ret_code;
        struct i40e_virt_mem vmem;
        u16 pcie_alt_module = 0;
        u16 checksum_local = 0;
@@ -545,15 +564,16 @@ i40e_calc_nvm_checksum_exit:
  **/
 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
 {
-       i40e_status ret_code = 0;
+       i40e_status ret_code;
        u16 checksum;
        __le16 le_sum;
 
        ret_code = i40e_calc_nvm_checksum(hw, &checksum);
-       le_sum = cpu_to_le16(checksum);
-       if (!ret_code)
+       if (!ret_code) {
+               le_sum = cpu_to_le16(checksum);
                ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
                                             1, &le_sum, true);
+       }
 
        return ret_code;
 }
@@ -632,7 +652,7 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
        return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
 }
 
-static char *i40e_nvm_update_state_str[] = {
+static const char * const i40e_nvm_update_state_str[] = {
        "I40E_NVMUPD_INVALID",
        "I40E_NVMUPD_READ_CON",
        "I40E_NVMUPD_READ_SNT",
index e51e1567837c76f85591879e64b3bfa0cfd562f0..bb9d583e5416fdf469b7853a92ce8c435901bafd 100644 (file)
@@ -58,8 +58,8 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
 void i40e_idle_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
                                bool pf_lut, u8 *lut, u16 lut_size);
@@ -259,6 +259,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw);
 void i40e_clear_hw(struct i40e_hw *hw);
 void i40e_clear_pxe_mode(struct i40e_hw *hw);
 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+i40e_status i40e_update_link_info(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
                                      u32 *max_bw, u32 *min_bw, bool *min_valid,
@@ -321,4 +322,6 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
                               void *buff, u16 *ret_buff_size,
                               u8 *ret_next_table, u32 *ret_next_index,
                               struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+                                                   u16 vsi_seid);
 #endif /* _I40E_PROTOTYPE_H_ */
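
The char * to const char * changes above let i40e_aq_str() and i40e_stat_str() return string literals without discarding const, and changing the state-name table to const char * const makes both the strings and the pointer array read-only. A minimal illustration of the pattern (names hypothetical):

    static const char * const state_str[] = {
            "I40E_NVMUPD_INVALID",
            "I40E_NVMUPD_READ_CON",
    };

    static const char *state_name(unsigned int s)
    {
            /* writes through the returned pointer now fail to compile */
            return (s < ARRAY_SIZE(state_str)) ? state_str[s] : "UNKNOWN";
    }
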
index 552c84e2e05d7ee0b5ff0b90a07203786830c49a..565ca7c835bc3989459065e129254e0a16eadba6 100644 (file)
@@ -674,8 +674,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
                struct timespec64 ts;
                u32 regval;
 
-               dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
-                        netdev->name);
+               if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+                       dev_info(&pf->pdev->dev, "PHC enabled\n");
                pf->flags |= I40E_FLAG_PTP;
 
                /* Ensure the clocks are running. */
index 01e5ece8046d180c438adef1dc1198a40305c1cd..635b3ac17877b153eba5c77a352ab8428d2b0742 100644 (file)
@@ -815,6 +815,8 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
  * i40e_set_new_dynamic_itr - Find new ITR level
  * @rc: structure containing ring performance data
  *
+ * Returns true if ITR changed, false if not
+ *
  * Stores a new ITR value based on packets and byte counts during
  * the last interrupt.  The advantage of per interrupt computation
  * is faster updates and more accurate ITR for the current traffic
@@ -823,21 +825,32 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
  * testing data as well as attempting to minimize response time
  * while increasing bulk throughput.
  **/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 {
        enum i40e_latency_range new_latency_range = rc->latency_range;
+       struct i40e_q_vector *qv = rc->ring->q_vector;
        u32 new_itr = rc->itr;
        int bytes_per_int;
+       int usecs;
 
        if (rc->total_packets == 0 || !rc->itr)
-               return;
+               return false;
 
        /* simple throttle-rate management
-        *   0-10MB/s   lowest (100000 ints/s)
+        *   0-10MB/s   lowest (50000 ints/s)
         *  10-20MB/s   low    (20000 ints/s)
-        *  20-1249MB/s bulk   (8000 ints/s)
+        *  20-1249MB/s bulk   (18000 ints/s)
+        *  > 40000 Rx packets per second (8000 ints/s)
+        *
+        * The math works out because the divisor is in 10^(-6), which
+        * turns the bytes/usec input value into MB/s. Be sure to use
+        * usecs, as the values written to the ITR registers are in
+        * 2-usec increments, and be sure to use the smoothed values
+        * that the countdown timer gives us.
         */
-       bytes_per_int = rc->total_bytes / rc->itr;
+       usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+       bytes_per_int = rc->total_bytes / usecs;
+
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                if (bytes_per_int > 10)
@@ -850,35 +863,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                        new_latency_range = I40E_LOWEST_LATENCY;
                break;
        case I40E_BULK_LATENCY:
-               if (bytes_per_int <= 20)
-                       new_latency_range = I40E_LOW_LATENCY;
-               break;
+       case I40E_ULTRA_LATENCY:
        default:
                if (bytes_per_int <= 20)
                        new_latency_range = I40E_LOW_LATENCY;
                break;
        }
+
+       /* This adjusts Rx more aggressively when streaming small
+        * packets.  The value of 40000 was picked because it is just
+        * beyond what the hardware can receive per second when in
+        * low-latency mode.
+        */
+#define RX_ULTRA_PACKET_RATE 40000
+
+       if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+           (&qv->rx == rc))
+               new_latency_range = I40E_ULTRA_LATENCY;
+
        rc->latency_range = new_latency_range;
 
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
-               new_itr = I40E_ITR_100K;
+               new_itr = I40E_ITR_50K;
                break;
        case I40E_LOW_LATENCY:
                new_itr = I40E_ITR_20K;
                break;
        case I40E_BULK_LATENCY:
+               new_itr = I40E_ITR_18K;
+               break;
+       case I40E_ULTRA_LATENCY:
                new_itr = I40E_ITR_8K;
                break;
        default:
                break;
        }
 
-       if (new_itr != rc->itr)
-               rc->itr = new_itr;
-
        rc->total_bytes = 0;
        rc->total_packets = 0;
+
+       if (new_itr != rc->itr) {
+               rc->itr = new_itr;
+               return true;
+       }
+
+       return false;
 }
 
 /**
@@ -1268,16 +1298,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
                             struct sk_buff *skb, u16 vlan_tag)
 {
        struct i40e_q_vector *q_vector = rx_ring->q_vector;
-       struct i40e_vsi *vsi = rx_ring->vsi;
-       u64 flags = vsi->back->flags;
 
        if (vlan_tag & VLAN_VID_MASK)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
-       if (flags & I40E_FLAG_IN_NETPOLL)
-               netif_rx(skb);
-       else
-               napi_gro_receive(&q_vector->napi, skb);
+       napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
@@ -1752,6 +1777,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        return total_rx_packets;
 }
 
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+       u32 val;
+
+       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+             (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+             (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+
+       return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_PFINT_DYN_CTLN
+
 /**
  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
  * @vsi: the VSI we care about
@@ -1762,54 +1802,69 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
                                          struct i40e_q_vector *q_vector)
 {
        struct i40e_hw *hw = &vsi->back->hw;
-       u16 old_itr;
+       bool rx = false, tx = false;
+       u32 rxval, txval;
        int vector;
-       u32 val;
 
        vector = (q_vector->v_idx + vsi->base_vector);
+
+       /* skip the dynamic ITR calculation while in countdown mode, or
+        * if dynamic ITR is disabled for both Rx and Tx
+        */
+       rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+       if (q_vector->itr_countdown > 0 ||
+           (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+            !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+               goto enable_int;
+       }
+
        if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
-               old_itr = q_vector->rx.itr;
-               i40e_set_new_dynamic_itr(&q_vector->rx);
-               if (old_itr != q_vector->rx.itr) {
-                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-                       (I40E_RX_ITR <<
-                               I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-                       (q_vector->rx.itr <<
-                               I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
-               } else {
-                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-                       (I40E_ITR_NONE <<
-                               I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
-               }
-               if (!test_bit(__I40E_DOWN, &vsi->state))
-                       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-       } else {
-               i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
+               rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+               rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
        }
+
        if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
-               old_itr = q_vector->tx.itr;
-               i40e_set_new_dynamic_itr(&q_vector->tx);
-               if (old_itr != q_vector->tx.itr) {
-                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-                               (I40E_TX_ITR <<
-                                  I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-                               (q_vector->tx.itr <<
-                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
-               } else {
-                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-                               (I40E_ITR_NONE <<
-                                  I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
-               }
-               if (!test_bit(__I40E_DOWN, &vsi->state))
-                       wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
-                             vsi->base_vector - 1), val);
-       } else {
-               i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
+               tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+               txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
+       }
+
+       if (rx || tx) {
+               /* get the higher of the two ITR adjustments and
+                * use the same value for both ITR registers
+                * when in adaptive mode (Rx and/or Tx)
+                */
+               u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+               q_vector->tx.itr = q_vector->rx.itr = itr;
+               txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+               tx = true;
+               rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+               rx = true;
        }
+
+       /* the interrupt only needs to be enabled once, but both
+        * ITR values may need to be updated
+        */
+       if (rx) {
+               /* set INTENA_MSK_MASK so that this first write
+                * won't actually enable the interrupt, instead just
+                * updating the ITR (bit 31 on both PF and VF)
+                */
+               rxval |= BIT(31);
+               /* don't check _DOWN because interrupt isn't being enabled */
+               wr32(hw, INTREG(vector - 1), rxval);
+       }
+
+enable_int:
+       if (!test_bit(__I40E_DOWN, &vsi->state))
+               wr32(hw, INTREG(vector - 1), txval);
+
+       if (q_vector->itr_countdown)
+               q_vector->itr_countdown--;
+       else
+               q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
 }
 
 /**
@@ -1830,7 +1885,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        bool clean_complete = true;
        bool arm_wb = false;
        int budget_per_ring;
-       int cleaned;
+       int work_done = 0;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
                napi_complete(napi);
@@ -1846,22 +1901,31 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                ring->arm_wb = false;
        }
 
+       /* Handle case where we are called by netpoll with a budget of 0 */
+       if (budget <= 0)
+               goto tx_only;
+
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
         */
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
        i40e_for_each_ring(ring, q_vector->rx) {
+               int cleaned;
+
                if (ring_is_ps_enabled(ring))
                        cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
                else
                        cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+
+               work_done += cleaned;
                /* if we didn't clean as many as budgeted, we must be done */
                clean_complete &= (budget_per_ring != cleaned);
        }
 
        /* If work not completed, return budget and polling will return */
        if (!clean_complete) {
+tx_only:
                if (arm_wb)
                        i40e_force_wb(vsi, q_vector);
                return budget;
@@ -1871,7 +1935,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                q_vector->arm_wb_state = false;
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
                i40e_update_enable_itr(vsi, q_vector);
        } else { /* Legacy mode */
@@ -2123,6 +2187,7 @@ out:
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
  * @hdr_len:  ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to u64 object
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error
@@ -2182,6 +2247,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
  * @tx_flags: the collected send information
+ * @cd_type_cmd_tso_mss: ptr to u64 object
  *
  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
  **/
@@ -2224,6 +2290,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
  * @cd_tunneling: ptr to context desc bits
  **/
 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
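
A worked example of the new throttle-rate math in i40e_set_new_dynamic_itr() above, assuming ITR_COUNTDOWN_START is 100 (it is defined in the driver headers, not in this diff). This is a standalone sketch, not driver code:

    #include <stdio.h>

    int main(void)
    {
            unsigned int itr = 0x19;               /* I40E_ITR_20K, 2-usec units */
            unsigned int usecs = (itr << 1) * 100; /* 25 * 2 * 100 = 5000 usecs  */
            unsigned int total_bytes = 600000;     /* seen in the sample window  */
            unsigned int total_packets = 250;

            /* bytes/usec equals MB/s because the divisor is in 10^-6 seconds */
            printf("bytes_per_int = %u MB/s\n", total_bytes / usecs);    /* 120 */

            /* 250 packets / 5000 usecs = 50000 pkts/s, above the 40000
             * RX_ULTRA_PACKET_RATE cutoff, so Rx would move to ULTRA latency
             */
            printf("pps = %u\n", total_packets * 1000000 / usecs);
            return 0;
    }

The companion rework of i40e_update_enable_itr() then writes the Rx value with bit 31 (INTENA_MSK) set, so the first register write only updates the ITR; the final Tx write is the one that actually re-enables the interrupt.
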
index 75cecfa6e3386ef721934149bd76c97803dfd008..6779fb771d6af9b9c8fcf46fd56ccb498c2951e3 100644 (file)
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
 #define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
 #define I40E_ITR_100K              0x0005
+#define I40E_ITR_50K               0x000A
 #define I40E_ITR_20K               0x0019
+#define I40E_ITR_18K               0x001B
 #define I40E_ITR_8K                0x003E
 #define I40E_ITR_4K                0x007A
-#define I40E_ITR_RX_DEF            I40E_ITR_8K
-#define I40E_ITR_TX_DEF            I40E_ITR_4K
+#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF            I40E_ITR_20K
+#define I40E_ITR_TX_DEF            I40E_ITR_20K
 #define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
 #define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
 #define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
 #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
 #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
 #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA                  BIT(6)
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K              125     /* 8000 ints/sec */
+#define I40E_INTRL_62K             16      /* 62500 ints/sec */
+#define I40E_INTRL_83K             12      /* 83333 ints/sec */
 
 #define I40E_QUEUE_END_OF_LIST 0x7FF
 
@@ -286,6 +298,7 @@ enum i40e_latency_range {
        I40E_LOWEST_LATENCY = 0,
        I40E_LOW_LATENCY = 1,
        I40E_BULK_LATENCY = 2,
+       I40E_ULTRA_LATENCY = 3,
 };
 
 struct i40e_ring_container {
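
The new INTRL macros above encode an interrupt rate limit as usecs/4 with bit 6 as the enable flag; a quick round-trip showing the 4-usec granularity (standalone sketch):

    #include <stdio.h>

    #define INTRL_ENA               (1u << 6)
    #define INTRL_USEC_TO_REG(set)  ((set) ? ((set) >> 2) | INTRL_ENA : 0)
    #define INTRL_REG_TO_USEC(r)    (((r) & ~INTRL_ENA) << 2)

    int main(void)
    {
            unsigned int usecs = 125;  /* I40E_INTRL_8K: 1000000 / 8000 ints/s */
            unsigned int reg = INTRL_USEC_TO_REG(usecs);

            printf("reg  = 0x%02X\n", reg);                      /* 0x5F */
            printf("back = %u usecs\n", INTRL_REG_TO_USEC(reg)); /* 124: 4-usec grid */
            return 0;
    }
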
index d1ec5a4326cf319b7b4af110447f4e720dc7b8bb..dd2da356d9a1bb6628fcd0825f400350fc037e6f 100644 (file)
 #include "i40e_adminq.h"
 #include "i40e_hmc.h"
 #include "i40e_lan_hmc.h"
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710          0x1572
-#define I40E_DEV_ID_QEMU               0x1574
-#define I40E_DEV_ID_KX_A               0x157F
-#define I40E_DEV_ID_KX_B               0x1580
-#define I40E_DEV_ID_KX_C               0x1581
-#define I40E_DEV_ID_QSFP_A             0x1583
-#define I40E_DEV_ID_QSFP_B             0x1584
-#define I40E_DEV_ID_QSFP_C             0x1585
-#define I40E_DEV_ID_10G_BASE_T         0x1586
-#define I40E_DEV_ID_20G_KR2            0x1587
-#define I40E_DEV_ID_20G_KR2_A          0x1588
-#define I40E_DEV_ID_10G_BASE_T4                0x1589
-#define I40E_DEV_ID_VF                 0x154C
-#define I40E_DEV_ID_VF_HV              0x1571
-#define I40E_DEV_ID_SFP_X722           0x37D0
-#define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
-#define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
-#define I40E_DEV_ID_X722_VF            0x37CD
-#define I40E_DEV_ID_X722_VF_HV         0x37D9
-
-#define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
-                                        (d) == I40E_DEV_ID_QSFP_B  || \
-                                        (d) == I40E_DEV_ID_QSFP_C)
+#include "i40e_devids.h"
 
 /* I40E_MASK is a macro used on 32 bit registers */
 #define I40E_MASK(mask, shift) (mask << shift)
@@ -191,16 +167,65 @@ struct i40e_link_status {
        bool crc_enable;
        u8 pacing;
        u8 requested_speeds;
+       u8 module_type[3];
+       /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP           0x03
+#define I40E_MODULE_TYPE_QSFP          0x0D
+       /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE    0x01
+#define I40E_MODULE_TYPE_40G_LR4       0x02
+#define I40E_MODULE_TYPE_40G_SR4       0x04
+#define I40E_MODULE_TYPE_40G_CR4       0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR   0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR   0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM  0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER   0x80
+       /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX   0x01
+#define I40E_MODULE_TYPE_1000BASE_LX   0x02
+#define I40E_MODULE_TYPE_1000BASE_CX   0x04
+#define I40E_MODULE_TYPE_1000BASE_T    0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+       I40E_CAP_PHY_TYPE_SGMII           = BIT(I40E_PHY_TYPE_SGMII),
+       I40E_CAP_PHY_TYPE_1000BASE_KX     = BIT(I40E_PHY_TYPE_1000BASE_KX),
+       I40E_CAP_PHY_TYPE_10GBASE_KX4     = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+       I40E_CAP_PHY_TYPE_10GBASE_KR      = BIT(I40E_PHY_TYPE_10GBASE_KR),
+       I40E_CAP_PHY_TYPE_40GBASE_KR4     = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+       I40E_CAP_PHY_TYPE_XAUI            = BIT(I40E_PHY_TYPE_XAUI),
+       I40E_CAP_PHY_TYPE_XFI             = BIT(I40E_PHY_TYPE_XFI),
+       I40E_CAP_PHY_TYPE_SFI             = BIT(I40E_PHY_TYPE_SFI),
+       I40E_CAP_PHY_TYPE_XLAUI           = BIT(I40E_PHY_TYPE_XLAUI),
+       I40E_CAP_PHY_TYPE_XLPPI           = BIT(I40E_PHY_TYPE_XLPPI),
+       I40E_CAP_PHY_TYPE_40GBASE_CR4_CU  = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_CR1_CU  = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_AOC     = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+       I40E_CAP_PHY_TYPE_40GBASE_AOC     = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+       I40E_CAP_PHY_TYPE_100BASE_TX      = BIT(I40E_PHY_TYPE_100BASE_TX),
+       I40E_CAP_PHY_TYPE_1000BASE_T      = BIT(I40E_PHY_TYPE_1000BASE_T),
+       I40E_CAP_PHY_TYPE_10GBASE_T       = BIT(I40E_PHY_TYPE_10GBASE_T),
+       I40E_CAP_PHY_TYPE_10GBASE_SR      = BIT(I40E_PHY_TYPE_10GBASE_SR),
+       I40E_CAP_PHY_TYPE_10GBASE_LR      = BIT(I40E_PHY_TYPE_10GBASE_LR),
+       I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_CR1     = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+       I40E_CAP_PHY_TYPE_40GBASE_CR4     = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+       I40E_CAP_PHY_TYPE_40GBASE_SR4     = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+       I40E_CAP_PHY_TYPE_40GBASE_LR4     = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+       I40E_CAP_PHY_TYPE_1000BASE_SX     = BIT(I40E_PHY_TYPE_1000BASE_SX),
+       I40E_CAP_PHY_TYPE_1000BASE_LX     = BIT(I40E_PHY_TYPE_1000BASE_LX),
+       I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
+                                        BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+       I40E_CAP_PHY_TYPE_20GBASE_KR2     = BIT(I40E_PHY_TYPE_20GBASE_KR2)
 };
 
 struct i40e_phy_info {
        struct i40e_link_status link_info;
        struct i40e_link_status link_info_old;
-       u32 autoneg_advertised;
-       u32 phy_id;
-       u32 module_type;
        bool get_link_info;
        enum i40e_media_type media_type;
+       /* all the phy types the NVM is capable of */
+       enum i40e_aq_capabilities_phy_type phy_types;
 };
 
 #define I40E_HW_CAP_MAX_GPIO                   30
@@ -289,6 +314,7 @@ struct i40e_nvm_info {
        bool blank_nvm_mode;      /* is NVM empty (no FW present)*/
        u16 version;              /* NVM package version */
        u32 eetrack;              /* NVM data version */
+       u32 oem_ver;              /* OEM version info */
 };
 
 /* definitions used in NVM update support */
@@ -417,6 +443,8 @@ struct i40e_fc_info {
 #define I40E_APP_PROTOID_FIP           0x8914
 #define I40E_APP_SEL_ETHTYPE           0x1
 #define I40E_APP_SEL_TCPIP             0x2
+#define I40E_CEE_APP_SEL_ETHTYPE       0x0
+#define I40E_CEE_APP_SEL_TCPIP         0x1
 
 /* CEE or IEEE 802.1Qaz ETS Configuration data */
 struct i40e_dcb_ets_config {
@@ -447,6 +475,8 @@ struct i40e_dcbx_config {
        u8  dcbx_mode;
 #define I40E_DCBX_MODE_CEE     0x1
 #define I40E_DCBX_MODE_IEEE    0x2
+       u8  app_mode;
+#define I40E_DCBX_APPS_NON_WILLING     0x1
        u32 numapps;
        u32 tlv_status; /* CEE mode TLV status */
        struct i40e_dcb_ets_config etscfg;
@@ -514,6 +544,9 @@ struct i40e_hw {
        struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
        struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
 
+#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+       u64 flags;
+
        /* debug mask */
        u32 debug_mask;
        char err_str[16];
@@ -1035,8 +1068,8 @@ enum i40e_filter_program_desc_fd_status {
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT       23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
-                                      BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK        (0x1FFUL << \
+                                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT    4
 #define I40E_TXD_FLTR_QW1_CMD_MASK     (0xFFFFULL << \
@@ -1204,6 +1237,8 @@ struct i40e_hw_port_stats {
 #define I40E_SR_EMP_MODULE_PTR                 0x0F
 #define I40E_SR_PBA_FLAGS                      0x15
 #define I40E_SR_PBA_BLOCK_PTR                  0x16
+#define I40E_SR_BOOT_CONFIG_PTR                        0x17
+#define I40E_NVM_OEM_VER_OFF                   0x83
 #define I40E_SR_NVM_DEV_STARTER_VERSION                0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
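
The module_type[3] bytes added to i40e_link_status above split module identification across three bytes: identifier, 10/40G compliance codes, and 1G compliance codes. A sketch of how they might be decoded (the helper is hypothetical):

    static bool link_module_is_40g_sr4(const struct i40e_link_status *ls)
    {
            /* byte 0: module identifier; byte 1: 10/40G compliance codes */
            return ls->module_type[0] == I40E_MODULE_TYPE_QSFP &&
                   (ls->module_type[1] & I40E_MODULE_TYPE_40G_SR4);
    }
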
index 95d0f8c5d484766cacf6cbb543ff70c2aa66eef4..ae879826084b790a467f9eed01196f430daf47ae 100644 (file)
@@ -150,6 +150,7 @@ struct i40e_virtchnl_vsi_resource {
 #define I40E_VIRTCHNL_VF_OFFLOAD_FCOE          0x00000004
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ                0x00000008
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG       0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR     0x00000020
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
 
index ee747dc5d6174e7a7aeb1e49780e95e76b1a0372..44462b40f2d7666f059b6741123107755e18e415 100644 (file)
@@ -547,6 +547,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                 */
                if (vf->port_vlan_id)
                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
                                    vf->port_vlan_id ? vf->port_vlan_id : -1,
                                    true, false);
@@ -559,6 +561,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                if (!f)
                        dev_info(&pf->pdev->dev,
                                 "Could not allocate VF broadcast filter\n");
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
        }
 
        /* program mac filter */
@@ -703,6 +706,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
         */
        vf->num_queue_pairs = 0;
        vf->vf_states = 0;
+       clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 }
 
 /**
@@ -841,11 +845,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 complete_reset:
        /* reallocate VF resources to reset the VSI state */
        i40e_free_vf_res(vf);
-       i40e_alloc_vf_res(vf);
-       i40e_enable_vf_mappings(vf);
-       set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
-       clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
-
+       if (!i40e_alloc_vf_res(vf)) {
+               i40e_enable_vf_mappings(vf);
+               set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+               clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+       }
        /* tell the VF the reset is done */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
        i40e_flush(hw);
@@ -940,6 +944,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
        if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
                ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
                if (ret) {
+                       pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
                        pf->num_alloc_vfs = 0;
                        goto err_iov;
                }
@@ -964,8 +969,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
                /* VF resources get allocated during reset */
                i40e_reset_vf(&vfs[i], false);
 
-               /* enable VF vplan_qtable mappings */
-               i40e_enable_vf_mappings(&vfs[i]);
        }
        pf->num_alloc_vfs = num_alloc_vfs;
 
@@ -1103,6 +1106,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
                }
        } else {
                vf->num_valid_msgs++;
+               /* reset the invalid-message counter when a valid message is received. */
+               vf->num_invalid_msgs = 0;
        }
 
        aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,  v_opcode, v_retval,
@@ -1204,6 +1209,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        } else {
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
        }
+
+       if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1593,6 +1602,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        }
        vsi = pf->vsi[vf->lan_vsi_idx];
 
+       /* Lock once, because every function inside the for loop accesses
+        * the VSI's MAC filter list, which must be protected by the same lock.
+        */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
        /* add new addresses to the list */
        for (i = 0; i < al->num_elements; i++) {
                struct i40e_mac_filter *f;
@@ -1611,9 +1625,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        dev_err(&pf->pdev->dev,
                                "Unable to add VF MAC filter\n");
                        ret = I40E_ERR_PARAM;
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
                        goto error_param;
                }
        }
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* program the updated filter list */
        if (i40e_sync_vsi_filters(vsi, false))
@@ -1661,10 +1677,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        }
        vsi = pf->vsi[vf->lan_vsi_idx];
 
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        /* delete addresses from the list */
        for (i = 0; i < al->num_elements; i++)
                i40e_del_filter(vsi, al->list[i].addr,
                                I40E_VLAN_ANY, true, false);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* program the updated filter list */
        if (i40e_sync_vsi_filters(vsi, false))
@@ -2061,6 +2079,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
                goto error_param;
        }
 
+       /* Lock once, because the add/del_filter functions invoked below
+        * require mac_filter_list_lock to be held
+        */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
        /* delete the temporary mac address */
        i40e_del_filter(vsi, vf->default_lan_addr.addr,
                        vf->port_vlan_id ? vf->port_vlan_id : -1,
@@ -2072,6 +2095,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        list_for_each_entry(f, &vsi->mac_filter_list, list)
                i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
 
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
        dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
        /* program mac filter */
        if (i40e_sync_vsi_filters(vsi, false)) {
@@ -2104,6 +2129,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
        u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
+       bool is_vsi_in_vlan = false;
        struct i40e_vsi *vsi;
        struct i40e_vf *vf;
        int ret = 0;
@@ -2133,7 +2159,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                /* duplicate request, so just return success */
                goto error_pvid;
 
-       if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+       is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
                dev_err(&pf->pdev->dev,
                        "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
                        vf_id);
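
The hunks above wrap every walk of the VSI's MAC filter list in mac_filter_list_lock, taken once around the whole walk rather than per element, and drop it before i40e_sync_vsi_filters() runs. Condensed from the changes above, the resulting shape is:

    spin_lock_bh(&vsi->mac_filter_list_lock);
    list_for_each_entry(f, &vsi->mac_filter_list, list)
            i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
    spin_unlock_bh(&vsi->mac_filter_list_lock);

    /* program the updated filter list outside the lock */
    i40e_sync_vsi_filters(vsi, false);
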
index 736f6f08b4f26c98042375db16767eefca0fbe66..da44995def42f3b034a1b08780a0c901335e02c7 100644 (file)
@@ -29,8 +29,6 @@
 
 #include "i40e.h"
 
-#define I40E_MAX_MACVLAN_FILTERS 256
-#define I40E_MAX_VLAN_FILTERS 256
 #define I40E_MAX_VLANID 4095
 
 #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
@@ -98,7 +96,8 @@ struct i40e_vf {
 
        u8 num_queue_pairs;     /* num of qps assigned to VF vsis */
        u64 num_mdd_events;     /* num of mdd events detected */
-       u64 num_invalid_msgs;   /* num of malformed or invalid msgs detected */
+       /* num of consecutive malformed or invalid msgs detected */
+       u64 num_invalid_msgs;
        u64 num_valid_msgs;     /* num of valid msgs detected */
 
        unsigned long vf_caps;  /* vf's adv. capabilities */
index 3eba36913c1d18d98e8042a793d55411dd4f3277..fd123ca60761e84721a02ab991ed677925338126 100644 (file)
@@ -373,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
 
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
-       hw->aq.asq.count = hw->aq.num_asq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -391,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -432,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
 
        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
-       hw->aq.arq.count = hw->aq.num_arq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -450,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
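
Both admin-queue hunks above defer the count assignment until every allocation has succeeded, presumably because shutdown paths treat a non-zero count as a live queue; setting it early meant a failed allocation could leave teardown freeing rings that were never allocated. The shape of the fix, with hypothetical allocation helpers:

    ret_code = alloc_ring(hw);              /* hypothetical */
    if (ret_code)
            goto init_adminq_exit;
    ret_code = alloc_bufs(hw);              /* hypothetical */
    if (ret_code)
            goto init_adminq_free_rings;

    /* only now is the queue marked live for teardown paths */
    hw->aq.asq.count = hw->aq.num_asq_entries;
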
index c8022092d36986acce3062af4ec8b2d643314495..fcb9ef34cc7a28566003b0c53824a04d736d1b95 100644 (file)
@@ -1719,11 +1719,13 @@ struct i40e_aqc_get_link_status {
        u8      phy_type;    /* i40e_aq_phy_type   */
        u8      link_speed;  /* i40e_aq_link_speed */
        u8      link_info;
-#define I40E_AQ_LINK_UP                        0x01
+#define I40E_AQ_LINK_UP                        0x01    /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION       0x01
 #define I40E_AQ_LINK_FAULT             0x02
 #define I40E_AQ_LINK_FAULT_TX          0x04
 #define I40E_AQ_LINK_FAULT_RX          0x08
 #define I40E_AQ_LINK_FAULT_REMOTE      0x10
+#define I40E_AQ_LINK_UP_PORT           0x20
 #define I40E_AQ_MEDIA_AVAILABLE                0x40
 #define I40E_AQ_SIGNAL_DETECT          0x80
        u8      an_info;
index b98b642b897a0384c7212a56a5520edd4aadd610..72b1942a94aab4748cbbd242e94677065dbcef4d 100644 (file)
@@ -87,7 +87,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
  * @hw: pointer to the HW structure
  * @aq_err: the AQ error code to convert
  **/
-char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
 {
        switch (aq_err) {
        case I40E_AQ_RC_OK:
@@ -147,7 +147,7 @@ char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
  * @hw: pointer to the HW structure
  * @stat_err: the status error code to convert
  **/
-char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 {
        switch (stat_err) {
        case 0:
@@ -331,25 +331,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
                        len = buf_len;
                /* write the full 16-byte chunks */
                for (i = 0; i < (len - 16); i += 16)
-                       i40e_debug(hw, mask,
-                                  "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
-                                  i, buf[i], buf[i + 1], buf[i + 2],
-                                  buf[i + 3], buf[i + 4], buf[i + 5],
-                                  buf[i + 6], buf[i + 7], buf[i + 8],
-                                  buf[i + 9], buf[i + 10], buf[i + 11],
-                                  buf[i + 12], buf[i + 13], buf[i + 14],
-                                  buf[i + 15]);
+                       i40e_debug(hw, mask, "\t0x%04X  %16ph\n", i, buf + i);
                /* write whatever's left over without overrunning the buffer */
-               if (i < len) {
-                       char d_buf[80];
-                       int j = 0;
-
-                       memset(d_buf, 0, sizeof(d_buf));
-                       j += sprintf(d_buf, "\t0x%04X ", i);
-                       while (i < len)
-                               j += sprintf(&d_buf[j], " %02X", buf[i++]);
-                       i40e_debug(hw, mask, "%s\n", d_buf);
-               }
+               if (i < len)
+                       i40e_debug(hw, mask, "\t0x%04X  %*ph\n",
+                                            i, len - i, buf + i);
        }
 }
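
The rewritten dump loop above leans on the kernel's %ph printk extension: a numeric field width (%16ph) or a '*' width taken from the argument list (%*ph) prints that many bytes of the buffer as space-separated hex, capped at 64 bytes. For example:

    u8 buf[20] = { 0xde, 0xad, 0xbe, 0xef };

    pr_debug("\t0x%04X  %16ph\n", 0, buf);          /* first 16 bytes            */
    pr_debug("\t0x%04X  %*ph\n", 16, 4, buf + 16);  /* 4-byte tail, runtime width */
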
 
@@ -443,9 +429,6 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
                                        I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
                                        I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
 
-       cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
-       cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
-
        status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
 
        return status;
@@ -520,8 +503,6 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
                                          I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
                                          I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
        cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
-       cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
-       cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
 
        status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
new file mode 100644 (file)
index 0000000..e6a39c9
--- /dev/null
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710          0x1572
+#define I40E_DEV_ID_QEMU               0x1574
+#define I40E_DEV_ID_KX_A               0x157F
+#define I40E_DEV_ID_KX_B               0x1580
+#define I40E_DEV_ID_KX_C               0x1581
+#define I40E_DEV_ID_QSFP_A             0x1583
+#define I40E_DEV_ID_QSFP_B             0x1584
+#define I40E_DEV_ID_QSFP_C             0x1585
+#define I40E_DEV_ID_10G_BASE_T         0x1586
+#define I40E_DEV_ID_20G_KR2            0x1587
+#define I40E_DEV_ID_20G_KR2_A          0x1588
+#define I40E_DEV_ID_10G_BASE_T4                0x1589
+#define I40E_DEV_ID_VF                 0x154C
+#define I40E_DEV_ID_VF_HV              0x1571
+#define I40E_DEV_ID_SFP_X722           0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
+#define I40E_DEV_ID_X722_VF            0x37CD
+#define I40E_DEV_ID_X722_VF_HV         0x37D9
+
+#define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
+                                        (d) == I40E_DEV_ID_QSFP_B  || \
+                                        (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
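
Moving the device-ID table into its own header lets the PF and VF drivers share one copy (the same block is deleted from the type headers above and below). The helper macro collapses the three 40G QSFP variants into a single test, e.g.:

    /* hw->device_id holds the PCI device ID (sketch) */
    if (i40e_is_40G_device(hw->device_id))
            configure_40g_link(hw);         /* hypothetical helper */
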
index 55ae4b0f8192fea531f0ea0e29013b111b540a4b..cbd9a1b078abf6caaa959218e0b4a4920c414d02 100644 (file)
@@ -60,8 +60,8 @@ void i40e_idle_aq(struct i40e_hw *hw);
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
                                  bool pf_lut, u8 *lut, u16 lut_size);
@@ -101,4 +101,6 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
                                u16 vsi_seid, u16 queue, bool is_add,
                                struct i40e_control_filter_stats *stats,
                                struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+                                                   u16 vsi_seid);
 #endif /* _I40E_PROTOTYPE_H_ */
index 0e71eb4633d5cd739a1ba5216c9dc7c7e442c97b..47e9a90d6b100d9874c6a6e77e123c7e5ccc7ac2 100644 (file)
@@ -318,6 +318,8 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector
  * i40e_set_new_dynamic_itr - Find new ITR level
  * @rc: structure containing ring performance data
  *
+ * Returns true if ITR changed, false if not
+ *
  * Stores a new ITR value based on packets and byte counts during
  * the last interrupt.  The advantage of per interrupt computation
  * is faster updates and more accurate ITR for the current traffic
@@ -326,21 +328,32 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector
  * testing data as well as attempting to minimize response time
  * while increasing bulk throughput.
  **/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 {
        enum i40e_latency_range new_latency_range = rc->latency_range;
+       struct i40e_q_vector *qv = rc->ring->q_vector;
        u32 new_itr = rc->itr;
        int bytes_per_int;
+       int usecs;
 
        if (rc->total_packets == 0 || !rc->itr)
-               return;
+               return false;
 
        /* simple throttle-rate management
-        *   0-10MB/s   lowest (100000 ints/s)
+        *   0-10MB/s   lowest (50000 ints/s)
         *  10-20MB/s   low    (20000 ints/s)
-        *  20-1249MB/s bulk   (8000 ints/s)
+        *  20-1249MB/s bulk   (18000 ints/s)
+        *  > 40000 Rx packets per second (8000 ints/s)
+        *
+        * The math works out because the divisor is in 10^(-6), which
+        * turns the bytes/usec input value into MB/s. Be sure to use
+        * usecs, as the values written to the ITR registers are in
+        * 2-usec increments, and be sure to use the smoothed values
+        * that the countdown timer gives us.
         */
-       bytes_per_int = rc->total_bytes / rc->itr;
+       usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+       bytes_per_int = rc->total_bytes / usecs;
+
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                if (bytes_per_int > 10)
@@ -353,35 +366,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                        new_latency_range = I40E_LOWEST_LATENCY;
                break;
        case I40E_BULK_LATENCY:
-               if (bytes_per_int <= 20)
-                       new_latency_range = I40E_LOW_LATENCY;
-               break;
+       case I40E_ULTRA_LATENCY:
        default:
                if (bytes_per_int <= 20)
                        new_latency_range = I40E_LOW_LATENCY;
                break;
        }
+
+       /* This adjusts Rx more aggressively when streaming small
+        * packets.  The value of 40000 was picked because it is just
+        * beyond what the hardware can receive per second when in
+        * low-latency mode.
+        */
+#define RX_ULTRA_PACKET_RATE 40000
+
+       if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+           (&qv->rx == rc))
+               new_latency_range = I40E_ULTRA_LATENCY;
+
        rc->latency_range = new_latency_range;
 
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
-               new_itr = I40E_ITR_100K;
+               new_itr = I40E_ITR_50K;
                break;
        case I40E_LOW_LATENCY:
                new_itr = I40E_ITR_20K;
                break;
        case I40E_BULK_LATENCY:
+               new_itr = I40E_ITR_18K;
+               break;
+       case I40E_ULTRA_LATENCY:
                new_itr = I40E_ITR_8K;
                break;
        default:
                break;
        }
 
-       if (new_itr != rc->itr)
-               rc->itr = new_itr;
-
        rc->total_bytes = 0;
        rc->total_packets = 0;
+
+       if (new_itr != rc->itr) {
+               rc->itr = new_itr;
+               return true;
+       }
+
+       return false;
 }
 
 /*
@@ -742,16 +772,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
                             struct sk_buff *skb, u16 vlan_tag)
 {
        struct i40e_q_vector *q_vector = rx_ring->q_vector;
-       struct i40e_vsi *vsi = rx_ring->vsi;
-       u64 flags = vsi->back->flags;
 
        if (vlan_tag & VLAN_VID_MASK)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
-       if (flags & I40E_FLAG_IN_NETPOLL)
-               netif_rx(skb);
-       else
-               napi_gro_receive(&q_vector->napi, skb);
+       napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
@@ -1192,6 +1217,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        return total_rx_packets;
 }
 
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+       u32 val;
+
+       val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+             I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+             (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+             (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+
+       return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_VFINT_DYN_CTLN1
+
 /**
  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
  * @vsi: the VSI we care about
@@ -1202,55 +1242,67 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
                                          struct i40e_q_vector *q_vector)
 {
        struct i40e_hw *hw = &vsi->back->hw;
-       u16 old_itr;
+       bool rx = false, tx = false;
+       u32 rxval, txval;
        int vector;
-       u32 val;
 
        vector = (q_vector->v_idx + vsi->base_vector);
+
+       /* skip the dynamic ITR calculation while in countdown mode, or
+        * if dynamic ITR is disabled for both Rx and Tx
+        */
+       rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+       if (q_vector->itr_countdown > 0 ||
+           (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+            !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+               goto enable_int;
+       }
+
        if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
-               old_itr = q_vector->rx.itr;
-               i40e_set_new_dynamic_itr(&q_vector->rx);
-               if (old_itr != q_vector->rx.itr) {
-                       val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-                       I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
-                       (I40E_RX_ITR <<
-                               I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
-                       (q_vector->rx.itr <<
-                               I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
-               } else {
-                       val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-                       I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
-                       (I40E_ITR_NONE <<
-                               I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
-               }
-               if (!test_bit(__I40E_DOWN, &vsi->state))
-                       wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
-       } else {
-               i40evf_irq_enable_queues(vsi->back, 1
-                       << q_vector->v_idx);
+               rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+               rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
        }
        if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
-               old_itr = q_vector->tx.itr;
-               i40e_set_new_dynamic_itr(&q_vector->tx);
-               if (old_itr != q_vector->tx.itr) {
-                       val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-                               I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
-                               (I40E_TX_ITR <<
-                                  I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
-                               (q_vector->tx.itr <<
-                                  I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+               tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+               txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
+       }
+       if (rx || tx) {
+               /* get the higher of the two ITR adjustments and
+                * use the same value for both ITR registers
+                * when in adaptive mode (Rx and/or Tx)
+                */
+               u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
 
-               } else {
-                       val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-                               I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
-                               (I40E_ITR_NONE <<
-                                  I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
-               }
-               if (!test_bit(__I40E_DOWN, &vsi->state))
-                       wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
-       } else {
-               i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+               q_vector->tx.itr = q_vector->rx.itr = itr;
+               txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+               tx = true;
+               rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+               rx = true;
+       }
+
+       /* the interrupt only needs to be enabled once, but both
+        * ITR values may need to be updated
+        */
+       if (rx) {
+               /* set INTENA_MSK_MASK so that this first write
+                * won't actually enable the interrupt, instead just
+                * updating the ITR (bit 31 on both PF and VF)
+                */
+               rxval |= BIT(31);
+               /* don't check _DOWN because interrupt isn't being enabled */
+               wr32(hw, INTREG(vector - 1), rxval);
        }
+
+enable_int:
+       if (!test_bit(__I40E_DOWN, &vsi->state))
+               wr32(hw, INTREG(vector - 1), txval);
+
+       if (q_vector->itr_countdown)
+               q_vector->itr_countdown--;
+       else
+               q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
 }
 
 /**
@@ -1271,7 +1323,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
        bool clean_complete = true;
        bool arm_wb = false;
        int budget_per_ring;
-       int cleaned;
+       int work_done = 0;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
                napi_complete(napi);
@@ -1287,22 +1339,31 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
                ring->arm_wb = false;
        }
 
+       /* Handle case where we are called by netpoll with a budget of 0 */
+       if (budget <= 0)
+               goto tx_only;
+
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
         */
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
        i40e_for_each_ring(ring, q_vector->rx) {
+               int cleaned;
+
                if (ring_is_ps_enabled(ring))
                        cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
                else
                        cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+
+               work_done += cleaned;
                /* if we didn't clean as many as budgeted, we must be done */
                clean_complete &= (budget_per_ring != cleaned);
        }
 
        /* If work not completed, return budget and polling will return */
        if (!clean_complete) {
+tx_only:
                if (arm_wb)
                        i40evf_force_wb(vsi, q_vector);
                return budget;
@@ -1312,7 +1373,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
                q_vector->arm_wb_state = false;
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        i40e_update_enable_itr(vsi, q_vector);
        return 0;
 }
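
Like the PF version, the VF poll routine now reports its Rx work through napi_complete_done(), giving the networking core the per-poll work count that plain napi_complete() discarded. The contract, reduced to a sketch (the clean and re-enable helpers are hypothetical):

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int work_done = example_clean_rx(napi, budget); /* hypothetical */

            if (work_done == budget)
                    return budget;          /* more work pending: stay polling */

            /* done: report actual work and re-enable the interrupt */
            napi_complete_done(napi, work_done);
            example_reenable_irq(napi);     /* hypothetical */
            return 0;
    }
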
index 0c13ece00366c8bb45fb1d645f73d539753ac689..ebc1bf77f03606fb05312b2a5c84e788b92435cb 100644 (file)
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
 #define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
 #define I40E_ITR_100K              0x0005
+#define I40E_ITR_50K               0x000A
 #define I40E_ITR_20K               0x0019
+#define I40E_ITR_18K               0x001B
 #define I40E_ITR_8K                0x003E
 #define I40E_ITR_4K                0x007A
-#define I40E_ITR_RX_DEF            I40E_ITR_8K
-#define I40E_ITR_TX_DEF            I40E_ITR_4K
+#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF            I40E_ITR_20K
+#define I40E_ITR_TX_DEF            I40E_ITR_20K
 #define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
 #define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
 #define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
 #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
 #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
 #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA                  BIT(6)
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K              125     /* 8000 ints/sec */
+#define I40E_INTRL_62K             16      /* 62500 ints/sec */
+#define I40E_INTRL_83K             12      /* 83333 ints/sec */
 
 #define I40E_QUEUE_END_OF_LIST 0x7FF
 
@@ -79,16 +91,16 @@ enum i40e_dyn_idx_t {
        BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 #define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
-               BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
-               BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
-               BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
-               BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
-               BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
-               BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
 
 #define i40e_pf_get_default_rss_hena(pf) \
        (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
-               I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+         I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -281,6 +293,7 @@ enum i40e_latency_range {
        I40E_LOWEST_LATENCY = 0,
        I40E_LOW_LATENCY = 1,
        I40E_BULK_LATENCY = 2,
+       I40E_ULTRA_LATENCY = 3,
 };
 
 struct i40e_ring_container {
index a59b60ffd0ce67c0db6a670940e753afeb366f04..301fe2b6dd03b153f6c3d0814ca25ef84e69af1a 100644 (file)
 #include "i40e_adminq.h"
 #include "i40e_hmc.h"
 #include "i40e_lan_hmc.h"
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710          0x1572
-#define I40E_DEV_ID_QEMU               0x1574
-#define I40E_DEV_ID_KX_A               0x157F
-#define I40E_DEV_ID_KX_B               0x1580
-#define I40E_DEV_ID_KX_C               0x1581
-#define I40E_DEV_ID_QSFP_A             0x1583
-#define I40E_DEV_ID_QSFP_B             0x1584
-#define I40E_DEV_ID_QSFP_C             0x1585
-#define I40E_DEV_ID_10G_BASE_T         0x1586
-#define I40E_DEV_ID_20G_KR2            0x1587
-#define I40E_DEV_ID_20G_KR2_A          0x1588
-#define I40E_DEV_ID_10G_BASE_T4                0x1589
-#define I40E_DEV_ID_VF                 0x154C
-#define I40E_DEV_ID_VF_HV              0x1571
-#define I40E_DEV_ID_SFP_X722           0x37D0
-#define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
-#define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
-#define I40E_DEV_ID_X722_VF            0x37CD
-#define I40E_DEV_ID_X722_VF_HV         0x37D9
-
-#define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
-                                        (d) == I40E_DEV_ID_QSFP_B  || \
-                                        (d) == I40E_DEV_ID_QSFP_C)
+#include "i40e_devids.h"
 
 /* I40E_MASK is a macro used on 32 bit registers */
 #define I40E_MASK(mask, shift) (mask << shift)
@@ -191,16 +167,65 @@ struct i40e_link_status {
        bool crc_enable;
        u8 pacing;
        u8 requested_speeds;
+       u8 module_type[3];
+       /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP           0x03
+#define I40E_MODULE_TYPE_QSFP          0x0D
+       /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE    0x01
+#define I40E_MODULE_TYPE_40G_LR4       0x02
+#define I40E_MODULE_TYPE_40G_SR4       0x04
+#define I40E_MODULE_TYPE_40G_CR4       0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR   0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR   0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM  0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER   0x80
+       /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX   0x01
+#define I40E_MODULE_TYPE_1000BASE_LX   0x02
+#define I40E_MODULE_TYPE_1000BASE_CX   0x04
+#define I40E_MODULE_TYPE_1000BASE_T    0x08
+};
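
The three module_type bytes decode independently against the masks above; a hypothetical helper (not part of this patch) shows the intended use:

    /* Hypothetical example: is the plugged module a 40G SR4 QSFP? */
    static bool link_module_is_40g_sr4(const struct i40e_link_status *ls)
    {
            return ls->module_type[0] == I40E_MODULE_TYPE_QSFP &&
                   (ls->module_type[1] & I40E_MODULE_TYPE_40G_SR4);
    }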
+
+enum i40e_aq_capabilities_phy_type {
+       I40E_CAP_PHY_TYPE_SGMII           = BIT(I40E_PHY_TYPE_SGMII),
+       I40E_CAP_PHY_TYPE_1000BASE_KX     = BIT(I40E_PHY_TYPE_1000BASE_KX),
+       I40E_CAP_PHY_TYPE_10GBASE_KX4     = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+       I40E_CAP_PHY_TYPE_10GBASE_KR      = BIT(I40E_PHY_TYPE_10GBASE_KR),
+       I40E_CAP_PHY_TYPE_40GBASE_KR4     = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+       I40E_CAP_PHY_TYPE_XAUI            = BIT(I40E_PHY_TYPE_XAUI),
+       I40E_CAP_PHY_TYPE_XFI             = BIT(I40E_PHY_TYPE_XFI),
+       I40E_CAP_PHY_TYPE_SFI             = BIT(I40E_PHY_TYPE_SFI),
+       I40E_CAP_PHY_TYPE_XLAUI           = BIT(I40E_PHY_TYPE_XLAUI),
+       I40E_CAP_PHY_TYPE_XLPPI           = BIT(I40E_PHY_TYPE_XLPPI),
+       I40E_CAP_PHY_TYPE_40GBASE_CR4_CU  = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_CR1_CU  = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_AOC     = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+       I40E_CAP_PHY_TYPE_40GBASE_AOC     = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+       I40E_CAP_PHY_TYPE_100BASE_TX      = BIT(I40E_PHY_TYPE_100BASE_TX),
+       I40E_CAP_PHY_TYPE_1000BASE_T      = BIT(I40E_PHY_TYPE_1000BASE_T),
+       I40E_CAP_PHY_TYPE_10GBASE_T       = BIT(I40E_PHY_TYPE_10GBASE_T),
+       I40E_CAP_PHY_TYPE_10GBASE_SR      = BIT(I40E_PHY_TYPE_10GBASE_SR),
+       I40E_CAP_PHY_TYPE_10GBASE_LR      = BIT(I40E_PHY_TYPE_10GBASE_LR),
+       I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_CR1     = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+       I40E_CAP_PHY_TYPE_40GBASE_CR4     = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+       I40E_CAP_PHY_TYPE_40GBASE_SR4     = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+       I40E_CAP_PHY_TYPE_40GBASE_LR4     = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+       I40E_CAP_PHY_TYPE_1000BASE_SX     = BIT(I40E_PHY_TYPE_1000BASE_SX),
+       I40E_CAP_PHY_TYPE_1000BASE_LX     = BIT(I40E_PHY_TYPE_1000BASE_LX),
+       I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
+                                        BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+       I40E_CAP_PHY_TYPE_20GBASE_KR2     = BIT(I40E_PHY_TYPE_20GBASE_KR2)
 };
 
 struct i40e_phy_info {
        struct i40e_link_status link_info;
        struct i40e_link_status link_info_old;
-       u32 autoneg_advertised;
-       u32 phy_id;
-       u32 module_type;
        bool get_link_info;
        enum i40e_media_type media_type;
+       /* all the phy types the NVM is capable of */
+       enum i40e_aq_capabilities_phy_type phy_types;
 };
 
 #define I40E_HW_CAP_MAX_GPIO                   30
@@ -288,6 +313,7 @@ struct i40e_nvm_info {
        bool blank_nvm_mode;      /* is NVM empty (no FW present)*/
        u16 version;              /* NVM package version */
        u32 eetrack;              /* NVM data version */
+       u32 oem_ver;              /* OEM version info */
 };
 
 /* definitions used in NVM update support */
@@ -1029,8 +1055,8 @@ enum i40e_filter_program_desc_fd_status {
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT       23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
-                                      BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK        (0x1FFUL << \
+                                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
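
The old mask covered only bit 23; DEST_VSI is really a 9-bit VSI index starting at that bit, which the new 0x1FF mask reflects. A sketch of placing the field into descriptor QW0 (vsi_id is a placeholder):

    u64 qw0 = ((u64)vsi_id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
              I40E_TXD_FLTR_QW0_DEST_VSI_MASK;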
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT    4
 #define I40E_TXD_FLTR_QW1_CMD_MASK     (0xFFFFULL << \
@@ -1173,6 +1199,7 @@ struct i40e_hw_port_stats {
 /* Checksum and Shadow RAM pointers */
 #define I40E_SR_NVM_CONTROL_WORD               0x00
 #define I40E_SR_EMP_MODULE_PTR                 0x0F
+#define I40E_NVM_OEM_VER_OFF                   0x83
 #define I40E_SR_NVM_DEV_STARTER_VERSION                0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
index cadda642c98cda1fe3e34f040c798425d721b843..9f7b279b9d9c8f827dec988b25801f26f0386f7c 100644 (file)
@@ -150,6 +150,7 @@ struct i40e_virtchnl_vsi_resource {
 #define I40E_VIRTCHNL_VF_OFFLOAD_FCOE          0x00000004
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ                0x00000008
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG       0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR     0x00000020
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
 
index e7a223ea6c25ed6d911ba8fbb09669bb203567bd..22fc3d49c4b95233fe1103aa92b95e5f97ecf6ba 100644 (file)
@@ -87,7 +87,7 @@ struct i40e_vsi {
 #define I40EVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 #define I40EVF_MAX_AQ_BUF_SIZE    4096
 #define I40EVF_AQ_LEN             32
-#define I40EVF_AQ_MAX_ERR         10 /* times to try before resetting AQ */
+#define I40EVF_AQ_MAX_ERR         20 /* times to try before resetting AQ */
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
@@ -112,6 +112,8 @@ struct i40e_q_vector {
        struct i40e_ring_container tx;
        u32 ring_mask;
        u8 num_ringpairs;       /* total number of ring pairs in vector */
+#define ITR_COUNTDOWN_START 100
+       u8 itr_countdown;       /* when 0 or 1 update ITR */
        int v_idx;        /* vector index in list */
        char name[IFNAMSIZ + 9];
        bool arm_wb_state;
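
The countdown starts at 100 whenever a vector is (re)mapped, and dynamic ITR adjustment is held off until it runs down, so the moderation algorithm is not steered by the unrepresentative traffic right after setup. A hedged sketch of one plausible gating scheme (foo_set_new_itr() is a placeholder for the real update path):

    if (q_vector->itr_countdown > 0)
            q_vector->itr_countdown--;  /* still settling, keep current ITR */
    else
            foo_set_new_itr(q_vector);  /* hypothetical update path */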
@@ -211,7 +213,6 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_RX_1BUF_CAPABLE              BIT(1)
 #define I40EVF_FLAG_RX_PS_CAPABLE                BIT(2)
 #define I40EVF_FLAG_RX_PS_ENABLED                BIT(3)
-#define I40EVF_FLAG_IN_NETPOLL                   BIT(4)
 #define I40EVF_FLAG_IMIR_ENABLED                 BIT(5)
 #define I40EVF_FLAG_MQ_CAPABLE                   BIT(6)
 #define I40EVF_FLAG_NEED_LINK_UPDATE             BIT(7)
@@ -220,10 +221,10 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_RESET_NEEDED                 BIT(10)
 #define I40EVF_FLAG_WB_ON_ITR_CAPABLE          BIT(11)
 #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE     BIT(12)
+#define I40EVF_FLAG_ADDR_SET_BY_PF             BIT(13)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED              0
 #define I40E_FLAG_DCB_ENABLED                   0
-#define I40E_FLAG_IN_NETPOLL                    I40EVF_FLAG_IN_NETPOLL
 #define I40E_FLAG_RX_CSUM_ENABLED                I40EVF_FLAG_RX_CSUM_ENABLED
 #define I40E_FLAG_WB_ON_ITR_CAPABLE            I40EVF_FLAG_WB_ON_ITR_CAPABLE
 #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE       I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
index c00e4959f0263757ff03cfbddf2342bc62651740..d962164dfb0fbf579a150223b8d594ecb073b51f 100644 (file)
@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.3.13"
+#define DRV_VERSION "1.3.33"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2015 Intel Corporation.";
@@ -282,6 +282,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
 /**
  * i40evf_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
+ * @flush: whether to flush posted register writes by reading back with rd32()
  **/
 void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
 {
@@ -305,15 +306,14 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        struct i40e_hw *hw = &adapter->hw;
        u32 val;
-       u32 ena_mask;
 
        /* handle non-queue interrupts */
-       val = rd32(hw, I40E_VFINT_ICR01);
-       ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+       rd32(hw, I40E_VFINT_ICR01);
+       rd32(hw, I40E_VFINT_ICR0_ENA1);
 
 
-       val = rd32(hw, I40E_VFINT_DYN_CTL01);
-       val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
+       val = rd32(hw, I40E_VFINT_DYN_CTL01) |
+             I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
        wr32(hw, I40E_VFINT_DYN_CTL01, val);
 
        /* schedule work on the private workqueue */
@@ -334,7 +334,7 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
        if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;
 
-       napi_schedule(&q_vector->napi);
+       napi_schedule_irqoff(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
@@ -357,6 +357,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
        q_vector->rx.ring = rx_ring;
        q_vector->rx.count++;
        q_vector->rx.latency_range = I40E_LOW_LATENCY;
+       q_vector->itr_countdown = ITR_COUNTDOWN_START;
 }
 
 /**
@@ -377,6 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
        q_vector->tx.ring = tx_ring;
        q_vector->tx.count++;
        q_vector->tx.latency_range = I40E_LOW_LATENCY;
+       q_vector->itr_countdown = ITR_COUNTDOWN_START;
        q_vector->num_ringpairs++;
        q_vector->ring_mask |= BIT(t_idx);
 }
@@ -444,6 +446,29 @@ out:
        return err;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40evf_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts.  It's not called while the normal interrupt routine is executing.
+ **/
+static void i40evf_netpoll(struct net_device *netdev)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+       int i;
+
+       /* if interface is down do nothing */
+       if (test_bit(__I40E_DOWN, &adapter->vsi.state))
+               return;
+
+       for (i = 0; i < q_vectors; i++)
+               i40evf_msix_clean_rings(0, adapter->q_vector[i]);
+}
+
+#endif
 /**
  * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
  * @adapter: board private structure
@@ -841,6 +866,15 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
        if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
                return 0;
 
+       if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
+               return -EPERM;
+
+       f = i40evf_find_filter(adapter, hw->mac.addr);
+       if (f) {
+               f->remove = true;
+               adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+       }
+
        f = i40evf_add_filter(adapter, addr->sa_data);
        if (f) {
                ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -1114,6 +1148,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
                tx_ring->netdev = adapter->netdev;
                tx_ring->dev = &adapter->pdev->dev;
                tx_ring->count = adapter->tx_desc_count;
+               if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+                       tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
                adapter->tx_rings[i] = tx_ring;
 
                rx_ring = &tx_ring[1];
@@ -1573,6 +1609,7 @@ static void i40evf_reset_task(struct work_struct *work)
                                                      reset_task);
        struct net_device *netdev = adapter->netdev;
        struct i40e_hw *hw = &adapter->hw;
+       struct i40evf_vlan_filter *vlf;
        struct i40evf_mac_filter *f;
        u32 reg_val;
        int i = 0, err;
@@ -1617,7 +1654,7 @@ static void i40evf_reset_task(struct work_struct *work)
        /* extra wait to make sure minimum wait is met */
        msleep(I40EVF_RESET_WAIT_MS);
        if (i == I40EVF_RESET_WAIT_COUNT) {
-               struct i40evf_mac_filter *f, *ftmp;
+               struct i40evf_mac_filter *ftmp;
                struct i40evf_vlan_filter *fv, *fvtmp;
 
                /* reset never finished */
@@ -1696,8 +1733,8 @@ continue_reset:
                f->add = true;
        }
        /* re-add all VLAN filters */
-       list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-               f->add = true;
+       list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
+               vlf->add = true;
        }
        adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
        adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
@@ -2038,6 +2075,9 @@ static const struct net_device_ops i40evf_netdev_ops = {
        .ndo_tx_timeout         = i40evf_tx_timeout,
        .ndo_vlan_rx_add_vid    = i40evf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = i40evf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = i40evf_netpoll,
+#endif
 };
 
 /**
@@ -2086,7 +2126,10 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
 
        if (adapter->vf_res->vf_offload_flags
            & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
-               netdev->vlan_features = netdev->features;
+               netdev->vlan_features = netdev->features &
+                                       ~(NETIF_F_HW_VLAN_CTAG_TX |
+                                         NETIF_F_HW_VLAN_CTAG_RX |
+                                         NETIF_F_HW_VLAN_CTAG_FILTER);
                netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
                                    NETIF_F_HW_VLAN_CTAG_RX |
                                    NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -2244,10 +2287,13 @@ static void i40evf_init_task(struct work_struct *work)
        if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
                dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
                         adapter->hw.mac.addr);
-               random_ether_addr(adapter->hw.mac.addr);
+               eth_hw_addr_random(netdev);
+               ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+       } else {
+               adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
+               ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+               ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
        }
-       ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
-       ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 
        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &i40evf_watchdog_timer;
@@ -2263,6 +2309,9 @@ static void i40evf_init_task(struct work_struct *work)
        if (err)
                goto err_sw_init;
        i40evf_map_rings_to_vectors(adapter);
+       if (adapter->vf_res->vf_offload_flags &
+                   I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+               adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
        if (!RSS_AQ(adapter))
                i40evf_configure_rss(adapter);
        err = i40evf_request_misc_irq(adapter);
@@ -2311,11 +2360,14 @@ err_alloc:
 err:
        /* Things went into the weeds, so try again later */
        if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
-               dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
+               dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
                adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
-               return; /* do not reschedule */
+               i40evf_shutdown_adminq(hw);
+               adapter->state = __I40EVF_STARTUP;
+               schedule_delayed_work(&adapter->init_task, HZ * 5);
+               return;
        }
-       schedule_delayed_work(&adapter->init_task, HZ * 3);
+       schedule_delayed_work(&adapter->init_task, HZ);
 }
 
 /**
index 4f056efecba408b0d96120e494ed2ce2c28e00cb..32e620e1eb5c9549db7f2743a1b2b2986523666f 100644 (file)
@@ -156,7 +156,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
        caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
               I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
               I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
-              I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+              I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
+              I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
        adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
        if (PF_IS_V11(adapter))
index 74262768b09b75bd7ad267afb67e720a617bce10..2529bc625de4532e78407500d4c43ba92714f06a 100644 (file)
@@ -842,10 +842,6 @@ static void igb_get_drvinfo(struct net_device *netdev,
                sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = IGB_STATS_LEN;
-       drvinfo->testinfo_len = IGB_TEST_LEN;
-       drvinfo->regdump_len = igb_get_regs_len(netdev);
-       drvinfo->eedump_len = igb_get_eeprom_len(netdev);
 }
 
 static void igb_get_ringparam(struct net_device *netdev,
index 7e6267503790596e228455d8f9ee469410572dfb..ea7b098872456e5903bbfd5dead770eba52f69ef 100644 (file)
@@ -151,7 +151,7 @@ static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
 static int igb_poll(struct napi_struct *, int);
 static bool igb_clean_tx_irq(struct igb_q_vector *);
-static bool igb_clean_rx_irq(struct igb_q_vector *, int);
+static int igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -6364,6 +6364,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
                                                     struct igb_q_vector,
                                                     napi);
        bool clean_complete = true;
+       int work_done = 0;
 
 #ifdef CONFIG_IGB_DCA
        if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -6372,15 +6373,19 @@ static int igb_poll(struct napi_struct *napi, int budget)
        if (q_vector->tx.ring)
                clean_complete = igb_clean_tx_irq(q_vector);
 
-       if (q_vector->rx.ring)
-               clean_complete &= igb_clean_rx_irq(q_vector, budget);
+       if (q_vector->rx.ring) {
+               int cleaned = igb_clean_rx_irq(q_vector, budget);
+
+               work_done += cleaned;
+               clean_complete &= (cleaned < budget);
+       }
 
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
 
        /* If not enough Rx work done, exit the polling mode */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        igb_ring_irq_enable(q_vector);
 
        return 0;
@@ -6904,7 +6909,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 {
        struct igb_ring *rx_ring = q_vector->rx.ring;
        struct sk_buff *skb = rx_ring->skb;
@@ -6978,7 +6983,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        if (cleaned_count)
                igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
-       return total_packets < budget;
+       return total_packets;
 }
 
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
index c6996feb1cb4e2559b2b3a260d95a6b0f980b449..b74ce53d7b523e0b8414c21bd20ff9e884ec722f 100644 (file)
@@ -196,8 +196,6 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->regdump_len = igbvf_get_regs_len(netdev);
-       drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
 }
 
 static void igbvf_get_ringparam(struct net_device *netdev,
index e86d41ed9260780dd644509fd307e64974e9dd0e..297af801f0519136eb1fe0f69a50a746e84c06b8 100644 (file)
@@ -1211,7 +1211,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
 
        /* If not enough Rx work done, exit the polling mode */
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                if (adapter->requested_itr & 3)
                        igbvf_set_itr(adapter);
index b311e9e710d2636ddc3a17ad4d1cc10ba917cfb1..d2b29b490ae0167fe8325fd47758a9d6c0ca5a90 100644 (file)
@@ -479,9 +479,6 @@ ixgb_get_drvinfo(struct net_device *netdev,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = IXGB_STATS_LEN;
-       drvinfo->regdump_len = ixgb_get_regs_len(netdev);
-       drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
 }
 
 static void
index dda0f678339ad88e02904787b446e7ad1db5c99b..1d2174526a4c909422ada878ac9dc13c60d7c513 100644 (file)
@@ -152,9 +152,17 @@ struct vf_data_storage {
        u16 vlan_count;
        u8 spoofchk_enabled;
        bool rss_query_enabled;
+       u8 trusted;
+       int xcast_mode;
        unsigned int vf_api;
 };
 
+enum ixgbevf_xcast_modes {
+       IXGBEVF_XCAST_MODE_NONE = 0,
+       IXGBEVF_XCAST_MODE_MULTI,
+       IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
 struct vf_macvlans {
        struct list_head l;
        int vf;
index 94c4912b23308c5bbd0648d8b21a5bcaea9077aa..d681273bd39d52ee060c14b1c6c17d7f5e427dfb 100644 (file)
@@ -943,9 +943,6 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = IXGBE_STATS_LEN;
-       drvinfo->testinfo_len = IXGBE_TEST_LEN;
-       drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
 }
 
 static void ixgbe_get_ringparam(struct net_device *netdev,
index 191003901adb84f9faf8fa6a03013599b65e652c..47395ff5d908c43174a4c6afc4a8a089ae948c70 100644 (file)
@@ -2775,7 +2775,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                                container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *ring;
-       int per_ring_budget;
+       int per_ring_budget, work_done = 0;
        bool clean_complete = true;
 
 #ifdef CONFIG_IXGBE_DCA
@@ -2796,9 +2796,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
        else
                per_ring_budget = budget;
 
-       ixgbe_for_each_ring(ring, q_vector->rx)
-               clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
-                                  per_ring_budget) < per_ring_budget);
+       ixgbe_for_each_ring(ring, q_vector->rx) {
+               int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+                                                per_ring_budget);
+
+               work_done += cleaned;
+               clean_complete &= (cleaned < per_ring_budget);
+       }
 
        ixgbe_qv_unlock_napi(q_vector);
        /* If all work not completed, return budget and keep polling */
@@ -2806,7 +2810,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                return budget;
 
        /* all work done, exit the polling mode */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        if (adapter->rx_itr_setting & 1)
                ixgbe_set_itr(q_vector);
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -3723,14 +3727,20 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
        hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
                                          adapter->num_vfs);
 
-       /* Ensure LLDP is set for Ethertype Antispoofing if we will be
+       /* Ensure LLDP and FC are set for Ethertype Antispoofing if we will be
         * calling set_ethertype_anti_spoofing for each VF in loop below
         */
-       if (hw->mac.ops.set_ethertype_anti_spoofing)
+       if (hw->mac.ops.set_ethertype_anti_spoofing) {
                IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
-                               (IXGBE_ETQF_FILTER_EN    | /* enable filter */
-                                IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */
-                                IXGBE_ETH_P_LLDP));       /* LLDP eth type */
+                               (IXGBE_ETQF_FILTER_EN    |
+                                IXGBE_ETQF_TX_ANTISPOOF |
+                                IXGBE_ETH_P_LLDP));
+
+               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+                               (IXGBE_ETQF_FILTER_EN |
+                                IXGBE_ETQF_TX_ANTISPOOF |
+                                ETH_P_PAUSE));
+       }
 
        /* For VFs that have spoof checking turned off */
        for (i = 0; i < adapter->num_vfs; i++) {
@@ -5301,7 +5311,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
        adapter->ring_feature[RING_F_RSS].limit = rss;
        adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
-       adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
        adapter->max_q_vectors = MAX_Q_VECTORS_82599;
        adapter->atr_sample_rate = 20;
        fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
@@ -5327,7 +5336,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
-               adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
 
                if (hw->device_id == IXGBE_DEV_ID_82598AT)
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
@@ -8399,6 +8407,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_set_vf_rate        = ixgbe_ndo_set_vf_bw,
        .ndo_set_vf_spoofchk    = ixgbe_ndo_set_vf_spoofchk,
        .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
+       .ndo_set_vf_trust       = ixgbe_ndo_set_vf_trust,
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
        .ndo_get_stats64        = ixgbe_get_stats64,
 #ifdef CONFIG_IXGBE_DCB
index b1e4703ff2a5949fc4c35db6c0c1cd1dceea6675..8daa95f74548857fbb44f9e05c724b9c09052db3 100644 (file)
@@ -102,6 +102,8 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_GET_RETA      0x0a    /* VF request for RETA */
 #define IXGBE_VF_GET_RSS_KEY   0x0b    /* get RSS key */
 
+#define IXGBE_VF_UPDATE_XCAST_MODE     0x0c
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN 4
 /* word in permanent address message with the current multicast type */
index 1d17b5872dd1f266625891b600d673c98a8cfa0f..fcd8b27a0ccba90bb7f67332f57174a71ef5bf1c 100644 (file)
@@ -116,6 +116,12 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
                         * we want to disable the querying by default.
                         */
                        adapter->vfinfo[i].rss_query_enabled = 0;
+
+                       /* Untrust all VFs */
+                       adapter->vfinfo[i].trusted = false;
+
+                       /* set the default xcast mode */
+                       adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
                }
 
                return 0;
@@ -1001,6 +1007,59 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
        return 0;
 }
 
+static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
+                                     u32 *msgbuf, u32 vf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int xcast_mode = msgbuf[1];
+       u32 vmolr, disable, enable;
+
+       /* verify the PF is supporting the correct APIs */
+       switch (adapter->vfinfo[vf].vf_api) {
+       case ixgbe_mbox_api_12:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
+           !adapter->vfinfo[vf].trusted) {
+               xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+       }
+
+       if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
+               goto out;
+
+       switch (xcast_mode) {
+       case IXGBEVF_XCAST_MODE_NONE:
+               disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+               enable = 0;
+               break;
+       case IXGBEVF_XCAST_MODE_MULTI:
+               disable = IXGBE_VMOLR_MPE;
+               enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
+               break;
+       case IXGBEVF_XCAST_MODE_ALLMULTI:
+               disable = 0;
+               enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+       vmolr &= ~disable;
+       vmolr |= enable;
+       IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+       adapter->vfinfo[vf].xcast_mode = xcast_mode;
+
+out:
+       msgbuf[1] = xcast_mode;
+
+       return 0;
+}
+
 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 {
        u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1063,6 +1122,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        case IXGBE_VF_GET_RSS_KEY:
                retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
                break;
+       case IXGBE_VF_UPDATE_XCAST_MODE:
+               retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
+               break;
        default:
                e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
                retval = IXGBE_ERR_MBX;
@@ -1124,6 +1186,17 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
 }
 
+static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 ping;
+
+       ping = IXGBE_PF_CONTROL_MSG;
+       if (adapter->vfinfo[vf].clear_to_send)
+               ping |= IXGBE_VT_MSGTYPE_CTS;
+       ixgbe_write_mbx(hw, &ping, 1, vf);
+}
+
 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1416,6 +1489,28 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
        return 0;
 }
 
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       if (vf >= adapter->num_vfs)
+               return -EINVAL;
+
+       /* nothing to do */
+       if (adapter->vfinfo[vf].trusted == setting)
+               return 0;
+
+       adapter->vfinfo[vf].trusted = setting;
+
+       /* reset VF to reconfigure features */
+       adapter->vfinfo[vf].clear_to_send = false;
+       ixgbe_ping_vf(adapter, vf);
+
+       e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
+
+       return 0;
+}
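
With the .ndo_set_vf_trust hook wired into ixgbe_netdev_ops above, the knob is reachable from userspace via iproute2 (assuming a version new enough to know the vf trust attribute):

    # mark VF 0 on eth0 as trusted so it may request all-multicast mode
    ip link set dev eth0 vf 0 trust on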
+
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi)
 {
@@ -1430,5 +1525,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
        ivi->qos = adapter->vfinfo[vf].pf_qos;
        ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
        ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
+       ivi->trusted = adapter->vfinfo[vf].trusted;
        return 0;
 }
index 2c197e6d1fe7c3cb66c1be785bba2a8084e806fa..dad925706f4cf554537d4978c28286357854486c 100644 (file)
@@ -49,6 +49,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
 int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
                                  bool setting);
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi);
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
index 939c90c4ff3917871b583c2584733554a92ebdfb..995f03107eacd03511dab6aa8e342a6c474375e1 100644 (file)
@@ -1752,6 +1752,9 @@ enum {
  *    FCoE (0x8906):         Filter 2
  *    1588 (0x88f7):         Filter 3
  *    FIP  (0x8914):         Filter 4
+ *    LLDP (0x88CC):         Filter 5
+ *    LACP (0x8809):         Filter 6
+ *    FC   (0x8808):         Filter 7
  */
 #define IXGBE_ETQF_FILTER_EAPOL          0
 #define IXGBE_ETQF_FILTER_FCOE           2
@@ -1759,6 +1762,7 @@ enum {
 #define IXGBE_ETQF_FILTER_FIP            4
 #define IXGBE_ETQF_FILTER_LLDP          5
 #define IXGBE_ETQF_FILTER_LACP          6
+#define IXGBE_ETQF_FILTER_FC            7
 
 /* VLAN Control Bit Masks */
 #define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
index ed7b2899affe03bbaf922ad3d4a1e48e59a663ff..ebe0ac950b14e72a0d5dae2e51d3250c250846d1 100644 (file)
@@ -198,6 +198,7 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
  * ixgbe_reset_cs4227 - Reset CS4227 using port expander
  * @hw: pointer to hardware structure
  *
+ * This function assumes that the caller has acquired the proper semaphore.
  * Returns error code
  */
 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
@@ -296,6 +297,14 @@ static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
                hw->mac.ops.release_swfw_sync(hw, swfw_mask);
                msleep(IXGBE_CS4227_CHECK_DELAY);
        }
+       /* If still pending, assume other instance failed. */
+       if (retry == IXGBE_CS4227_RETRIES) {
+               status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+               if (status) {
+                       hw_err(hw, "semaphore failed with %d\n", status);
+                       return;
+               }
+       }
 
        /* Reset the CS4227. */
        status = ixgbe_reset_cs4227(hw);
@@ -1608,7 +1617,7 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
        if (status)
                return status;
 
-       if (lsc)
+       if (lsc && phy->ops.setup_internal_link)
                return phy->ops.setup_internal_link(hw);
 
        return 0;
index 04c7ec8446e0329c71eaee8eea346a4e92836b8e..ec31472796213197fe50d163ce0473a0d34df7fe 100644 (file)
@@ -471,6 +471,12 @@ enum ixgbevf_boards {
        board_X550EM_x_vf,
 };
 
+enum ixgbevf_xcast_modes {
+       IXGBEVF_XCAST_MODE_NONE = 0,
+       IXGBEVF_XCAST_MODE_MULTI,
+       IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
 extern const struct ixgbevf_info ixgbevf_82599_vf_info;
 extern const struct ixgbevf_info ixgbevf_X540_vf_info;
 extern const struct ixgbevf_info ixgbevf_X550_vf_info;
index 35da2d74e73ecc74f567a528cb2ebffd9a25edcd..592ff237d69207837358a38025e31f4e9989b540 100644 (file)
@@ -1008,7 +1008,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
-       int per_ring_budget;
+       int per_ring_budget, work_done = 0;
        bool clean_complete = true;
 
        ixgbevf_for_each_ring(ring, q_vector->tx)
@@ -1027,10 +1027,12 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
        else
                per_ring_budget = budget;
 
-       ixgbevf_for_each_ring(ring, q_vector->rx)
-               clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
-                                                       per_ring_budget)
-                                  < per_ring_budget);
+       ixgbevf_for_each_ring(ring, q_vector->rx) {
+               int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
+                                                  per_ring_budget);
+               work_done += cleaned;
+               clean_complete &= (cleaned < per_ring_budget);
+       }
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
        ixgbevf_qv_unlock_napi(q_vector);
@@ -1040,7 +1042,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
        if (!clean_complete)
                return budget;
        /* all work done, exit the polling mode */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        if (adapter->rx_itr_setting & 1)
                ixgbevf_set_itr(q_vector);
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
@@ -1892,9 +1894,17 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       unsigned int flags = netdev->flags;
+       int xcast_mode;
+
+       xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
+                    (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
+                    IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
 
        spin_lock_bh(&adapter->mbx_lock);
 
+       hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+
        /* reprogram multicast list */
        hw->mac.ops.update_mc_addr_list(hw, netdev);
 
index 82f44e06e5fca11ad86ec92930428fbbc130c93d..340cdd469455ef646f38b25bd0ecde62788976ce 100644 (file)
@@ -112,6 +112,8 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_GET_RETA      0x0a    /* VF request for RETA */
 #define IXGBE_VF_GET_RSS_KEY   0x0b    /* get RSS hash key */
 
+#define IXGBE_VF_UPDATE_XCAST_MODE     0x0c
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN      4
 /* word in permanent address message with the current multicast type */
index d1339b0506274ce0acbfc1e642b1b7fa57c332e8..427f3605cbfc8035f03910d564eeb5e707a5c58c 100644 (file)
@@ -468,6 +468,46 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
        return 0;
 }
 
+/**
+ *  ixgbevf_update_xcast_mode - Update Multicast mode
+ *  @hw: pointer to the HW structure
+ *  @netdev: pointer to net device structure
+ *  @xcast_mode: new multicast mode
+ *
+ *  Updates the multicast mode of the VF.
+ **/
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
+                                    struct net_device *netdev, int xcast_mode)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[2];
+       s32 err;
+
+       switch (hw->api_version) {
+       case ixgbe_mbox_api_12:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+       msgbuf[1] = xcast_mode;
+
+       err = mbx->ops.write_posted(hw, msgbuf, 2);
+       if (err)
+               return err;
+
+       err = mbx->ops.read_posted(hw, msgbuf, 2);
+       if (err)
+               return err;
+
+       msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+       if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+               return -EPERM;
+
+       return 0;
+}
+
 /**
  *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
  *  @hw: pointer to the HW structure
@@ -727,6 +767,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
        .check_link             = ixgbevf_check_mac_link_vf,
        .set_rar                = ixgbevf_set_rar_vf,
        .update_mc_addr_list    = ixgbevf_update_mc_addr_list_vf,
+       .update_xcast_mode      = ixgbevf_update_xcast_mode,
        .set_uc_addr            = ixgbevf_set_uc_addr_vf,
        .set_vfta               = ixgbevf_set_vfta_vf,
 };
index d40f036b6df0c828b5a3efe10ee90ba780b36a52..ef9f7736b4dc6524ea12ea96c31c2218c6600f76 100644 (file)
@@ -63,6 +63,7 @@ struct ixgbe_mac_operations {
        s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
        s32 (*init_rx_addrs)(struct ixgbe_hw *);
        s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+       s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
        s32 (*enable_mc)(struct ixgbe_hw *);
        s32 (*disable_mc)(struct ixgbe_hw *);
        s32 (*clear_vfta)(struct ixgbe_hw *);
index c78ae186809703b416874786629ebbc782fdcffe..6bf725921e79c6aa093275684eea722d20089497 100644 (file)
@@ -759,11 +759,23 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 
        desc->l4i_chk = 0;
        desc->byte_cnt = length;
-       desc->buf_ptr = dma_map_single(dev->dev.parent, data,
-                                      length, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
-               WARN(1, "dma_map_single failed!\n");
-               return -ENOMEM;
+
+       if (length <= 8 && (uintptr_t)data & 0x7) {
+               /* Copy unaligned small data fragment to TSO header data area */
+               memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+                      data, length);
+               desc->buf_ptr = txq->tso_hdrs_dma
+                       + txq->tx_curr_desc * TSO_HEADER_SIZE;
+       } else {
+               /* Alignment is okay, map buffer and hand off to hardware */
+               txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+               desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+                       length, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev->dev.parent,
+                                              desc->buf_ptr))) {
+                       WARN(1, "dma_map_single failed!\n");
+                       return -ENOMEM;
+               }
        }
 
        cmd_sts = BUFFER_OWNED_BY_DMA;
@@ -779,7 +791,8 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 }
 
 static inline void
-txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
+               u32 *first_cmd_sts, bool first_desc)
 {
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -788,6 +801,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
        int ret;
        u32 cmd_csum = 0;
        u16 l4i_chk = 0;
+       u32 cmd_sts;
 
        tx_index = txq->tx_curr_desc;
        desc = &txq->tx_desc_area[tx_index];
@@ -803,9 +817,17 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
        desc->byte_cnt = hdr_len;
        desc->buf_ptr = txq->tso_hdrs_dma +
                        txq->tx_curr_desc * TSO_HEADER_SIZE;
-       desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
+       cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
                                   GEN_CRC;
 
+       /* Defer updating the first command descriptor until all
+        * following descriptors have been written.
+        */
+       if (first_desc)
+               *first_cmd_sts = cmd_sts;
+       else
+               desc->cmd_sts = cmd_sts;
+
        txq->tx_curr_desc++;
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
@@ -819,6 +841,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
        int desc_count = 0;
        struct tso_t tso;
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       struct tx_desc *first_tx_desc;
+       u32 first_cmd_sts = 0;
 
        /* Count needed descriptors */
        if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
@@ -826,11 +850,14 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
                return -EBUSY;
        }
 
+       first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
+
        /* Initialize the TSO handler, and prepare the first payload */
        tso_start(skb, &tso);
 
        total_len = skb->len - hdr_len;
        while (total_len > 0) {
+               bool first_desc = (desc_count == 0);
                char *hdr;
 
                data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
@@ -840,7 +867,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
                /* prepare packet headers: MAC + IP + TCP */
                hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
-               txq_put_hdr_tso(skb, txq, data_left);
+               txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
+                               first_desc);
 
                while (data_left > 0) {
                        int size;
@@ -860,6 +888,10 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
        __skb_queue_tail(&txq->tx_skb, skb);
        skb_tx_timestamp(skb);
 
+       /* ensure all other descriptors are written before first cmd_sts */
+       wmb();
+       first_tx_desc->cmd_sts = first_cmd_sts;
+
        /* clear TX_END status */
        mp->work_tx_end &= ~(1 << txq->index);
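
The ordering matters because the DMA engine starts walking the ring as soon as it sees BUFFER_OWNED_BY_DMA on the first descriptor; publishing that word last, behind a write barrier, guarantees it never reads a half-built chain. The generic shape of the handoff, as a hedged sketch:

    static void publish_chain(struct tx_desc *head, u32 head_cmd_sts)
    {
            /* caller has already written every later descriptor */
            wmb();                        /* order writes before publish  */
            head->cmd_sts = head_cmd_sts; /* ownership flips to hardware  */
    }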
 
@@ -1586,7 +1618,6 @@ static void mv643xx_eth_get_drvinfo(struct net_device *dev,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
 }
 
 static int mv643xx_eth_nway_reset(struct net_device *dev)
index dd6fe942acf9d3159248501853d28900c827388c..a47496a020d99eb2b56f185c3be8f1cf797b567a 100644 (file)
 #define MVNETA_TXQ_CMD                           0x2448
 #define      MVNETA_TXQ_DISABLE_SHIFT            8
 #define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
+#define MVNETA_RX_DISCARD_FRAME_COUNT           0x2484
+#define MVNETA_OVERRUN_FRAME_COUNT              0x2488
 #define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
 #define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
 #define MVNETA_ACC_MODE                          0x2500
 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
-#define MVNETA_MIB_COUNTERS_BASE                 0x3080
+#define MVNETA_MIB_COUNTERS_BASE                 0x3000
 #define      MVNETA_MIB_LATE_COLLISION           0x7c
 #define MVNETA_DA_FILT_SPEC_MCAST                0x3400
 #define MVNETA_DA_FILT_OTH_MCAST                 0x3500
 
 #define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
 
+struct mvneta_statistic {
+       unsigned short offset;
+       unsigned short type;
+       const char name[ETH_GSTRING_LEN];
+};
+
+#define T_REG_32       32
+#define T_REG_64       64
+
+static const struct mvneta_statistic mvneta_statistics[] = {
+       { 0x3000, T_REG_64, "good_octets_received", },
+       { 0x3010, T_REG_32, "good_frames_received", },
+       { 0x3008, T_REG_32, "bad_octets_received", },
+       { 0x3014, T_REG_32, "bad_frames_received", },
+       { 0x3018, T_REG_32, "broadcast_frames_received", },
+       { 0x301c, T_REG_32, "multicast_frames_received", },
+       { 0x3050, T_REG_32, "unrec_mac_control_received", },
+       { 0x3058, T_REG_32, "good_fc_received", },
+       { 0x305c, T_REG_32, "bad_fc_received", },
+       { 0x3060, T_REG_32, "undersize_received", },
+       { 0x3064, T_REG_32, "fragments_received", },
+       { 0x3068, T_REG_32, "oversize_received", },
+       { 0x306c, T_REG_32, "jabber_received", },
+       { 0x3070, T_REG_32, "mac_receive_error", },
+       { 0x3074, T_REG_32, "bad_crc_event", },
+       { 0x3078, T_REG_32, "collision", },
+       { 0x307c, T_REG_32, "late_collision", },
+       { 0x2484, T_REG_32, "rx_discard", },
+       { 0x2488, T_REG_32, "rx_overrun", },
+       { 0x3020, T_REG_32, "frames_64_octets", },
+       { 0x3024, T_REG_32, "frames_65_to_127_octets", },
+       { 0x3028, T_REG_32, "frames_128_to_255_octets", },
+       { 0x302c, T_REG_32, "frames_256_to_511_octets", },
+       { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
+       { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
+       { 0x3038, T_REG_64, "good_octets_sent", },
+       { 0x3040, T_REG_32, "good_frames_sent", },
+       { 0x3044, T_REG_32, "excessive_collision", },
+       { 0x3048, T_REG_32, "multicast_frames_sent", },
+       { 0x304c, T_REG_32, "broadcast_frames_sent", },
+       { 0x3054, T_REG_32, "fc_sent", },
+       { 0x300c, T_REG_32, "internal_mac_transmit_err", },
+};
+
 struct mvneta_pcpu_stats {
        struct  u64_stats_sync syncp;
        u64     rx_packets;
@@ -324,6 +370,8 @@ struct mvneta_port {
        unsigned int speed;
        unsigned int tx_csum_limit;
        int use_inband_status:1;
+
+       u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -530,6 +578,8 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
        /* Perform dummy reads from MIB counters */
        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
                dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+       dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+       dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
 }
 
 /* Get System Network Statistics */
@@ -758,7 +808,6 @@ static void mvneta_port_up(struct mvneta_port *pp)
        u32 q_map;
 
        /* Enable all initialized TXs. */
-       mvneta_mib_counters_clear(pp);
        q_map = 0;
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];
@@ -1035,6 +1084,8 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        mvreg_write(pp, MVNETA_INTR_ENABLE,
                    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
                     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+
+       mvneta_mib_counters_clear(pp);
 }
 
 /* Set max sizes for tx queues */
@@ -2982,6 +3033,65 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
        return 0;
 }
 
+static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+                                      u8 *data)
+{
+       if (sset == ETH_SS_STATS) {
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+                       memcpy(data + i * ETH_GSTRING_LEN,
+                              mvneta_statistics[i].name, ETH_GSTRING_LEN);
+       }
+}
+
+static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
+{
+       const struct mvneta_statistic *s;
+       void __iomem *base = pp->base;
+       u32 high, low;
+       u64 val;
+       int i;
+
+       for (i = 0, s = mvneta_statistics;
+            s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
+            s++, i++) {
+               val = 0;
+
+               switch (s->type) {
+               case T_REG_32:
+                       val = readl_relaxed(base + s->offset);
+                       break;
+               case T_REG_64:
+                       /* Docs say to read low 32-bit then high */
+                       low = readl_relaxed(base + s->offset);
+                       high = readl_relaxed(base + s->offset + 4);
+                       val = (u64)high << 32 | low;
+                       break;
+               }
+
+               pp->ethtool_stats[i] += val;
+       }
+}
+
+static void mvneta_ethtool_get_stats(struct net_device *dev,
+                                    struct ethtool_stats *stats, u64 *data)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+       int i;
+
+       mvneta_ethtool_update_stats(pp);
+
+       for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+               *data++ = pp->ethtool_stats[i];
+}
+
+static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+       if (sset == ETH_SS_STATS)
+               return ARRAY_SIZE(mvneta_statistics);
+       return -EOPNOTSUPP;
+}
+
 static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_open            = mvneta_open,
        .ndo_stop            = mvneta_stop,
@@ -3003,6 +3113,9 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
        .get_drvinfo    = mvneta_ethtool_get_drvinfo,
        .get_ringparam  = mvneta_ethtool_get_ringparam,
        .set_ringparam  = mvneta_ethtool_set_ringparam,
+       .get_strings    = mvneta_ethtool_get_strings,
+       .get_ethtool_stats = mvneta_ethtool_get_stats,
+       .get_sset_count = mvneta_ethtool_get_sset_count,
 };
 
 /* Initialize hw */
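
Once those three ethtool ops are registered, the new counters are visible with the standard tool; the names come straight from mvneta_statistics[]:

    # dump the per-port hardware statistics (device name is an example)
    ethtool -S eth0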
index f79d8124321e525b04de13e7ad1509105b789a2d..ddb5541882f5bfb5dd9086bb015e44cf2648ff99 100644 (file)
@@ -95,9 +95,6 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
                (u16) (mdev->dev->caps.fw_ver & 0xffff));
        strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = 0;
-       drvinfo->regdump_len = 0;
-       drvinfo->eedump_len = 0;
 }
 
 static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
index 597d8923c8e1c9cb14deedba68e088e7fd4dc605..886e1bc86374d990fc92655c7e1b40a3d611b45c 100644 (file)
@@ -2816,7 +2816,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        struct mlx4_en_priv *priv;
        int i;
        int err;
-       u64 mac_u64;
 
        dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
                                 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2908,17 +2907,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->addr_len = ETH_ALEN;
        mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
        if (!is_valid_ether_addr(dev->dev_addr)) {
-               if (mlx4_is_slave(priv->mdev->dev)) {
-                       eth_hw_addr_random(dev);
-                       en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
-                       mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
-                       mdev->dev->caps.def_mac[priv->port] = mac_u64;
-               } else {
-                       en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
-                              priv->port, dev->dev_addr);
-                       err = -EINVAL;
-                       goto out;
-               }
+               en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+                      priv->port, dev->dev_addr);
+               err = -EINVAL;
+               goto out;
+       } else if (mlx4_is_slave(priv->mdev->dev) &&
+                  (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
+               /* Random MAC was assigned in mlx4_slave_cap
+                * in mlx4_core module
+                */
+               dev->addr_assign_type |= NET_ADDR_RANDOM;
+               en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
        }
 
        memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
index 8e81e53c370e7d54e6367c012212cccc73ee26fd..c3448847936570582ace12e6baec0f51604f1072 100644 (file)
@@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
         * and performing a NOP command
         */
        for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+               /* Make sure request_irq was called */
+               if (!priv->eq_table.eq[i].have_irq)
+                       continue;
+
                /* Temporary use polling for command completions */
                mlx4_cmd_use_polling(dev);
 
index e8ec1dec5789a8d80499e8c478e4822567480284..f13a4d7bbf9597535e5f6271dea3769389bc90b6 100644 (file)
@@ -2840,3 +2840,19 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
        return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(set_phv_bit);
+
+void mlx4_replace_zero_macs(struct mlx4_dev *dev)
+{
+       int i;
+       u8 mac_addr[ETH_ALEN];
+
+       dev->port_random_macs = 0;
+       for (i = 1; i <= dev->caps.num_ports; ++i)
+               if (!dev->caps.def_mac[i] &&
+                   dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
+                       eth_random_addr(mac_addr);
+                       dev->port_random_macs |= 1 << i;
+                       dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+               }
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
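mlx4_replace_zero_macs() records, per port, whether a MAC had to be generated; the Ethernet driver consults that bitmap (see the en_netdev.c hunk above) before flagging the address as NET_ADDR_RANDOM. A hedged helper sketch of the same bit test (mlx4_port_has_random_mac is hypothetical, not part of this patch):

/* Sketch: did port 'port' receive a generated MAC in mlx4_slave_cap()? */
static bool mlx4_port_has_random_mac(struct mlx4_dev *dev, int port)
{
	return dev->port_random_macs & (1 << port);
}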
index 006757f80988bcb71cfc352542305b61310ef40f..85f1b1e7e505727bcdb7f424348d9dce0c5825d3 100644 (file)
@@ -863,6 +863,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                return -ENODEV;
        }
 
+       mlx4_replace_zero_macs(dev);
+
        dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
        dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
        dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
@@ -2669,14 +2671,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 
        if (msi_x) {
                int nreq = dev->caps.num_ports * num_online_cpus() + 1;
-               bool shared_ports = false;
 
                nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
                             nreq);
-               if (nreq > MAX_MSIX) {
+               if (nreq > MAX_MSIX)
                        nreq = MAX_MSIX;
-                       shared_ports = true;
-               }
 
                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                if (!entries)
@@ -2699,9 +2698,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
                            dev->caps.num_ports);
 
-               if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
-                       shared_ports = true;
-
                for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
                        if (i == MLX4_EQ_ASYNC)
                                continue;
@@ -2709,7 +2705,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                        priv->eq_table.eq[i].irq =
                                entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
 
-                       if (shared_ports) {
+                       if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
                                bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
                                            dev->caps.num_ports);
                                /* We don't set affinity hint when there
index 232b2b55f23b9170b32f351926c200b6ac0e7f1c..e1cf9036af225992c3961254cad0b474ca191e84 100644 (file)
@@ -1378,6 +1378,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
 
 void mlx4_init_quotas(struct mlx4_dev *dev);
 
+/* for VFs, replace zero MACs with randomly-generated MACs at driver start */
+void mlx4_replace_zero_macs(struct mlx4_dev *dev);
 int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
 /* Returns the VF index of slave */
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
index 78f51e103880d4dcae7745ec5cb5b2425e370e73..93195191f45bf00b09ff34a9db04ec9e0797f586 100644 (file)
@@ -318,7 +318,7 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                                key, NULL);
        } else {
                mailbox = mlx4_alloc_cmd_mailbox(dev);
-               if (IS_ERR_OR_NULL(mailbox))
+               if (IS_ERR(mailbox))
                        return PTR_ERR(mailbox);
 
                err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
index c3e54b7e8780f8abff5aa32c362ffb2cc8399803..fabfc9e0a948dfe8aa90f34f1565d76abe592f3e 100644 (file)
@@ -256,8 +256,154 @@ static void dump_buf(void *buf, int size, int data_only, int offset)
 
 enum {
        MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+       MLX5_DRIVER_SYND = 0xbadd00de,
 };
 
+static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
+                                      u32 *synd, u8 *status)
+{
+       *synd = 0;
+       *status = 0;
+
+       switch (op) {
+       case MLX5_CMD_OP_TEARDOWN_HCA:
+       case MLX5_CMD_OP_DISABLE_HCA:
+       case MLX5_CMD_OP_MANAGE_PAGES:
+       case MLX5_CMD_OP_DESTROY_MKEY:
+       case MLX5_CMD_OP_DESTROY_EQ:
+       case MLX5_CMD_OP_DESTROY_CQ:
+       case MLX5_CMD_OP_DESTROY_QP:
+       case MLX5_CMD_OP_DESTROY_PSV:
+       case MLX5_CMD_OP_DESTROY_SRQ:
+       case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+       case MLX5_CMD_OP_DESTROY_DCT:
+       case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+       case MLX5_CMD_OP_DEALLOC_PD:
+       case MLX5_CMD_OP_DEALLOC_UAR:
+       case MLX5_CMD_OP_DETTACH_FROM_MCG:
+       case MLX5_CMD_OP_DEALLOC_XRCD:
+       case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
+       case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+       case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+       case MLX5_CMD_OP_DESTROY_TIR:
+       case MLX5_CMD_OP_DESTROY_SQ:
+       case MLX5_CMD_OP_DESTROY_RQ:
+       case MLX5_CMD_OP_DESTROY_RMP:
+       case MLX5_CMD_OP_DESTROY_TIS:
+       case MLX5_CMD_OP_DESTROY_RQT:
+       case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+       case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
+       case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+               return MLX5_CMD_STAT_OK;
+
+       case MLX5_CMD_OP_QUERY_HCA_CAP:
+       case MLX5_CMD_OP_QUERY_ADAPTER:
+       case MLX5_CMD_OP_INIT_HCA:
+       case MLX5_CMD_OP_ENABLE_HCA:
+       case MLX5_CMD_OP_QUERY_PAGES:
+       case MLX5_CMD_OP_SET_HCA_CAP:
+       case MLX5_CMD_OP_QUERY_ISSI:
+       case MLX5_CMD_OP_SET_ISSI:
+       case MLX5_CMD_OP_CREATE_MKEY:
+       case MLX5_CMD_OP_QUERY_MKEY:
+       case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+       case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+       case MLX5_CMD_OP_CREATE_EQ:
+       case MLX5_CMD_OP_QUERY_EQ:
+       case MLX5_CMD_OP_GEN_EQE:
+       case MLX5_CMD_OP_CREATE_CQ:
+       case MLX5_CMD_OP_QUERY_CQ:
+       case MLX5_CMD_OP_MODIFY_CQ:
+       case MLX5_CMD_OP_CREATE_QP:
+       case MLX5_CMD_OP_RST2INIT_QP:
+       case MLX5_CMD_OP_INIT2RTR_QP:
+       case MLX5_CMD_OP_RTR2RTS_QP:
+       case MLX5_CMD_OP_RTS2RTS_QP:
+       case MLX5_CMD_OP_SQERR2RTS_QP:
+       case MLX5_CMD_OP_2ERR_QP:
+       case MLX5_CMD_OP_2RST_QP:
+       case MLX5_CMD_OP_QUERY_QP:
+       case MLX5_CMD_OP_SQD_RTS_QP:
+       case MLX5_CMD_OP_INIT2INIT_QP:
+       case MLX5_CMD_OP_CREATE_PSV:
+       case MLX5_CMD_OP_CREATE_SRQ:
+       case MLX5_CMD_OP_QUERY_SRQ:
+       case MLX5_CMD_OP_ARM_RQ:
+       case MLX5_CMD_OP_CREATE_XRC_SRQ:
+       case MLX5_CMD_OP_QUERY_XRC_SRQ:
+       case MLX5_CMD_OP_ARM_XRC_SRQ:
+       case MLX5_CMD_OP_CREATE_DCT:
+       case MLX5_CMD_OP_DRAIN_DCT:
+       case MLX5_CMD_OP_QUERY_DCT:
+       case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+       case MLX5_CMD_OP_QUERY_VPORT_STATE:
+       case MLX5_CMD_OP_MODIFY_VPORT_STATE:
+       case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+       case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
+       case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+       case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+       case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+       case MLX5_CMD_OP_SET_ROCE_ADDRESS:
+       case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+       case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+       case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+       case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+       case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+       case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+       case MLX5_CMD_OP_QUERY_Q_COUNTER:
+       case MLX5_CMD_OP_ALLOC_PD:
+       case MLX5_CMD_OP_ALLOC_UAR:
+       case MLX5_CMD_OP_CONFIG_INT_MODERATION:
+       case MLX5_CMD_OP_ACCESS_REG:
+       case MLX5_CMD_OP_ATTACH_TO_MCG:
+       case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+       case MLX5_CMD_OP_MAD_IFC:
+       case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+       case MLX5_CMD_OP_SET_MAD_DEMUX:
+       case MLX5_CMD_OP_NOP:
+       case MLX5_CMD_OP_ALLOC_XRCD:
+       case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+       case MLX5_CMD_OP_QUERY_CONG_STATUS:
+       case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+       case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+       case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
+       case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+       case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+       case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+       case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+       case MLX5_CMD_OP_CREATE_TIR:
+       case MLX5_CMD_OP_MODIFY_TIR:
+       case MLX5_CMD_OP_QUERY_TIR:
+       case MLX5_CMD_OP_CREATE_SQ:
+       case MLX5_CMD_OP_MODIFY_SQ:
+       case MLX5_CMD_OP_QUERY_SQ:
+       case MLX5_CMD_OP_CREATE_RQ:
+       case MLX5_CMD_OP_MODIFY_RQ:
+       case MLX5_CMD_OP_QUERY_RQ:
+       case MLX5_CMD_OP_CREATE_RMP:
+       case MLX5_CMD_OP_MODIFY_RMP:
+       case MLX5_CMD_OP_QUERY_RMP:
+       case MLX5_CMD_OP_CREATE_TIS:
+       case MLX5_CMD_OP_MODIFY_TIS:
+       case MLX5_CMD_OP_QUERY_TIS:
+       case MLX5_CMD_OP_CREATE_RQT:
+       case MLX5_CMD_OP_MODIFY_RQT:
+       case MLX5_CMD_OP_QUERY_RQT:
+       case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+       case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+       case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+       case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+       case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+       case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+               *status = MLX5_DRIVER_STATUS_ABORTED;
+               *synd = MLX5_DRIVER_SYND;
+               return -EIO;
+       default:
+               mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
+               return -EINVAL;
+       }
+}
+
 const char *mlx5_command_str(int command)
 {
        switch (command) {
@@ -592,6 +738,16 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
        return err;
 }
 
+static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
+{
+       return &out->syndrome;
+}
+
+static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
+{
+       return &out->status;
+}
+
 /*  Notes:
  *    1. Callback functions may not sleep
 *    2. page queue commands do not support asynchronous completion
@@ -1200,6 +1356,11 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
        return msg;
 }
 
+static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
+{
+       return be16_to_cpu(in->opcode);
+}
+
 static int is_manage_pages(struct mlx5_inbox_hdr *in)
 {
        return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
@@ -1214,6 +1375,15 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
        gfp_t gfp;
        int err;
        u8 status = 0;
+       u32 drv_synd;
+
+       if (pci_channel_offline(dev->pdev) ||
+           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+               err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
+               *get_synd_ptr(out) = cpu_to_be32(drv_synd);
+               *get_status_ptr(out) = status;
+               return err;
+       }
 
        pages_queue = is_manage_pages(in);
        gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
index e71563ce05d1bc34123fd4aa63348d569adf4c57..22d603f7827333e2dce6eab7be6ed6acd5a14cdb 100644 (file)
@@ -598,6 +598,8 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
                return;
 
        priv->vlan.filter_disabled = false;
+       if (priv->netdev->flags & IFF_PROMISC)
+               return;
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
@@ -607,6 +609,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
                return;
 
        priv->vlan.filter_disabled = true;
+       if (priv->netdev->flags & IFF_PROMISC)
+               return;
        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
@@ -717,8 +721,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
        bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
        bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
 
-       if (enable_promisc)
+       if (enable_promisc) {
                mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+               if (!priv->vlan.filter_disabled)
+                       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
+       }
        if (enable_allmulti)
                mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
        if (enable_broadcast)
@@ -730,8 +738,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
                mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
        if (disable_allmulti)
                mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
-       if (disable_promisc)
+       if (disable_promisc) {
+               if (!priv->vlan.filter_disabled)
+                       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
                mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+       }
 
        ea->promisc_enabled   = promisc_enabled;
        ea->allmulti_enabled  = allmulti_enabled;
index 9b81e1ceb8dec8454506a5e70f39254ba16a121e..f5deb642d0d6c0e693805e34234cbb0cb2e5ccc6 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/vmalloc.h>
+#include <linux/hardirq.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
@@ -57,6 +58,91 @@ enum {
        MLX5_HEALTH_SYNDR_HIGH_TEMP             = 0x10
 };
 
+enum {
+       MLX5_NIC_IFC_FULL               = 0,
+       MLX5_NIC_IFC_DISABLED           = 1,
+       MLX5_NIC_IFC_NO_DRAM_NIC        = 2
+};
+
+static u8 get_nic_interface(struct mlx5_core_dev *dev)
+{
+       return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
+}
+
+static void trigger_cmd_completions(struct mlx5_core_dev *dev)
+{
+       unsigned long flags;
+       u64 vector;
+
+       /* wait for pending handlers to complete */
+       synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
+       spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+       vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+       if (!vector)
+               goto no_trig;
+
+       vector |= MLX5_TRIGGERED_CMD_COMP;
+       spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+       mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+       mlx5_cmd_comp_handler(dev, vector);
+       return;
+
+no_trig:
+       spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
+
+static int in_fatal(struct mlx5_core_dev *dev)
+{
+       struct mlx5_core_health *health = &dev->priv.health;
+       struct health_buffer __iomem *h = health->health;
+
+       if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+               return 1;
+
+       if (ioread32be(&h->fw_ver) == 0xffffffff)
+               return 1;
+
+       return 0;
+}
+
+void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+{
+       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+               return;
+
+       mlx5_core_err(dev, "start\n");
+       if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+               dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+
+       mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+       mlx5_core_err(dev, "end\n");
+}
+
+static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
+{
+       u8 nic_interface = get_nic_interface(dev);
+
+       switch (nic_interface) {
+       case MLX5_NIC_IFC_FULL:
+               mlx5_core_warn(dev, "Expected to see disabled NIC but it is in full driver mode\n");
+               break;
+
+       case MLX5_NIC_IFC_DISABLED:
+               mlx5_core_warn(dev, "starting teardown\n");
+               break;
+
+       case MLX5_NIC_IFC_NO_DRAM_NIC:
+               mlx5_core_warn(dev, "Expected to see disabled NIC but it is in no-DRAM-NIC mode\n");
+               break;
+       default:
+               mlx5_core_warn(dev, "Expected to see disabled NIC but it has invalid value %d\n",
+                              nic_interface);
+       }
+
+       mlx5_disable_device(dev);
+}
+
 static void health_care(struct work_struct *work)
 {
        struct mlx5_core_health *health;
@@ -67,6 +153,7 @@ static void health_care(struct work_struct *work)
        priv = container_of(health, struct mlx5_priv, health);
        dev = container_of(priv, struct mlx5_core_dev, priv);
        mlx5_core_warn(dev, "handling bad device here\n");
+       mlx5_handle_bad_state(dev);
 }
 
 static const char *hsynd_str(u8 synd)
@@ -122,6 +209,10 @@ static void print_health_info(struct mlx5_core_dev *dev)
        u32 fw;
        int i;
 
+       /* If the syndrome is 0, the device is OK and there is no need to print the buffer */
+       if (!ioread8(&h->synd))
+               return;
+
        for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
                dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
 
@@ -136,13 +227,29 @@ static void print_health_info(struct mlx5_core_dev *dev)
        dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
 }
 
+static unsigned long get_next_poll_jiffies(void)
+{
+       unsigned long next;
+
+       get_random_bytes(&next, sizeof(next));
+       next %= HZ;
+       next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+
+       return next;
+}
+
 static void poll_health(unsigned long data)
 {
        struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
        struct mlx5_core_health *health = &dev->priv.health;
-       unsigned long next;
        u32 count;
 
+       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+               trigger_cmd_completions(dev);
+               mod_timer(&health->timer, get_next_poll_jiffies());
+               return;
+       }
+
        count = ioread32be(health->health_counter);
        if (count == health->prev)
                ++health->miss_counter;
@@ -151,14 +258,16 @@ static void poll_health(unsigned long data)
 
        health->prev = count;
        if (health->miss_counter == MAX_MISSES) {
-               mlx5_core_err(dev, "device's health compromised\n");
+               dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
                print_health_info(dev);
-               queue_work(health->wq, &health->work);
        } else {
-               get_random_bytes(&next, sizeof(next));
-               next %= HZ;
-               next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
-               mod_timer(&health->timer, next);
+               mod_timer(&health->timer, get_next_poll_jiffies());
+       }
+
+       if (in_fatal(dev) && !health->sick) {
+               health->sick = true;
+               print_health_info(dev);
+               queue_work(health->wq, &health->work);
        }
 }
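Taken together, the hunks above turn the health poll into a small recovery state machine; a comment-only summary of the flow (names as in this file):

/*
 * poll_health(), roughly once per second with random jitter:
 *   - state == INTERNAL_ERROR: trigger_cmd_completions() so waiters
 *     on dead commands are released; keep polling.
 *   - health counter stalled MAX_MISSES times: print_health_info().
 *   - in_fatal() and not yet marked sick: mark sick, print info and
 *     queue health_care() -> mlx5_handle_bad_state()
 *                         -> mlx5_disable_device().
 */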
 
index b6edc58766adeb98a46a526323d54f8897402c8a..2388aec208fa92fc56da476882b51fd6d0d2b6d9 100644 (file)
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cq.h>
 #include <linux/mlx5/qp.h>
 #include <linux/mlx5/srq.h>
 #include <linux/debugfs.h>
 #include <linux/kmod.h>
 #include <linux/mlx5/mlx5_ifc.h>
 #include "mlx5_core.h"
 
@@ -151,6 +153,25 @@ static struct mlx5_profile profile[] = {
        },
 };
 
+#define FW_INIT_TIMEOUT_MILI   2000
+#define FW_INIT_WAIT_MS                2
+
+static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
+{
+       unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
+       int err = 0;
+
+       while (fw_initializing(dev)) {
+               if (time_after(jiffies, end)) {
+                       err = -EBUSY;
+                       break;
+               }
+               msleep(FW_INIT_WAIT_MS);
+       }
+
+       return err;
+}
+
 static int set_dma_caps(struct pci_dev *pdev)
 {
        int err;
@@ -181,6 +202,34 @@ static int set_dma_caps(struct pci_dev *pdev)
        return err;
 }
 
+static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
+{
+       struct pci_dev *pdev = dev->pdev;
+       int err = 0;
+
+       mutex_lock(&dev->pci_status_mutex);
+       if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
+               err = pci_enable_device(pdev);
+               if (!err)
+                       dev->pci_status = MLX5_PCI_STATUS_ENABLED;
+       }
+       mutex_unlock(&dev->pci_status_mutex);
+
+       return err;
+}
+
+static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
+{
+       struct pci_dev *pdev = dev->pdev;
+
+       mutex_lock(&dev->pci_status_mutex);
+       if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
+               pci_disable_device(pdev);
+               dev->pci_status = MLX5_PCI_STATUS_DISABLED;
+       }
+       mutex_unlock(&dev->pci_status_mutex);
+}
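The enable/disable pair above makes PCI toggling idempotent under a mutex, so the probe/remove paths and the error handlers added further below can each call them without risking a double pci_enable_device()/pci_disable_device(). The same guard idiom in generic form (resource_ctx, do_enable and the RES_* states are hypothetical):

static int resource_enable_once(struct resource_ctx *res)
{
	int err = 0;

	mutex_lock(&res->lock);
	if (res->state == RES_OFF) {	/* no-op if already enabled */
		err = do_enable(res);
		if (!err)
			res->state = RES_ON;
	}
	mutex_unlock(&res->lock);
	return err;
}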
+
 static int request_bar(struct pci_dev *pdev)
 {
        int err = 0;
@@ -807,7 +856,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        if (!priv->dbg_root)
                return -ENOMEM;
 
-       err = pci_enable_device(pdev);
+       err = mlx5_pci_enable_device(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                goto err_dbg;
@@ -841,7 +890,7 @@ err_clr_master:
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
 err_disable:
-       pci_disable_device(dev->pdev);
+       mlx5_pci_disable_device(dev);
 
 err_dbg:
        debugfs_remove(priv->dbg_root);
@@ -853,7 +902,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        iounmap(dev->iseg);
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
-       pci_disable_device(dev->pdev);
+       mlx5_pci_disable_device(dev);
        debugfs_remove(priv->dbg_root);
 }
 
@@ -863,13 +912,32 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        struct pci_dev *pdev = dev->pdev;
        int err;
 
+       mutex_lock(&dev->intf_state_mutex);
+       if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+               dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
+                        __func__);
+               goto out;
+       }
+
        dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
                 fw_rev_min(dev), fw_rev_sub(dev));
 
+       /* On load, clear any previous indication of internal error;
+        * the device is up.
+        */
+       dev->state = MLX5_DEVICE_STATE_UP;
+
        err = mlx5_cmd_init(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
-               return err;
+               goto out_err;
+       }
+
+       err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
+       if (err) {
+               dev_err(&dev->pdev->dev, "Firmware over %d ms in initializing state, aborting\n",
+                       FW_INIT_TIMEOUT_MILI);
+               goto out_err;
        }
 
        mlx5_pagealloc_init(dev);
@@ -994,6 +1062,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        if (err)
                pr_info("failed request module on %s\n", MLX5_IB_MOD);
 
+       dev->interface_state = MLX5_INTERFACE_STATE_UP;
+out:
+       mutex_unlock(&dev->intf_state_mutex);
+
        return 0;
 
 err_reg_dev:
@@ -1024,7 +1096,7 @@ err_stop_poll:
        mlx5_stop_health_poll(dev);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-               return err;
+               goto out_err;
        }
 
 err_pagealloc_stop:
@@ -1040,13 +1112,23 @@ err_pagealloc_cleanup:
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);
 
+out_err:
+       dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+       mutex_unlock(&dev->intf_state_mutex);
+
        return err;
 }
 
 static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 {
-       int err;
+       int err = 0;
 
+       mutex_lock(&dev->intf_state_mutex);
+       if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+               dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
+                        __func__);
+               goto out;
+       }
        mlx5_unregister_device(dev);
        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
@@ -1072,10 +1154,12 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        mlx5_cmd_cleanup(dev);
 
 out:
+       dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+       mutex_unlock(&dev->intf_state_mutex);
        return err;
 }
 
-static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                     unsigned long param)
 {
        struct mlx5_priv *priv = &dev->priv;
@@ -1125,6 +1209,8 @@ static int init_one(struct pci_dev *pdev,
 
        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
+       mutex_init(&dev->pci_status_mutex);
+       mutex_init(&dev->intf_state_mutex);
        err = mlx5_pci_init(dev, priv);
        if (err) {
                dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
@@ -1172,6 +1258,112 @@ static void remove_one(struct pci_dev *pdev)
        kfree(dev);
 }
 
+static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+                                             pci_channel_state_t state)
+{
+       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+       struct mlx5_priv *priv = &dev->priv;
+
+       dev_info(&pdev->dev, "%s was called\n", __func__);
+       mlx5_enter_error_state(dev);
+       mlx5_unload_one(dev, priv);
+       mlx5_pci_disable_device(dev);
+       return state == pci_channel_io_perm_failure ?
+               PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
+{
+       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+       int err = 0;
+
+       dev_info(&pdev->dev, "%s was called\n", __func__);
+
+       err = mlx5_pci_enable_device(dev);
+       if (err) {
+               dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+                       , __func__, err);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+       pci_set_master(pdev);
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+
+       return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+       mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+/* Wait for the device to show vital signs. For now we check
+ * that we can read the device ID and that the health counter
+ * shows a non-zero value that is different from 0xffffffff.
+ */
+static void wait_vital(struct pci_dev *pdev)
+{
+       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+       struct mlx5_core_health *health = &dev->priv.health;
+       const int niter = 100;
+       u32 count;
+       u16 did;
+       int i;
+
+       /* Wait for firmware to be ready after reset */
+       msleep(1000);
+       for (i = 0; i < niter; i++) {
+               if (pci_read_config_word(pdev, 2, &did)) {
+                       dev_warn(&pdev->dev, "failed reading config word\n");
+                       break;
+               }
+               if (did == pdev->device) {
+                       dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
+                       break;
+               }
+               msleep(50);
+       }
+       if (i == niter)
+               dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+
+       for (i = 0; i < niter; i++) {
+               count = ioread32be(health->health_counter);
+               if (count && count != 0xffffffff) {
+                       dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+                       break;
+               }
+               msleep(50);
+       }
+
+       if (i == niter)
+               dev_warn(&pdev->dev, "%s-%d: could not read valid health counter\n", __func__, __LINE__);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+       struct mlx5_priv *priv = &dev->priv;
+       int err;
+
+       dev_info(&pdev->dev, "%s was called\n", __func__);
+
+       pci_save_state(pdev);
+       wait_vital(pdev);
+
+       err = mlx5_load_one(dev, priv);
+       if (err)
+               dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
+                       , __func__, err);
+       else
+               dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+}
+
+static const struct pci_error_handlers mlx5_err_handler = {
+       .error_detected = mlx5_pci_err_detected,
+       .slot_reset     = mlx5_pci_slot_reset,
+       .resume         = mlx5_pci_resume
+};
+
 static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
        { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
@@ -1188,7 +1380,8 @@ static struct pci_driver mlx5_core_driver = {
        .name           = DRIVER_NAME,
        .id_table       = mlx5_core_pci_table,
        .probe          = init_one,
-       .remove         = remove_one
+       .remove         = remove_one,
+       .err_handler    = &mlx5_err_handler
 };
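With err_handler registered, the PCI core drives recovery through the three callbacks above in a fixed order; a comment-only sketch of that sequence as implemented here:

/*
 * AER recovery sequence for mlx5_core:
 *   1. .error_detected -> mlx5_pci_err_detected(): enter error state,
 *      unload the device, disable PCI; returns NEED_RESET (or
 *      DISCONNECT on permanent failure).
 *   2. .slot_reset -> mlx5_pci_slot_reset(): re-enable PCI, restore
 *      bus mastering, power state and config space; RECOVERED.
 *   3. .resume -> mlx5_pci_resume(): wait_vital() until the device ID
 *      and health counter read back sane, then mlx5_load_one().
 */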
 
 static int __init init(void)
index 30c0be721b089073b6b028630a64fa79e9e1fc17..cee5b7a839bc335fc140ca03eb9aadda09b25900 100644 (file)
@@ -86,6 +86,10 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
 int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+                    unsigned long param);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_disable_device(struct mlx5_core_dev *dev);
 
 void mlx5e_init(void);
 void mlx5e_cleanup(void);
index 76432a510ac242d641fbda4fdbe57d9287ad2c6b..1cda5d268ec96e27b8121ca4b00147d576b8b48b 100644 (file)
@@ -493,15 +493,20 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
        struct fw_page *fwp;
        struct rb_node *p;
        int nclaimed = 0;
-       int err;
+       int err = 0;
 
        do {
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
-                       err = reclaim_pages(dev, fwp->func_id,
-                                           optimal_reclaimed_pages(),
-                                           &nclaimed);
+                       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+                               free_4k(dev, fwp->addr);
+                               nclaimed = 1;
+                       } else {
+                               err = reclaim_pages(dev, fwp->func_id,
+                                                   optimal_reclaimed_pages(),
+                                                   &nclaimed);
+                       }
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
                                               err);
index ae302614e74be279d028feaffc4982153bc9e266..a87e773e93f3439dbd4cf93bc08a9a7563f4977b 100644 (file)
@@ -302,7 +302,7 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
        u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
 
        memset(in, 0, sizeof(in));
-       MLX5_SET(ptys_reg, in, local_port, local_port);
+       MLX5_SET(pvlc_reg, in, local_port, local_port);
 
        return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
                                    pvlc_size, MLX5_REG_PVLC, 0, 0);
index 2941d9c5ae486250f340915901464b22cc5aef93..e36e12219c9be2efbf919e9f3427f5dc26e37ce2 100644 (file)
@@ -30,3 +30,14 @@ config MLXSW_SWITCHX2
 
          To compile this driver as a module, choose M here: the
          module will be called mlxsw_switchx2.
+
+config MLXSW_SPECTRUM
+       tristate "Mellanox Technologies Spectrum support"
+       depends on MLXSW_CORE && NET_SWITCHDEV
+       default m
+       ---help---
+         This driver supports Mellanox Technologies Spectrum Ethernet
+         Switch ASICs.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mlxsw_spectrum.
index 0a05f65ee81480f9f976176918e1869cc6a36a72..af015818fd19030b2b699913f75a8368c733745b 100644 (file)
@@ -4,3 +4,6 @@ obj-$(CONFIG_MLXSW_PCI)         += mlxsw_pci.o
 mlxsw_pci-objs                 := pci.o
 obj-$(CONFIG_MLXSW_SWITCHX2)   += mlxsw_switchx2.o
 mlxsw_switchx2-objs            := switchx2.o
+obj-$(CONFIG_MLXSW_SPECTRUM)   += mlxsw_spectrum.o
+mlxsw_spectrum-objs            := spectrum.o spectrum_buffers.o \
+                                  spectrum_switchdev.o
index 770db17eb03f4701228e994cfde910679551c9c5..cd63b82636888a2c7e9a90f41a00a8d96b28ae6f 100644 (file)
@@ -464,6 +464,8 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
  * passed in this command must be pinned.
  */
 
+#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32
+
 static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
                                   char *in_mbox, u32 vpm_entries_count)
 {
@@ -568,7 +570,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
 
-/* cmd_mbox_config_profile_set_fid_based
+/* cmd_mbox_config_profile_set_flood_mode
  * Capability bit. Setting a bit to 1 configures the profile
  * according to the mailbox contents.
  */
@@ -649,12 +651,8 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
 MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
 
 /* cmd_mbox_config_profile_max_flood_tables
- * Maximum number of Flooding Tables. Flooding Tables are associated to
- * the different packet types for the different switch partitions.
- * Note that the table size depends on the fid_based mode.
- * In SwitchX silicon, tables are split equally between the switch
- * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
- * with swid-1 and the last 4 are associated with swid-2.
+ * Maximum number of single-entry flooding tables. Different flooding tables
+ * can be associated with different packet types.
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
 
@@ -665,15 +663,42 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
 
-/* cmd_mbox_config_profile_fid_based
- * FID Based Flood Mode
- * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
- * 01 Use FID to offset the index to the Port Group Table (pgi)
- * 10 Use FID to offset the index to the Port Group Table (pgi) and
- * the Multicast ID
+/* cmd_mbox_config_profile_flood_mode
+ * Flooding mode to use.
+ * 0-2 - Backward compatible modes for SwitchX devices.
+ * 3 - Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset tables.
+ * max_fid_flood_tables indicates the number of per-FID tables.
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
 
+/* cmd_mbox_config_profile_max_fid_offset_flood_tables
+ * Maximum number of FID-offset flooding tables.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+            max_fid_offset_flood_tables, 0x34, 24, 4);
+
+/* cmd_mbox_config_profile_fid_offset_flood_table_size
+ * The size (number of entries) of each FID-offset flood table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+            fid_offset_flood_table_size, 0x34, 0, 16);
+
+/* cmd_mbox_config_profile_max_fid_flood_tables
+ * Maximum number of per-FID flooding tables.
+ *
+ * Note: These flooding tables cover special FIDs only (vFIDs), i.e. FID
+ * values 4K and higher.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4);
+
+/* cmd_mbox_config_profile_fid_flood_table_size
+ * The size (number of entries) of each per-FID table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16);
+
 /* cmd_mbox_config_profile_max_ib_mc
  * Maximum number of multicast FDB records for InfiniBand
  * FDB (in 512 chunks) per InfiniBand switch partition.
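Drivers feed these mailbox fields from struct mlxsw_config_profile (extended in the core.h hunk below). A hedged initializer fragment using the new fields, with purely illustrative sizes (my_profile is hypothetical):

static const struct mlxsw_config_profile my_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= 3,	/* mixed mode */
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= 1024,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= 4096,
};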
index dbcaf5df8967e828f85648de4edce281860f2a61..bd80ac714a8a0a03d81ec77942bfed253a9362fe 100644 (file)
@@ -374,26 +374,31 @@ static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
        int err;
        int ret;
 
+       mlxsw_core->emad.trans_active = true;
+
        err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
        if (err) {
                dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
                        mlxsw_core->emad.tid);
                dev_kfree_skb(skb);
-               return err;
+               goto trans_inactive_out;
        }
 
-       mlxsw_core->emad.trans_active = true;
        ret = wait_event_timeout(mlxsw_core->emad.wait,
                                 !(mlxsw_core->emad.trans_active),
                                 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
        if (!ret) {
                dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
                         mlxsw_core->emad.tid);
-               mlxsw_core->emad.trans_active = false;
-               return -EIO;
+               err = -EIO;
+               goto trans_inactive_out;
        }
 
        return 0;
+
+trans_inactive_out:
+       mlxsw_core->emad.trans_active = false;
+       return err;
 }
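Moving the trans_active assignment ahead of the transmit closes a lost-wakeup race; a comment-only sketch of the window the reordering removes:

/*
 * Old order (racy):                     New order:
 *   transmit EMAD                         trans_active = true;
 *   <completion IRQ: sees                 transmit EMAD
 *    trans_active == false,               <completion IRQ: sees true,
 *    wakeup is lost>                       clears flag, wakes waiter>
 *   trans_active = true;
 *   wait_event_timeout() -> timeout
 */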
 
 static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
@@ -506,7 +511,6 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
                return err;
 
        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
-                           MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
                            MLXSW_TRAP_ID_ETHEMAD);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 }
@@ -551,8 +555,8 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
 {
        char hpkt_pl[MLXSW_REG_HPKT_LEN];
 
+       mlxsw_core->emad.use_emad = false;
        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
-                           MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
                            MLXSW_TRAP_ID_ETHEMAD);
        mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 
index 165808471188567613efde6488b34eca38ed1522..807827350a89900b0d25b108c9f894c1d5622be5 100644 (file)
@@ -54,6 +54,7 @@
        MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
 
 #define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+#define MLXSW_DEVICE_KIND_SPECTRUM "spectrum"
 
 struct mlxsw_core;
 struct mlxsw_driver;
@@ -153,6 +154,10 @@ struct mlxsw_config_profile {
        u8      max_flood_tables;
        u8      max_vid_flood_tables;
        u8      flood_mode;
+       u8      max_fid_offset_flood_tables;
+       u16     fid_offset_flood_table_size;
+       u8      max_fid_flood_tables;
+       u16     fid_flood_table_size;
        u16     max_ib_mc;
        u16     max_pkey;
        u8      ar_sec;
index ffd55d030ce28dbf902f6990a0dfb7f96b08a766..a94dbda6590b36ee8a59a90c36f1ede510a2ae14 100644 (file)
@@ -171,15 +171,21 @@ static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
 }
 
 static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
-                                           struct mlxsw_item *item)
+                                           struct mlxsw_item *item,
+                                           unsigned short index)
 {
-       memcpy(dst, &buf[item->offset], item->size.bytes);
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+       memcpy(dst, &buf[offset], item->size.bytes);
 }
 
-static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
-                                         struct mlxsw_item *item)
+static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
+                                         struct mlxsw_item *item,
+                                         unsigned short index)
 {
-       memcpy(&buf[item->offset], src, item->size.bytes);
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+       memcpy(&buf[offset], src, item->size.bytes);
 }
 
 static inline u16
@@ -187,6 +193,7 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
 {
        u16 max_index, be_index;
        u16 offset;             /* byte offset inside the array */
+       u8 in_byte_index;
 
        BUG_ON(index && !item->element_size);
        if (item->offset % sizeof(u32) != 0 ||
@@ -199,7 +206,8 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
        max_index = (item->size.bytes << 3) / item->element_size - 1;
        be_index = max_index - index;
        offset = be_index * item->element_size >> 3;
-       *shift = index % (BITS_PER_BYTE / item->element_size) << 1;
+       in_byte_index  = index % (BITS_PER_BYTE / item->element_size);
+       *shift = in_byte_index * item->element_size;
 
        return item->offset + offset;
 }
@@ -371,12 +379,40 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                   \
 static inline void                                                             \
 mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)                \
 {                                                                              \
-       __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+       __mlxsw_item_memcpy_from(buf, dst,                                      \
+                                &__ITEM_NAME(_type, _cname, _iname), 0);       \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src)    \
+{                                                                              \
+       __mlxsw_item_memcpy_to(buf, src,                                        \
+                              &__ITEM_NAME(_type, _cname, _iname), 0);         \
+}
+
+#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes,     \
+                              _step, _instepoffset)                            \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .step = _step,                                                          \
+       .in_step_offset = _instepoffset,                                        \
+       .size = {.bytes = _sizebytes,},                                         \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf,                   \
+                                                 unsigned short index,         \
+                                                 char *dst)                    \
+{                                                                              \
+       __mlxsw_item_memcpy_from(buf, dst,                                      \
+                                &__ITEM_NAME(_type, _cname, _iname), index);   \
 }                                                                              \
 static inline void                                                             \
-mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src)          \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf,                     \
+                                               unsigned short index,           \
+                                               const char *src)                \
 {                                                                              \
-       __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname));  \
+       __mlxsw_item_memcpy_to(buf, src,                                        \
+                              &__ITEM_NAME(_type, _cname, _iname), index);     \
 }
 
 #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,       \
index 462cea31ecbb7be6387512dc741286d7fb56658e..371ea3f56aed1c5d9f3154869e812dee47930471 100644 (file)
@@ -57,6 +57,7 @@ static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
 
 static const struct pci_device_id mlxsw_pci_id_table[] = {
        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+       {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
        {0, }
 };
 
@@ -67,6 +68,8 @@ static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
        switch (id->device) {
        case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
                return MLXSW_DEVICE_KIND_SWITCHX2;
+       case PCI_DEVICE_ID_MELLANOX_SPECTRUM:
+               return MLXSW_DEVICE_KIND_SPECTRUM;
        default:
                BUG();
        }
@@ -171,8 +174,8 @@ struct mlxsw_pci {
        struct msix_entry msix_entry;
        struct mlxsw_core *core;
        struct {
-               u16 num_pages;
                struct mlxsw_pci_mem_item *items;
+               unsigned int count;
        } fw_area;
        struct {
                struct mlxsw_pci_mem_item out_mbox;
@@ -431,8 +434,7 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
 
        mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
        if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
-               if (net_ratelimit())
-                       dev_err(&pdev->dev, "failed to dma map tx frag\n");
+               dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
                return -EIO;
        }
        mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
@@ -497,6 +499,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                              struct mlxsw_pci_queue *q)
 {
        struct mlxsw_pci_queue_elem_info *elem_info;
+       u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
        int i;
        int err;
 
@@ -504,9 +507,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
        q->consumer_counter = 0;
 
        /* Set CQ of same number of this RDQ with base
-        * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs.
+        * above SDQ count as the lower ones are assigned to SDQs.
         */
-       mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+       mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
        mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -699,8 +702,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 put_new_skb:
        memset(wqe, 0, q->elem_size);
        err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
-       if (err && net_ratelimit())
-               dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+       if (err)
+               dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
        /* Everything is set up, ring doorbell to pass elem to HW */
        q->producer_counter++;
        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -830,7 +833,8 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
 {
        struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
        struct mlxsw_pci *mlxsw_pci = q->pci;
-       unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+       u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
+       unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
        char *eqe;
        u8 cqn;
        bool cq_handle = false;
@@ -866,7 +870,7 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
 
        if (!cq_handle)
                return;
-       for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+       for_each_set_bit(cqn, active_cqns, cq_count) {
                q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
                mlxsw_pci_queue_tasklet_schedule(q);
        }
@@ -1067,10 +1071,8 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
        num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
        eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
 
-       if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
-           (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
-           (num_cqs != MLXSW_PCI_CQS_COUNT) ||
-           (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+       if (num_sdqs + num_rdqs > num_cqs ||
+           num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
                dev_err(&pdev->dev, "Unsupported number of queues\n");
                return -EINVAL;
        }
@@ -1215,6 +1217,14 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
                        mbox, profile->max_flood_tables);
                mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
                        mbox, profile->max_vid_flood_tables);
+               mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
+                       mbox, profile->max_fid_offset_flood_tables);
+               mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
+                       mbox, profile->fid_offset_flood_table_size);
+               mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
+                       mbox, profile->max_fid_flood_tables);
+               mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
+                       mbox, profile->fid_flood_table_size);
        }
        if (profile->used_flood_mode) {
                mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
@@ -1272,6 +1282,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                  u16 num_pages)
 {
        struct mlxsw_pci_mem_item *mem_item;
+       int nent = 0;
        int i;
        int err;
 
@@ -1279,7 +1290,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                           GFP_KERNEL);
        if (!mlxsw_pci->fw_area.items)
                return -ENOMEM;
-       mlxsw_pci->fw_area.num_pages = num_pages;
+       mlxsw_pci->fw_area.count = num_pages;
 
        mlxsw_cmd_mbox_zero(mbox);
        for (i = 0; i < num_pages; i++) {
@@ -1293,13 +1304,22 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                        err = -ENOMEM;
                        goto err_alloc;
                }
-               mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
-               mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+               mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
+               mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
+               if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
+                       err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+                       if (err)
+                               goto err_cmd_map_fa;
+                       nent = 0;
+                       mlxsw_cmd_mbox_zero(mbox);
+               }
        }
 
-       err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
-       if (err)
-               goto err_cmd_map_fa;
+       if (nent) {
+               err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+               if (err)
+                       goto err_cmd_map_fa;
+       }
 
        return 0;
 
@@ -1322,7 +1342,7 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
 
        mlxsw_cmd_unmap_fa(mlxsw_pci->core);
 
-       for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+       for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
                mem_item = &mlxsw_pci->fw_area.items[i];
 
                pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
@@ -1582,11 +1602,11 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 
        if (in_mbox)
                memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
-       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
-       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
 
-       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
-       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
 
        mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
        mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
index 1ef9664b451255bdc7c2d08a1fabffe91256c9aa..142f33d978c5f940fb3ebb0bd721adcab62d8bb0 100644 (file)
@@ -40,6 +40,7 @@
 #include "item.h"
 
 #define PCI_DEVICE_ID_MELLANOX_SWITCHX2        0xc738
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM        0xcb84
 #define MLXSW_PCI_BAR0_SIZE            (1024 * 1024) /* 1MB */
 #define MLXSW_PCI_PAGE_SIZE            4096
 
@@ -71,9 +72,7 @@
 #define MLXSW_PCI_DOORBELL(offset, type_offset, num)   \
        ((offset) + (type_offset) + (num) * 4)
 
-#define MLXSW_PCI_RDQS_COUNT   24
-#define MLXSW_PCI_SDQS_COUNT   24
-#define MLXSW_PCI_CQS_COUNT    (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_CQS_MAX      96
 #define MLXSW_PCI_EQS_COUNT    2
 #define MLXSW_PCI_EQ_ASYNC_NUM 0
 #define MLXSW_PCI_EQ_COMP_NUM  1
index 096e1c12175a89481838db432cf9be676d339bb5..4fcba46bbae06c38a23825d97818fae4cf6fc1a9 100644 (file)
@@ -99,57 +99,6 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = {
  */
 MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
 
-/* SMID - Switch Multicast ID
- * --------------------------
- * In multi-chip configuration, each device should maintain mapping between
- * Multicast ID (MID) into a list of local ports. This mapping is used in all
- * the devices other than the ingress device, and is implemented as part of the
- * FDB. The MID record maps from a MID, which is a unique identifier of the
- * multicast group within the stacking domain, into a list of local ports into
- * which the packet is replicated.
- */
-#define MLXSW_REG_SMID_ID 0x2007
-#define MLXSW_REG_SMID_LEN 0x420
-
-static const struct mlxsw_reg_info mlxsw_reg_smid = {
-       .id = MLXSW_REG_SMID_ID,
-       .len = MLXSW_REG_SMID_LEN,
-};
-
-/* reg_smid_swid
- * Switch partition ID.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
-
-/* reg_smid_mid
- * Multicast identifier - global identifier that represents the multicast group
- * across all devices
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
-
-/* reg_smid_port
- * Local port membership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
-
-/* reg_smid_port_mask
- * Local port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
-{
-       MLXSW_REG_ZERO(smid, payload);
-       mlxsw_reg_smid_swid_set(payload, 0);
-       mlxsw_reg_smid_mid_set(payload, mid);
-       mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
-       mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
-}
-
 /* SSPR - Switch System Port Record Register
  * -----------------------------------------
  * Configures the system port to local port mapping.
@@ -208,11 +157,359 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
        mlxsw_reg_sspr_system_port_set(payload, local_port);
 }
 
+/* SFDAT - Switch Filtering Database Aging Time
+ * --------------------------------------------
+ * Controls the switch aging time. The aging time can be set per switch
+ * partition.
+ */
+#define MLXSW_REG_SFDAT_ID 0x2009
+#define MLXSW_REG_SFDAT_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sfdat = {
+       .id = MLXSW_REG_SFDAT_ID,
+       .len = MLXSW_REG_SFDAT_LEN,
+};
+
+/* reg_sfdat_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8);
+
+/* reg_sfdat_age_time
+ * Aging time in seconds
+ * Min - 10 seconds
+ * Max - 1,000,000 seconds
+ * Default is 300 seconds.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20);
+
+static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time)
+{
+       MLXSW_REG_ZERO(sfdat, payload);
+       mlxsw_reg_sfdat_swid_set(payload, 0);
+       mlxsw_reg_sfdat_age_time_set(payload, age_time);
+}
+
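A minimal usage sketch for SFDAT, assuming a caller holding a struct mlxsw_core pointer (here `mlxsw_core`) and following the pack-then-write pattern used throughout this driver:

	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	/* Age out dynamic FDB entries after 300 seconds (the documented
	 * default).
	 */
	mlxsw_reg_sfdat_pack(sfdat_pl, 300);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfdat), sfdat_pl);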
+/* SFD - Switch Filtering Database
+ * -------------------------------
+ * The following register defines the access to the filtering database.
+ * The register supports querying, adding, removing and modifying the database.
+ * The access is optimized for bulk updates in which case more than one
+ * FDB record is present in the same command.
+ */
+#define MLXSW_REG_SFD_ID 0x200A
+#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFD_REC_MAX_COUNT 64
+#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN +    \
+                          MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfd = {
+       .id = MLXSW_REG_SFD_ID,
+       .len = MLXSW_REG_SFD_LEN,
+};
+
+/* reg_sfd_swid
+ * Switch partition ID for queries. Reserved on Write.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfd_op {
+       /* Dump entire FDB (process according to record_locator) */
+       MLXSW_REG_SFD_OP_QUERY_DUMP = 0,
+       /* Query records by {MAC, VID/FID} value */
+       MLXSW_REG_SFD_OP_QUERY_QUERY = 1,
+       /* Query and clear activity. Query records by {MAC, VID/FID} value */
+       MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2,
+       /* Test. Response indicates if each of the records could be
+        * added to the FDB.
+        */
+       MLXSW_REG_SFD_OP_WRITE_TEST = 0,
+       /* Add/modify. Aged-out records cannot be added. This command removes
+        * the learning notification of the {MAC, VID/FID}. Response includes
+        * the entries that were added to the FDB.
+        */
+       MLXSW_REG_SFD_OP_WRITE_EDIT = 1,
+       /* Remove record by {MAC, VID/FID}. This command also removes
+        * the learning notification and aged-out notifications
+        * of the {MAC, VID/FID}. The response provides current (pre-removal)
+        * entries as non-aged-out.
+        */
+       MLXSW_REG_SFD_OP_WRITE_REMOVE = 2,
+       /* Remove learned notification by {MAC, VID/FID}. The response provides
+        * the removed learning notification.
+        */
+       MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2,
+};
+
+/* reg_sfd_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2);
+
+/* reg_sfd_record_locator
+ * Used for querying the FDB. Use record_locator=0 to initiate the
+ * query. When a record is returned, a new record_locator is
+ * returned to be used in the subsequent query.
+ * Reserved for database update.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30);
+
+/* reg_sfd_num_rec
+ * Request: Number of records to read/add/modify/remove
+ * Response: Number of records read/added/replaced/removed
+ * See above description for more details.
+ * Range is 0..64
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8);
+
+static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op,
+                                     u32 record_locator)
+{
+       MLXSW_REG_ZERO(sfd, payload);
+       mlxsw_reg_sfd_op_set(payload, op);
+       mlxsw_reg_sfd_record_locator_set(payload, record_locator);
+}
+
+/* reg_sfd_rec_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8,
+                    MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_type {
+       MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
+};
+
+/* reg_sfd_rec_type
+ * FDB record type.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4,
+                    MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_policy {
+       /* Replacement disabled, aging disabled. */
+       MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0,
+       /* (mlag remote): Replacement enabled, aging disabled,
+        * learning notification enabled on this port.
+        */
+       MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1,
+       /* (ingress device): Replacement enabled, aging enabled. */
+       MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3,
+};
+
+/* reg_sfd_rec_policy
+ * Policy.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2,
+                    MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_a
+ * Activity. Set for new static entries. Set for static entries if a frame SMAC
+ * lookup hits on the entry.
+ * To clear the a bit, use "query and clear activity" op.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1,
+                    MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_mac
+ * MAC address.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6,
+                      MLXSW_REG_SFD_REC_LEN, 0x02);
+
+enum mlxsw_reg_sfd_rec_action {
+       /* forward */
+       MLXSW_REG_SFD_REC_ACTION_NOP = 0,
+       /* forward and trap, trap_id is FDB_TRAP */
+       MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
+       /* trap and do not forward, trap_id is FDB_TRAP */
+       MLXSW_REG_SFD_REC_ACTION_TRAP = 3,
+       MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
+};
+
+/* reg_sfd_rec_action
+ * Action to apply on the packet.
+ * Note: Dynamic entries can only be configured with NOP action.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
+                    MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_sub_port
+ * LAG sub port.
+ * Must be 0 if multichannel VEPA is not enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
+                    MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_fid_vid
+ * Filtering ID or VLAN ID
+ * For SwitchX and SwitchX-2:
+ * - Dynamic entries (policy 2,3) use FID
+ * - Static entries (policy 0) use VID
+ * - When independent learning is configured, VID=FID
+ * For Spectrum: use FID for both Dynamic and Static entries.
+ * VID should not be used.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+                    MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+                    MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
+                                        enum mlxsw_reg_sfd_rec_policy policy,
+                                        const char *mac, u16 vid,
+                                        enum mlxsw_reg_sfd_rec_action action,
+                                        u8 local_port)
+{
+       u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload);
+
+       if (rec_index >= num_rec)
+               mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1);
+       mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0);
+       mlxsw_reg_sfd_rec_type_set(payload, rec_index,
+                                  MLXSW_REG_SFD_REC_TYPE_UNICAST);
+       mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+       mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac);
+       mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
+       mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid);
+       mlxsw_reg_sfd_rec_action_set(payload, rec_index, action);
+       mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
+}
+
+static inline void
+mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
+                       char *mac, u16 *p_vid,
+                       u8 *p_local_port)
+{
+       mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
+       *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
+       *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
+}
+
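A hedged sketch of writing one static unicast FDB record with the helpers above; mac, vid, local_port and mlxsw_core are placeholders. The payload is too large for comfortable stack allocation, so it is heap-allocated, as the SPMS users later in this patch do:

	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;
	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	/* uc_pack() bumps num_rec as records are appended */
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
			      mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);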
+/* SFN - Switch FDB Notification Register
+ * --------------------------------------
+ * The switch provides notifications on newly learned FDB entries and
+ * aged out entries. The notifications can be polled by software.
+ */
+#define MLXSW_REG_SFN_ID 0x200B
+#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFN_REC_MAX_COUNT 64
+#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN +    \
+                          MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfn = {
+       .id = MLXSW_REG_SFN_ID,
+       .len = MLXSW_REG_SFN_LEN,
+};
+
+/* reg_sfn_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
+
+/* reg_sfn_num_rec
+ * Request: Number of learned notifications and aged-out notification
+ * records requested.
+ * Response: Number of notification records returned (must be smaller
+ * than or equal to the value requested)
+ * Range is 0..64
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8);
+
+static inline void mlxsw_reg_sfn_pack(char *payload)
+{
+       MLXSW_REG_ZERO(sfn, payload);
+       mlxsw_reg_sfn_swid_set(payload, 0);
+       mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
+}
+
+/* reg_sfn_rec_swid
+ * Switch partition ID.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8,
+                    MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfn_rec_type {
+       /* MAC addresses learned on a regular port. */
+       MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5,
+       /* Aged-out MAC address on a regular port */
+       MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
+};
+
+/* reg_sfn_rec_type
+ * Notification record type.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
+                    MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+/* reg_sfn_rec_mac
+ * MAC address.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
+                      MLXSW_REG_SFN_REC_LEN, 0x02);
+
+/* reg_sfn_mac_sub_port
+ * VEPA channel on the local port.
+ * 0 if multichannel VEPA is not enabled.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
+                    MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_fid
+ * Filtering identifier.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+                    MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+                    MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
+                                           char *mac, u16 *p_vid,
+                                           u8 *p_local_port)
+{
+       mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+       *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+       *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index);
+}
+
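The SFN register is polled rather than interrupt driven; a rough consumer loop, under the same placeholder assumptions as above:

	char *sfn_pl;
	u8 num_rec;
	int i, err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return -ENOMEM;
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sfn), sfn_pl);
	if (!err) {
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++) {
			char mac[ETH_ALEN];
			u16 fid;
			u8 local_port;

			if (mlxsw_reg_sfn_rec_type_get(sfn_pl, i) !=
			    MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC)
				continue;
			mlxsw_reg_sfn_mac_unpack(sfn_pl, i, mac, &fid,
						 &local_port);
			/* e.g. notify the bridge of the learned entry */
		}
	}
	kfree(sfn_pl);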
 /* SPMS - Switch Port MSTP/RSTP State Register
  * -------------------------------------------
  * Configures the spanning tree state of a physical port.
  */
-#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_ID 0x200D
 #define MLXSW_REG_SPMS_LEN 0x404
 
 static const struct mlxsw_reg_info mlxsw_reg_spms = {
@@ -243,20 +540,166 @@ enum mlxsw_reg_spms_state {
  */
 MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
 
-static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
-                                      enum mlxsw_reg_spms_state state)
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
 {
        MLXSW_REG_ZERO(spms, payload);
        mlxsw_reg_spms_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
+                                          enum mlxsw_reg_spms_state state)
+{
        mlxsw_reg_spms_state_set(payload, vid, state);
 }
 
+/* SPVID - Switch Port VID
+ * -----------------------
+ * Configures the default VID (PVID) of a port.
+ */
+#define MLXSW_REG_SPVID_ID 0x200E
+#define MLXSW_REG_SPVID_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_spvid = {
+       .id = MLXSW_REG_SPVID_ID,
+       .len = MLXSW_REG_SPVID_LEN,
+};
+
+/* reg_spvid_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
+
+/* reg_spvid_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+
+/* reg_spvid_pvid
+ * Port default VID
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
+
+static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
+{
+       MLXSW_REG_ZERO(spvid, payload);
+       mlxsw_reg_spvid_local_port_set(payload, local_port);
+       mlxsw_reg_spvid_pvid_set(payload, pvid);
+}
+
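Setting the PVID is likewise a single pack-then-write; a sketch with a placeholder local_port:

	char spvid_pl[MLXSW_REG_SPVID_LEN];
	int err;

	/* Make VID 1 the port's default VID */
	mlxsw_reg_spvid_pack(spvid_pl, local_port, 1);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spvid), spvid_pl);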
+/* SPVM - Switch Port VLAN Membership
+ * ----------------------------------
+ * The Switch Port VLAN Membership register configures the VLAN membership
+ * of a port in a VLAN denoted by VID. VLAN membership is managed per
+ * virtual port. The register can be used to add and remove VID(s) from a port.
+ */
+#define MLXSW_REG_SPVM_ID 0x200F
+#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN +  \
+                   MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvm = {
+       .id = MLXSW_REG_SPVM_ID,
+       .len = MLXSW_REG_SPVM_LEN,
+};
+
+/* reg_spvm_pt
+ * Priority tagged. If this bit is set, packets forwarded to the port with
+ * untagged VLAN membership (u bit is set) will be tagged with priority tag
+ * (VID=0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1);
+
+/* reg_spvm_pte
+ * Priority Tagged Update Enable. On Write operations, if this bit is cleared,
+ * the pt bit will NOT be updated. To update the pt bit, pte must be set.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
+
+/* reg_spvm_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
+
+/* reg_spvm_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8);
+
+/* reg_spvm_num_rec
+ * Number of records to update. Each record contains: i, e, u, vid.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8);
+
+/* reg_spvm_rec_i
+ * Ingress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_i,
+                    MLXSW_REG_SPVM_BASE_LEN, 14, 1,
+                    MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_e
+ * Egress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_e,
+                    MLXSW_REG_SPVM_BASE_LEN, 13, 1,
+                    MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_u
+ * Untagged - port is an untagged member - egress transmission uses untagged
+ * frames on VID<n>
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_u,
+                    MLXSW_REG_SPVM_BASE_LEN, 12, 1,
+                    MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_vid
+ * VLAN ID to which the i/e/u bits of the record apply.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
+                    MLXSW_REG_SPVM_BASE_LEN, 0, 12,
+                    MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
+                                      u16 vid_begin, u16 vid_end,
+                                      bool is_member, bool untagged)
+{
+       int size = vid_end - vid_begin + 1;
+       int i;
+
+       MLXSW_REG_ZERO(spvm, payload);
+       mlxsw_reg_spvm_local_port_set(payload, local_port);
+       mlxsw_reg_spvm_num_rec_set(payload, size);
+
+       for (i = 0; i < size; i++) {
+               mlxsw_reg_spvm_rec_i_set(payload, i, is_member);
+               mlxsw_reg_spvm_rec_e_set(payload, i, is_member);
+               mlxsw_reg_spvm_rec_u_set(payload, i, untagged);
+               mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i);
+       }
+}
+
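For example, making a port an untagged member of VIDs 1..10 (placeholders as before; the payload is heap-allocated because of its size):

	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;
	mlxsw_reg_spvm_pack(spvm_pl, local_port, 1, 10, true, true);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);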
 /* SFGC - Switch Flooding Group Configuration
  * ------------------------------------------
  * The following register controls the association of flooding tables and MIDs
  * to packet types used for flooding.
  */
-#define MLXSW_REG_SFGC_ID  0x2011
+#define MLXSW_REG_SFGC_ID 0x2011
 #define MLXSW_REG_SFGC_LEN 0x10
 
 static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
@@ -265,13 +708,15 @@ static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
 };
 
 enum mlxsw_reg_sfgc_type {
-       MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
-       MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
-       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
-       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
-       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
-       MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
-       MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+       MLXSW_REG_SFGC_TYPE_BROADCAST,
+       MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+       MLXSW_REG_SFGC_TYPE_RESERVED,
+       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+       MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL,
+       MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST,
+       MLXSW_REG_SFGC_TYPE_MAX,
 };
 
 /* reg_sfgc_type
@@ -408,7 +853,7 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
                                       unsigned int flood_table,
                                       unsigned int index,
                                       enum mlxsw_flood_table_type table_type,
-                                      unsigned int range)
+                                      unsigned int range, u8 port, bool set)
 {
        MLXSW_REG_ZERO(sftr, payload);
        mlxsw_reg_sftr_swid_set(payload, 0);
@@ -416,8 +861,8 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
        mlxsw_reg_sftr_index_set(payload, index);
        mlxsw_reg_sftr_table_type_set(payload, table_type);
        mlxsw_reg_sftr_range_set(payload, range);
-       mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
-       mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+       mlxsw_reg_sftr_port_set(payload, port, set);
+       mlxsw_reg_sftr_port_mask_set(payload, port, 1);
 }
 
 /* SPMLR - Switch Port MAC Learning Register
@@ -473,6 +918,285 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
        mlxsw_reg_spmlr_learn_mode_set(payload, mode);
 }
 
+/* SVFA - Switch VID to FID Allocation Register
+ * --------------------------------------------
+ * Controls the VID to FID mapping and {Port, VID} to FID mapping for
+ * virtualized ports.
+ */
+#define MLXSW_REG_SVFA_ID 0x201C
+#define MLXSW_REG_SVFA_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_svfa = {
+       .id = MLXSW_REG_SVFA_ID,
+       .len = MLXSW_REG_SVFA_LEN,
+};
+
+/* reg_svfa_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
+
+/* reg_svfa_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_svfa_mt {
+       MLXSW_REG_SVFA_MT_VID_TO_FID,
+       MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+};
+
+/* reg_svfa_mapping_table
+ * Mapping table:
+ * 0 - VID to FID
+ * 1 - {Port, VID} to FID
+ * Access: Index
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3);
+
+/* reg_svfa_v
+ * Valid.
+ * Valid if set.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1);
+
+/* reg_svfa_fid
+ * Filtering ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16);
+
+/* reg_svfa_vid
+ * VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12);
+
+/* reg_svfa_counter_set_type
+ * Counter set type for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
+
+/* reg_svfa_counter_index
+ * Counter index for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
+
+static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
+                                      enum mlxsw_reg_svfa_mt mt, bool valid,
+                                      u16 fid, u16 vid)
+{
+       MLXSW_REG_ZERO(svfa, payload);
+       local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
+       mlxsw_reg_svfa_swid_set(payload, 0);
+       mlxsw_reg_svfa_local_port_set(payload, local_port);
+       mlxsw_reg_svfa_mapping_table_set(payload, mt);
+       mlxsw_reg_svfa_v_set(payload, valid);
+       mlxsw_reg_svfa_fid_set(payload, fid);
+       mlxsw_reg_svfa_vid_set(payload, vid);
+}
+
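A sketch of installing one global VID-to-FID mapping; the FID value 5010 and VID 10 are purely illustrative:

	char svfa_pl[MLXSW_REG_SVFA_LEN];
	int err;

	/* local_port is forced to 0 by pack() for the VID-to-FID table */
	mlxsw_reg_svfa_pack(svfa_pl, 0, MLXSW_REG_SVFA_MT_VID_TO_FID, true,
			    5010, 10);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(svfa), svfa_pl);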
+/* SVPE - Switch Virtual-Port Enabling Register
+ * --------------------------------------------
+ * Enables port virtualization.
+ */
+#define MLXSW_REG_SVPE_ID 0x201E
+#define MLXSW_REG_SVPE_LEN 0x4
+
+static const struct mlxsw_reg_info mlxsw_reg_svpe = {
+       .id = MLXSW_REG_SVPE_ID,
+       .len = MLXSW_REG_SVPE_LEN,
+};
+
+/* reg_svpe_local_port
+ * Local port number
+ * Access: Index
+ *
+ * Note: CPU port is not supported (uses VLAN mode only).
+ */
+MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
+
+/* reg_svpe_vp_en
+ * Virtual port enable.
+ * 0 - Disable, VLAN mode (VID to FID).
+ * 1 - Enable, Virtual port mode ({Port, VID} to FID).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
+
+static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
+                                      bool enable)
+{
+       MLXSW_REG_ZERO(svpe, payload);
+       mlxsw_reg_svpe_local_port_set(payload, local_port);
+       mlxsw_reg_svpe_vp_en_set(payload, enable);
+}
+
+/* SFMR - Switch FID Management Register
+ * -------------------------------------
+ * Creates and configures FIDs.
+ */
+#define MLXSW_REG_SFMR_ID 0x201F
+#define MLXSW_REG_SFMR_LEN 0x18
+
+static const struct mlxsw_reg_info mlxsw_reg_sfmr = {
+       .id = MLXSW_REG_SFMR_ID,
+       .len = MLXSW_REG_SFMR_LEN,
+};
+
+enum mlxsw_reg_sfmr_op {
+       MLXSW_REG_SFMR_OP_CREATE_FID,
+       MLXSW_REG_SFMR_OP_DESTROY_FID,
+};
+
+/* reg_sfmr_op
+ * Operation.
+ * 0 - Create or edit FID.
+ * 1 - Destroy FID.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
+
+/* reg_sfmr_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+
+/* reg_sfmr_fid_offset
+ * FID offset.
+ * Used to point into the flooding table selected by SFGC register if
+ * the table is of type FID-Offset. Otherwise, this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
+
+/* reg_sfmr_vtfp
+ * Valid Tunnel Flood Pointer.
+ * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1);
+
+/* reg_sfmr_nve_tunnel_flood_ptr
+ * Underlay Flooding and BC Pointer.
+ * Used as a pointer to the first entry of the group based link lists of
+ * flooding or BC entries (for NVE tunnels).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24);
+
+/* reg_sfmr_vv
+ * VNI Valid.
+ * If not set, then vni is reserved.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
+
+/* reg_sfmr_vni
+ * Virtual Network Identifier.
+ * Access: RW
+ *
+ * Note: A given VNI can only be assigned to one FID.
+ */
+MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+
+static inline void mlxsw_reg_sfmr_pack(char *payload,
+                                      enum mlxsw_reg_sfmr_op op, u16 fid,
+                                      u16 fid_offset)
+{
+       MLXSW_REG_ZERO(sfmr, payload);
+       mlxsw_reg_sfmr_op_set(payload, op);
+       mlxsw_reg_sfmr_fid_set(payload, fid);
+       mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
+       mlxsw_reg_sfmr_vtfp_set(payload, false);
+       mlxsw_reg_sfmr_vv_set(payload, false);
+}
+
+/* SPVMLR - Switch Port VLAN MAC Learning Register
+ * -----------------------------------------------
+ * Controls the switch MAC learning policy per {Port, VID}.
+ */
+#define MLXSW_REG_SPVMLR_ID 0x2020
+#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
+                             MLXSW_REG_SPVMLR_REC_LEN * \
+                             MLXSW_REG_SPVMLR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvmlr = {
+       .id = MLXSW_REG_SPVMLR_ID,
+       .len = MLXSW_REG_SPVMLR_LEN,
+};
+
+/* reg_spvmlr_local_port
+ * Local ingress port.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
+
+/* reg_spvmlr_num_rec
+ * Number of records to update.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8);
+
+/* reg_spvmlr_rec_learn_enable
+ * 0 - Disable learning for {Port, VID}.
+ * 1 - Enable learning for {Port, VID}.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
+                    31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+/* reg_spvmlr_rec_vid
+ * VLAN ID to be added/removed from port or for querying.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
+                    MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
+                                        u16 vid_begin, u16 vid_end,
+                                        bool learn_enable)
+{
+       int num_rec = vid_end - vid_begin + 1;
+       int i;
+
+       WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT);
+
+       MLXSW_REG_ZERO(spvmlr, payload);
+       mlxsw_reg_spvmlr_local_port_set(payload, local_port);
+       mlxsw_reg_spvmlr_num_rec_set(payload, num_rec);
+
+       for (i = 0; i < num_rec; i++) {
+               mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable);
+               mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i);
+       }
+}
+
 /* PMLP - Ports Module to Local Port Register
  * ------------------------------------------
  * Configures the assignment of modules to local ports.
@@ -1008,12 +1732,88 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
        mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
 }
 
+/* PBMC - Port Buffer Management Control Register
+ * ----------------------------------------------
+ * The PBMC register configures and retrieves the port packet buffer
+ * allocation for the different priorities and the pause threshold management.
+ */
+#define MLXSW_REG_PBMC_ID 0x500C
+#define MLXSW_REG_PBMC_LEN 0x68
+
+static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
+       .id = MLXSW_REG_PBMC_ID,
+       .len = MLXSW_REG_PBMC_LEN,
+};
+
+/* reg_pbmc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
+
+/* reg_pbmc_xoff_timer_value
+ * When device generates a pause frame, it uses this value as the pause
+ * timer (time for the peer port to pause in quota-512 bit time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
+
+/* reg_pbmc_xoff_refresh
+ * The time before a new pause frame should be sent to refresh the pause
+ * state. Using the same units as xoff_timer_value above (in quota-512 bit
+ * time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
+
+/* reg_pbmc_buf_lossy
+ * The field indicates if the buffer is lossy.
+ * 0 - Lossless
+ * 1 - Lossy
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_epsb
+ * Eligible for Port Shared buffer.
+ * If epsb is set, packets assigned to the buffer are allowed to use the port
+ * shared buffer.
+ * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_size
+ * The part of the packet buffer array allocated to this specific buffer.
+ * Units are cells.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
+
+static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
+                                      u16 xoff_timer_value, u16 xoff_refresh)
+{
+       MLXSW_REG_ZERO(pbmc, payload);
+       mlxsw_reg_pbmc_local_port_set(payload, local_port);
+       mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value);
+       mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh);
+}
+
+static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
+                                                   int buf_index,
+                                                   u16 size)
+{
+       mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1);
+       mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+       mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+}
+
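A rough sketch of pointing a port's buffer 0 at a lossy configuration; the xoff timings and the 2048-cell size are placeholders, not recommended values:

	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int err;

	mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0xffff, 0xffff / 2);
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, 0, 2048);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pbmc), pbmc_pl);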
 /* PSPA - Port Switch Partition Allocation
  * ---------------------------------------
  * Controls the association of a port with a switch partition and enables
  * configuring ports as stacking ports.
  */
-#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_ID 0x500D
 #define MLXSW_REG_PSPA_LEN 0x8
 
 static const struct mlxsw_reg_info mlxsw_reg_pspa = {
@@ -1074,8 +1874,11 @@ MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
  */
 MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
 
-#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0
-#define MLXSW_REG_HTGT_TRAP_GROUP_RX   0x1
+enum mlxsw_reg_htgt_trap_group {
+       MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+       MLXSW_REG_HTGT_TRAP_GROUP_RX,
+       MLXSW_REG_HTGT_TRAP_GROUP_CTRL,
+};
 
 /* reg_htgt_trap_group
  * Trap group number. User defined number specifying which trap groups
@@ -1142,6 +1945,7 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
 
 #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD     0x15
 #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX       0x14
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL     0x13
 
 /* reg_htgt_local_path_rdq
  * Receive descriptor queue (RDQ) to use for the trap group.
@@ -1149,21 +1953,29 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
  */
 MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
 
-static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+static inline void mlxsw_reg_htgt_pack(char *payload,
+                                      enum mlxsw_reg_htgt_trap_group group)
 {
        u8 swid, rdq;
 
        MLXSW_REG_ZERO(htgt, payload);
-       if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
+       switch (group) {
+       case MLXSW_REG_HTGT_TRAP_GROUP_EMAD:
                swid = MLXSW_PORT_SWID_ALL_SWIDS;
                rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
-       } else {
+               break;
+       case MLXSW_REG_HTGT_TRAP_GROUP_RX:
                swid = 0;
                rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+               break;
+       case MLXSW_REG_HTGT_TRAP_GROUP_CTRL:
+               swid = 0;
+               rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL;
+               break;
        }
        mlxsw_reg_htgt_swid_set(payload, swid);
        mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
-       mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+       mlxsw_reg_htgt_trap_group_set(payload, group);
        mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
        mlxsw_reg_htgt_pid_set(payload, 0);
        mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
@@ -1254,17 +2066,290 @@ enum {
  */
 MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
 
-static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
-                                      u8 trap_group, u16 trap_id)
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
 {
+       enum mlxsw_reg_htgt_trap_group trap_group;
+
        MLXSW_REG_ZERO(hpkt, payload);
        mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
        mlxsw_reg_hpkt_action_set(payload, action);
+       switch (trap_id) {
+       case MLXSW_TRAP_ID_ETHEMAD:
+       case MLXSW_TRAP_ID_PUDE:
+               trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD;
+               break;
+       default:
+               trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX;
+               break;
+       }
        mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
        mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
        mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
 }
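With the trap group now derived from the trap ID internally, callers only name the action and trap. A sketch, assuming the MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU action constant from the existing HPKT definitions and the PUDE trap ID seen above:

	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	/* Deliver port up/down events to the CPU */
	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_PUDE);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);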
 
+/* SBPR - Shared Buffer Pools Register
+ * -----------------------------------
+ * The SBPR register configures and retrieves the shared buffer pools and
+ * their configuration.
+ */
+#define MLXSW_REG_SBPR_ID 0xB001
+#define MLXSW_REG_SBPR_LEN 0x14
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
+       .id = MLXSW_REG_SBPR_ID,
+       .len = MLXSW_REG_SBPR_LEN,
+};
+
+enum mlxsw_reg_sbpr_dir {
+       MLXSW_REG_SBPR_DIR_INGRESS,
+       MLXSW_REG_SBPR_DIR_EGRESS,
+};
+
+/* reg_sbpr_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2);
+
+/* reg_sbpr_pool
+ * Pool index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
+
+/* reg_sbpr_size
+ * Pool size in buffer cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24);
+
+enum mlxsw_reg_sbpr_mode {
+       MLXSW_REG_SBPR_MODE_STATIC,
+       MLXSW_REG_SBPR_MODE_DYNAMIC,
+};
+
+/* reg_sbpr_mode
+ * Pool quota calculation mode.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
+
+static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
+                                      enum mlxsw_reg_sbpr_dir dir,
+                                      enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+       MLXSW_REG_ZERO(sbpr, payload);
+       mlxsw_reg_sbpr_pool_set(payload, pool);
+       mlxsw_reg_sbpr_dir_set(payload, dir);
+       mlxsw_reg_sbpr_mode_set(payload, mode);
+       mlxsw_reg_sbpr_size_set(payload, size);
+}
+
+/* SBCM - Shared Buffer Class Management Register
+ * ----------------------------------------------
+ * The SBCM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-PG, including the binding to pool
+ * and definition of the associated quota.
+ */
+#define MLXSW_REG_SBCM_ID 0xB002
+#define MLXSW_REG_SBCM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbcm = {
+       .id = MLXSW_REG_SBCM_ID,
+       .len = MLXSW_REG_SBCM_LEN,
+};
+
+/* reg_sbcm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
+
+/* reg_sbcm_pg_buff
+ * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
+ * For PG buffer: range is 0..cap_max_pg_buffers - 1
+ * For traffic class: range is 0..cap_max_tclass - 1
+ * Note that when a traffic class is in MC aware mode, the MC aware traffic
+ * classes cannot be configured.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
+
+enum mlxsw_reg_sbcm_dir {
+       MLXSW_REG_SBCM_DIR_INGRESS,
+       MLXSW_REG_SBCM_DIR_EGRESS,
+};
+
+/* reg_sbcm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
+
+/* reg_sbcm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
+
+/* reg_sbcm_max_buff
+ * When the pool associated to the port-pg/tclass is configured to
+ * static, Maximum buffer size for the limiter configured in cells.
+ * When the pool associated to the port-pg/tclass is configured to
+ * dynamic, the max_buff holds the "alpha" parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
+
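To make the alpha encoding concrete: i=1 encodes (1/128)*2^0 = 1/128, i=8 encodes (1/128)*2^7 = 1, and i=14 encodes (1/128)*2^13 = 64. Under the usual dynamic-threshold reading (an interpretation, not something this patch states), the effective quota is alpha times the free space remaining in the pool.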
+/* reg_sbcm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
+                                      enum mlxsw_reg_sbcm_dir dir,
+                                      u32 min_buff, u32 max_buff, u8 pool)
+{
+       MLXSW_REG_ZERO(sbcm, payload);
+       mlxsw_reg_sbcm_local_port_set(payload, local_port);
+       mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff);
+       mlxsw_reg_sbcm_dir_set(payload, dir);
+       mlxsw_reg_sbcm_min_buff_set(payload, min_buff);
+       mlxsw_reg_sbcm_max_buff_set(payload, max_buff);
+       mlxsw_reg_sbcm_pool_set(payload, pool);
+}
+
+/* SBPM - Shared Buffer Port Management Register
+ * ---------------------------------------------
+ * The SBPM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-Pool, including the definition
+ * of the associated quota.
+ */
+#define MLXSW_REG_SBPM_ID 0xB003
+#define MLXSW_REG_SBPM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpm = {
+       .id = MLXSW_REG_SBPM_ID,
+       .len = MLXSW_REG_SBPM_LEN,
+};
+
+/* reg_sbpm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
+
+/* reg_sbpm_pool
+ * The pool associated to quota counting on the local_port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
+
+enum mlxsw_reg_sbpm_dir {
+       MLXSW_REG_SBPM_DIR_INGRESS,
+       MLXSW_REG_SBPM_DIR_EGRESS,
+};
+
+/* reg_sbpm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
+
+/* reg_sbpm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
+
+/* reg_sbpm_max_buff
+ * When the pool associated to the port-pg/tclass is configured to
+ * static, Maximum buffer size for the limiter configured in cells.
+ * When the pool associated to the port-pg/tclass is configured to
+ * dynamic, the max_buff holds the "alpha" parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
+                                      enum mlxsw_reg_sbpm_dir dir,
+                                      u32 min_buff, u32 max_buff)
+{
+       MLXSW_REG_ZERO(sbpm, payload);
+       mlxsw_reg_sbpm_local_port_set(payload, local_port);
+       mlxsw_reg_sbpm_pool_set(payload, pool);
+       mlxsw_reg_sbpm_dir_set(payload, dir);
+       mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
+       mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
+}
+
+/* SBMM - Shared Buffer Multicast Management Register
+ * --------------------------------------------------
+ * The SBMM register configures and retrieves the shared buffer allocation
+ * and configuration for MC packets according to Switch-Priority, including
+ * the binding to pool and definition of the associated quota.
+ */
+#define MLXSW_REG_SBMM_ID 0xB004
+#define MLXSW_REG_SBMM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbmm = {
+       .id = MLXSW_REG_SBMM_ID,
+       .len = MLXSW_REG_SBMM_LEN,
+};
+
+/* reg_sbmm_prio
+ * Switch Priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4);
+
+/* reg_sbmm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24);
+
+/* reg_sbmm_max_buff
+ * When the pool associated to the port-pg/tclass is configured to
+ * static, Maximum buffer size for the limiter configured in cells.
+ * When the pool associated to the port-pg/tclass is configured to
+ * dynamic, the max_buff holds the "alpha" parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbmm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
+                                      u32 max_buff, u8 pool)
+{
+       MLXSW_REG_ZERO(sbmm, payload);
+       mlxsw_reg_sbmm_prio_set(payload, prio);
+       mlxsw_reg_sbmm_min_buff_set(payload, min_buff);
+       mlxsw_reg_sbmm_max_buff_set(payload, max_buff);
+       mlxsw_reg_sbmm_pool_set(payload, pool);
+}
+
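Putting the shared-buffer registers together, one plausible setup order (all sizes and bindings are placeholder values) is to create a pool with SBPR and then bind a port's PG buffer to it with SBCM:

	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	int err;

	/* Ingress pool 0: static quota mode, 2^16 cells */
	mlxsw_reg_sbpr_pack(sbpr_pl, 0, MLXSW_REG_SBPR_DIR_INGRESS,
			    MLXSW_REG_SBPR_MODE_STATIC, 1 << 16);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;
	/* Bind PG buffer 0 of local_port to pool 0, capped at 1500 cells */
	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, 0, MLXSW_REG_SBCM_DIR_INGRESS,
			    0, 1500, 0);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbcm), sbcm_pl);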
 static inline const char *mlxsw_reg_id_str(u16 reg_id)
 {
        switch (reg_id) {
@@ -1272,18 +2357,34 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
                return "SGCR";
        case MLXSW_REG_SPAD_ID:
                return "SPAD";
-       case MLXSW_REG_SMID_ID:
-               return "SMID";
        case MLXSW_REG_SSPR_ID:
                return "SSPR";
+       case MLXSW_REG_SFDAT_ID:
+               return "SFDAT";
+       case MLXSW_REG_SFD_ID:
+               return "SFD";
+       case MLXSW_REG_SFN_ID:
+               return "SFN";
        case MLXSW_REG_SPMS_ID:
                return "SPMS";
+       case MLXSW_REG_SPVID_ID:
+               return "SPVID";
+       case MLXSW_REG_SPVM_ID:
+               return "SPVM";
        case MLXSW_REG_SFGC_ID:
                return "SFGC";
        case MLXSW_REG_SFTR_ID:
                return "SFTR";
        case MLXSW_REG_SPMLR_ID:
                return "SPMLR";
+       case MLXSW_REG_SVFA_ID:
+               return "SVFA";
+       case MLXSW_REG_SVPE_ID:
+               return "SVPE";
+       case MLXSW_REG_SFMR_ID:
+               return "SFMR";
+       case MLXSW_REG_SPVMLR_ID:
+               return "SPVMLR";
        case MLXSW_REG_PMLP_ID:
                return "PMLP";
        case MLXSW_REG_PMTU_ID:
@@ -1296,12 +2397,22 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
                return "PAOS";
        case MLXSW_REG_PPCNT_ID:
                return "PPCNT";
+       case MLXSW_REG_PBMC_ID:
+               return "PBMC";
        case MLXSW_REG_PSPA_ID:
                return "PSPA";
        case MLXSW_REG_HTGT_ID:
                return "HTGT";
        case MLXSW_REG_HPKT_ID:
                return "HPKT";
+       case MLXSW_REG_SBPR_ID:
+               return "SBPR";
+       case MLXSW_REG_SBCM_ID:
+               return "SBCM";
+       case MLXSW_REG_SBPM_ID:
+               return "SBPM";
+       case MLXSW_REG_SBMM_ID:
+               return "SBMM";
        default:
                return "*UNKNOWN*";
        }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
new file mode 100644 (file)
index 0000000..6e9906d
--- /dev/null
@@ -0,0 +1,1948 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/bitops.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp_driver_version[] = "1.0";
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+                                    const struct mlxsw_tx_info *tx_info)
+{
+       char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+
+       memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+       mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+       mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+       mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+       mlxsw_tx_hdr_swid_set(txhdr, 0);
+       mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+       mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+       mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
+{
+       char spad_pl[MLXSW_REG_SPAD_LEN];
+       int err;
+
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
+       if (err)
+               return err;
+       mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
+       return 0;
+}
+
+static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                         bool is_up)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char paos_pl[MLXSW_REG_PAOS_LEN];
+
+       mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
+                           is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+                           MLXSW_PORT_ADMIN_STATUS_DOWN);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
+                                        bool *p_is_up)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char paos_pl[MLXSW_REG_PAOS_LEN];
+       u8 oper_status;
+       int err;
+
+       mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+       if (err)
+               return err;
+       oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+       *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
+       return 0;
+}
+
+static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+       char sfmr_pl[MLXSW_REG_SFMR_LEN];
+       int err;
+
+       mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+                           MLXSW_SP_VFID_BASE + vfid, 0);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+
+       if (err)
+               return err;
+
+       set_bit(vfid, mlxsw_sp->active_vfids);
+       return 0;
+}
+
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+       char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+       clear_bit(vfid, mlxsw_sp->active_vfids);
+
+       mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+                           MLXSW_SP_VFID_BASE + vfid, 0);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     unsigned char *addr)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char ppad_pl[MLXSW_REG_PPAD_LEN];
+
+       mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
+       mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
+
+       ether_addr_copy(addr, mlxsw_sp->base_mac);
+       addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
+       return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                      u16 vid, enum mlxsw_reg_spms_state state)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char *spms_pl;
+       int err;
+
+       spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+       if (!spms_pl)
+               return -ENOMEM;
+       mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+       mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+       kfree(spms_pl);
+       return err;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char pmtu_pl[MLXSW_REG_PMTU_LEN];
+       int max_mtu;
+       int err;
+
+       mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+       mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+       if (err)
+               return err;
+       max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+       if (mtu > max_mtu)
+               return -EINVAL;
+
+       mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+       mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                    bool enable)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char svpe_pl[MLXSW_REG_SVPE_LEN];
+
+       mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
+}
+
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+                                u16 vid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+       mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
+                           fid, vid);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                         u16 vid, bool learn_enable)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char *spvmlr_pl;
+       int err;
+
+       spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
+       if (!spvmlr_pl)
+               return -ENOMEM;
+       mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+                             learn_enable);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
+       kfree(spvmlr_pl);
+       return err;
+}
+
+static int
+mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+       mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     bool *p_usable)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char pmlp_pl[MLXSW_REG_PMLP_LEN];
+       int err;
+
+       mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+       if (err)
+               return err;
+       *p_usable = !!mlxsw_reg_pmlp_width_get(pmlp_pl);
+       return 0;
+}
+
+static int mlxsw_sp_port_open(struct net_device *dev)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       int err;
+
+       err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+       if (err)
+               return err;
+       netif_start_queue(dev);
+       return 0;
+}
+
+static int mlxsw_sp_port_stop(struct net_device *dev)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+       return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+}
+
+static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
+                                     struct net_device *dev)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+       const struct mlxsw_tx_info tx_info = {
+               .local_port = mlxsw_sp_port->local_port,
+               .is_emad = false,
+       };
+       u64 len;
+       int err;
+
+       if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
+               return NETDEV_TX_BUSY;
+
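+       /* The Tx header is pushed in front of the frame, so make sure
+        * there is enough headroom for it before it is constructed below.
+        */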
+       if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+               struct sk_buff *skb_orig = skb;
+
+               skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+               if (!skb) {
+                       this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+                       dev_kfree_skb_any(skb_orig);
+                       return NETDEV_TX_OK;
+               }
+       }
+
+       if (eth_skb_pad(skb)) {
+               this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+               return NETDEV_TX_OK;
+       }
+
+       mlxsw_sp_txhdr_construct(skb, &tx_info);
+       len = skb->len;
+       /* Due to a race we might fail here because of a full queue. In that
+        * unlikely case we simply drop the packet.
+        */
+       err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
+
+       if (!err) {
+               pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+               u64_stats_update_begin(&pcpu_stats->syncp);
+               pcpu_stats->tx_packets++;
+               pcpu_stats->tx_bytes += len;
+               u64_stats_update_end(&pcpu_stats->syncp);
+       } else {
+               this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+               dev_kfree_skb_any(skb);
+       }
+       return NETDEV_TX_OK;
+}
+
+static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct sockaddr *addr = p;
+       int err;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
+       if (err)
+               return err;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       return 0;
+}
+
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       int err;
+
+       err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+       if (err)
+               return err;
+       dev->mtu = mtu;
+       return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+                         struct rtnl_link_stats64 *stats)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp_port_pcpu_stats *p;
+       u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+       u32 tx_dropped = 0;
+       unsigned int start;
+       int i;
+
+       for_each_possible_cpu(i) {
+               p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
+               do {
+                       start = u64_stats_fetch_begin_irq(&p->syncp);
+                       rx_packets      = p->rx_packets;
+                       rx_bytes        = p->rx_bytes;
+                       tx_packets      = p->tx_packets;
+                       tx_bytes        = p->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+               stats->rx_packets       += rx_packets;
+               stats->rx_bytes         += rx_bytes;
+               stats->tx_packets       += tx_packets;
+               stats->tx_bytes         += tx_bytes;
+               /* tx_dropped is u32, updated without syncp protection. */
+               tx_dropped      += p->tx_dropped;
+       }
+       stats->tx_dropped       = tx_dropped;
+       return stats;
+}
+
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+                          u16 vid_end, bool is_member, bool untagged)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char *spvm_pl;
+       int err;
+
+       spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
+       if (!spvm_pl)
+               return -ENOMEM;
+
+       mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
+                           vid_end, is_member, untagged);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
+       kfree(spvm_pl);
+       return err;
+}
+
+static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+       u16 vid, last_visited_vid;
+       int err;
+
+       for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+               err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
+                                                  vid);
+               if (err) {
+                       last_visited_vid = vid;
+                       goto err_port_vid_to_fid_set;
+               }
+       }
+
+       err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+       if (err) {
+               last_visited_vid = VLAN_N_VID;
+               goto err_port_vid_to_fid_set;
+       }
+
+       return 0;
+
+err_port_vid_to_fid_set:
+       for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+               mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
+                                            vid);
+       return err;
+}
+
+static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+       u16 vid;
+       int err;
+
+       err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+       if (err)
+               return err;
+
+       for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+               err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
+                                                  vid, vid);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+                         u16 vid)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char *sftr_pl;
+       int err;
+
+       /* VLAN 0 is added to the HW filter when the device goes up, but
+        * it is reserved in our case, so simply return.
+        */
+       if (!vid)
+               return 0;
+
+       if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
+               netdev_warn(dev, "VID=%d already configured\n", vid);
+               return 0;
+       }
+
+       if (!test_bit(vid, mlxsw_sp->active_vfids)) {
+               err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+               if (err) {
+                       netdev_err(dev, "Failed to create vFID=%d\n",
+                                  MLXSW_SP_VFID_BASE + vid);
+                       return err;
+               }
+
+               sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+               if (!sftr_pl) {
+                       err = -ENOMEM;
+                       goto err_flood_table_alloc;
+               }
+               mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
+                                   MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
+                                   MLXSW_PORT_CPU_PORT, true);
+               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+               kfree(sftr_pl);
+               if (err) {
+                       netdev_err(dev, "Failed to configure flood table\n");
+                       goto err_flood_table_config;
+               }
+       }
+
+       /* In case we fail in the following steps, we intentionally do not
+        * destroy the associated vFID.
+        */
+
+       /* When adding the first VLAN interface on a bridged port, we need to
+        * transition all the active 802.1Q bridge VLANs to use explicit
+        * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
+        */
+       if (!mlxsw_sp_port->nr_vfids) {
+               err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+               if (err) {
+                       netdev_err(dev, "Failed to set to Virtual mode\n");
+                       return err;
+               }
+       }
+
+       err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+                                          MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+                                          true, MLXSW_SP_VFID_BASE + vid, vid);
+       if (err) {
+               netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
+                          vid, MLXSW_SP_VFID_BASE + vid);
+               goto err_port_vid_to_fid_set;
+       }
+
+       err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+       if (err) {
+               netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+               goto err_port_vid_learning_set;
+       }
+
+       err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
+       if (err) {
+               netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+                          vid);
+               goto err_port_add_vid;
+       }
+
+       err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+                                         MLXSW_REG_SPMS_STATE_FORWARDING);
+       if (err) {
+               netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+               goto err_port_stp_state_set;
+       }
+
+       mlxsw_sp_port->nr_vfids++;
+       set_bit(vid, mlxsw_sp_port->active_vfids);
+
+       return 0;
+
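+/* Unwind for failures during vFID creation and flood table setup. */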
+err_flood_table_config:
+err_flood_table_alloc:
+       mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
+       return err;
+
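+/* Unwind for failures that occurred after the vFID was created. The vFID
+ * itself is intentionally kept (see comment above).
+ */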
+err_port_stp_state_set:
+       mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+err_port_add_vid:
+       mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vid_learning_set:
+       mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+                                    MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
+                                    MLXSW_SP_VFID_BASE + vid, vid);
+err_port_vid_to_fid_set:
+       mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+       return err;
+}
+
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+                          __be16 __always_unused proto, u16 vid)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       int err;
+
+       /* VLAN 0 is removed from the HW filter when the device goes
+        * down, but it is reserved in our case, so simply return.
+        */
+       if (!vid)
+               return 0;
+
+       if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
+               netdev_warn(dev, "VID=%d does not exist\n", vid);
+               return 0;
+       }
+
+       err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+                                         MLXSW_REG_SPMS_STATE_DISCARDING);
+       if (err) {
+               netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+               return err;
+       }
+
+       err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+       if (err) {
+               netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+                          vid);
+               return err;
+       }
+
+       err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+       if (err) {
+               netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
+               return err;
+       }
+
+       err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+                                          MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+                                          false, MLXSW_SP_VFID_BASE + vid,
+                                          vid);
+       if (err) {
+               netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
+                          vid, MLXSW_SP_VFID_BASE + vid);
+               return err;
+       }
+
+       /* When removing the last VLAN interface on a bridged port, we need
+        * to transition all active 802.1Q bridge VLANs to use VID to FID
+        * mappings and set the port's mode to VLAN mode.
+        */
+       if (mlxsw_sp_port->nr_vfids == 1) {
+               err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+               if (err) {
+                       netdev_err(dev, "Failed to set to VLAN mode\n");
+                       return err;
+               }
+       }
+
+       mlxsw_sp_port->nr_vfids--;
+       clear_bit(vid, mlxsw_sp_port->active_vfids);
+
+       return 0;
+}
+
+static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
+       .ndo_open               = mlxsw_sp_port_open,
+       .ndo_stop               = mlxsw_sp_port_stop,
+       .ndo_start_xmit         = mlxsw_sp_port_xmit,
+       .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
+       .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
+       .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
+       .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
+       .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
+       .ndo_fdb_add            = switchdev_port_fdb_add,
+       .ndo_fdb_del            = switchdev_port_fdb_del,
+       .ndo_fdb_dump           = switchdev_port_fdb_dump,
+       .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
+       .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
+       .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
+};
+
+static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
+                                     struct ethtool_drvinfo *drvinfo)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+       strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, mlxsw_sp_driver_version,
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "%d.%d.%d",
+                mlxsw_sp->bus_info->fw_rev.major,
+                mlxsw_sp->bus_info->fw_rev.minor,
+                mlxsw_sp->bus_info->fw_rev.subminor);
+       strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+               sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sp_port_hw_stats {
+       char str[ETH_GSTRING_LEN];
+       u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
+       {
+               .str = "a_frames_transmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+       },
+       {
+               .str = "a_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+       },
+       {
+               .str = "a_frame_check_sequence_errors",
+               .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+       },
+       {
+               .str = "a_alignment_errors",
+               .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+       },
+       {
+               .str = "a_octets_transmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+       },
+       {
+               .str = "a_octets_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+       },
+       {
+               .str = "a_multicast_frames_xmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+       },
+       {
+               .str = "a_broadcast_frames_xmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+       },
+       {
+               .str = "a_multicast_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+       },
+       {
+               .str = "a_broadcast_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+       },
+       {
+               .str = "a_in_range_length_errors",
+               .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+       },
+       {
+               .str = "a_out_of_range_length_field",
+               .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+       },
+       {
+               .str = "a_frame_too_long_errors",
+               .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+       },
+       {
+               .str = "a_symbol_error_during_carrier",
+               .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+       },
+       {
+               .str = "a_mac_control_frames_transmitted",
+               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+       },
+       {
+               .str = "a_mac_control_frames_received",
+               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+       },
+       {
+               .str = "a_unsupported_opcodes_received",
+               .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+       },
+       {
+               .str = "a_pause_mac_ctrl_frames_received",
+               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+       },
+       {
+               .str = "a_pause_mac_ctrl_frames_xmitted",
+               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+
+static void mlxsw_sp_port_get_strings(struct net_device *dev,
+                                     u32 stringset, u8 *data)
+{
+       u8 *p = data;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sp_port_hw_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+               break;
+       }
+}
+
+static void mlxsw_sp_port_get_stats(struct net_device *dev,
+                                   struct ethtool_stats *stats, u64 *data)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+       int i;
+       int err;
+
+       mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+       for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
+               data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return MLXSW_SP_PORT_HW_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+struct mlxsw_sp_port_link_mode {
+       u32 mask;
+       u32 supported;
+       u32 advertised;
+       u32 speed;
+};
+
+static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+               .supported      = SUPPORTED_100baseT_Full,
+               .advertised     = ADVERTISED_100baseT_Full,
+               .speed          = 100,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+               .speed          = 100,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+                                 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+               .supported      = SUPPORTED_1000baseKX_Full,
+               .advertised     = ADVERTISED_1000baseKX_Full,
+               .speed          = 1000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+               .supported      = SUPPORTED_10000baseT_Full,
+               .advertised     = ADVERTISED_10000baseT_Full,
+               .speed          = 10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+               .supported      = SUPPORTED_10000baseKX4_Full,
+               .advertised     = ADVERTISED_10000baseKX4_Full,
+               .speed          = 10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+               .supported      = SUPPORTED_10000baseKR_Full,
+               .advertised     = ADVERTISED_10000baseKR_Full,
+               .speed          = 10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+               .supported      = SUPPORTED_20000baseKR2_Full,
+               .advertised     = ADVERTISED_20000baseKR2_Full,
+               .speed          = 20000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+               .supported      = SUPPORTED_40000baseCR4_Full,
+               .advertised     = ADVERTISED_40000baseCR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+               .supported      = SUPPORTED_40000baseKR4_Full,
+               .advertised     = ADVERTISED_40000baseKR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+               .supported      = SUPPORTED_40000baseSR4_Full,
+               .advertised     = ADVERTISED_40000baseSR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+               .supported      = SUPPORTED_40000baseLR4_Full,
+               .advertised     = ADVERTISED_40000baseLR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+               .speed          = 25000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+               .speed          = 50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+               .supported      = SUPPORTED_56000baseKR4_Full,
+               .advertised     = ADVERTISED_56000baseKR4_Full,
+               .speed          = 56000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+               .speed          = 100000,
+       },
+};
+
+#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
+
+static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+               return SUPPORTED_FIBRE;
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+               return SUPPORTED_Backplane;
+       return 0;
+}
+
+static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+       u32 modes = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+                       modes |= mlxsw_sp_port_link_mode[i].supported;
+       }
+       return modes;
+}
+
+static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+       u32 modes = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+                       modes |= mlxsw_sp_port_link_mode[i].advertised;
+       }
+       return modes;
+}
+
+static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+                                           struct ethtool_cmd *cmd)
+{
+       u32 speed = SPEED_UNKNOWN;
+       u8 duplex = DUPLEX_UNKNOWN;
+       int i;
+
+       if (!carrier_ok)
+               goto out;
+
+       for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
+                       speed = mlxsw_sp_port_link_mode[i].speed;
+                       duplex = DUPLEX_FULL;
+                       break;
+               }
+       }
+out:
+       ethtool_cmd_speed_set(cmd, speed);
+       cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
+{
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+               return PORT_FIBRE;
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+               return PORT_DA;
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+               return PORT_NONE;
+
+       return PORT_OTHER;
+}
+
+static int mlxsw_sp_port_get_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u32 eth_proto_cap;
+       u32 eth_proto_admin;
+       u32 eth_proto_oper;
+       int err;
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+       if (err) {
+               netdev_err(dev, "Failed to get proto");
+               return err;
+       }
+       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+                             &eth_proto_admin, &eth_proto_oper);
+
+       cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
+                        mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
+                        SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+       cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
+       mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
+                                       eth_proto_oper, cmd);
+
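+       /* If no operational proto is reported (e.g. the link is down),
+        * derive the connector type and link partner advertisement from
+        * the port capabilities instead.
+        */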
+       eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+       cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
+       cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
+
+       cmd->transceiver = XCVR_INTERNAL;
+       return 0;
+}
+
+static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+{
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+               if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+                       ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
+static u32 mlxsw_sp_to_ptys_speed(u32 speed)
+{
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+               if (speed == mlxsw_sp_port_link_mode[i].speed)
+                       ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
+static int mlxsw_sp_port_set_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u32 speed;
+       u32 eth_proto_new;
+       u32 eth_proto_cap;
+       u32 eth_proto_admin;
+       bool is_up;
+       int err;
+
+       speed = ethtool_cmd_speed(cmd);
+
+       eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+               mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
+               mlxsw_sp_to_ptys_speed(speed);
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+       if (err) {
+               netdev_err(dev, "Failed to get proto");
+               return err;
+       }
+       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+       eth_proto_new = eth_proto_new & eth_proto_cap;
+       if (!eth_proto_new) {
+               netdev_err(dev, "Not supported proto admin requested");
+               return -EINVAL;
+       }
+       if (eth_proto_new == eth_proto_admin)
+               return 0;
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+       if (err) {
+               netdev_err(dev, "Failed to set proto admin");
+               return err;
+       }
+
+       err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
+       if (err) {
+               netdev_err(dev, "Failed to get oper status");
+               return err;
+       }
+       if (!is_up)
+               return 0;
+
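+       /* The port is operationally up; toggle it down and up again so
+        * that the new proto admin takes effect.
+        */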
+       err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+       if (err) {
+               netdev_err(dev, "Failed to set admin status");
+               return err;
+       }
+
+       err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+       if (err) {
+               netdev_err(dev, "Failed to set admin status");
+               return err;
+       }
+
+       return 0;
+}
+
+static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
+       .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
+       .get_link               = ethtool_op_get_link,
+       .get_strings            = mlxsw_sp_port_get_strings,
+       .get_ethtool_stats      = mlxsw_sp_port_get_stats,
+       .get_sset_count         = mlxsw_sp_port_get_sset_count,
+       .get_settings           = mlxsw_sp_port_get_settings,
+       .set_settings           = mlxsw_sp_port_set_settings,
+};
+
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port;
+       struct net_device *dev;
+       bool usable;
+       int err;
+
+       dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
+       if (!dev)
+               return -ENOMEM;
+       mlxsw_sp_port = netdev_priv(dev);
+       mlxsw_sp_port->dev = dev;
+       mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
+       mlxsw_sp_port->local_port = local_port;
+       mlxsw_sp_port->learning = 1;
+       mlxsw_sp_port->learning_sync = 1;
+       mlxsw_sp_port->pvid = 1;
+
+       mlxsw_sp_port->pcpu_stats =
+               netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
+       if (!mlxsw_sp_port->pcpu_stats) {
+               err = -ENOMEM;
+               goto err_alloc_stats;
+       }
+
+       dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
+       dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+
+       err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
+                       mlxsw_sp_port->local_port);
+               goto err_dev_addr_init;
+       }
+
+       netif_carrier_off(dev);
+
+       dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+                        NETIF_F_HW_VLAN_CTAG_FILTER;
+
+       /* Each packet needs to have a Tx header (metadata) on top of all
+        * other headers.
+        */
+       dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+       err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_module_check;
+       }
+
+       if (!usable) {
+               dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+                       mlxsw_sp_port->local_port);
+               goto port_not_usable;
+       }
+
+       err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_system_port_mapping_set;
+       }
+
+       err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_swid_set;
+       }
+
+       err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_mtu_set;
+       }
+
+       err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+       if (err)
+               goto err_port_admin_status_set;
+
+       err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_buffers_init;
+       }
+
+       mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+       err = register_netdev(dev);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
+                       mlxsw_sp_port->local_port);
+               goto err_register_netdev;
+       }
+
+       err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
+       if (err)
+               goto err_port_vlan_init;
+
+       mlxsw_sp->ports[local_port] = mlxsw_sp_port;
+       return 0;
+
+err_port_vlan_init:
+       unregister_netdev(dev);
+err_register_netdev:
+err_port_buffers_init:
+err_port_admin_status_set:
+err_port_mtu_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_init:
+       free_percpu(mlxsw_sp_port->pcpu_stats);
+err_alloc_stats:
+       free_netdev(dev);
+       return err;
+}
+
+static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       u16 vfid;
+
+       for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
+               mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
+}
+
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+       if (!mlxsw_sp_port)
+               return;
+       mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+       unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+       mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+       free_percpu(mlxsw_sp_port->pcpu_stats);
+       free_netdev(mlxsw_sp_port->dev);
+}
+
+static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
+{
+       int i;
+
+       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+               mlxsw_sp_port_remove(mlxsw_sp, i);
+       kfree(mlxsw_sp->ports);
+}
+
+static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
+{
+       size_t alloc_size;
+       int i;
+       int err;
+
+       alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
+       mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
+       if (!mlxsw_sp->ports)
+               return -ENOMEM;
+
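+       /* Local port 0 is reserved for the CPU port, so ports are created
+        * starting from local port 1.
+        */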
+       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+               err = mlxsw_sp_port_create(mlxsw_sp, i);
+               if (err)
+                       goto err_port_create;
+       }
+       return 0;
+
+err_port_create:
+       for (i--; i >= 1; i--)
+               mlxsw_sp_port_remove(mlxsw_sp, i);
+       kfree(mlxsw_sp->ports);
+       return err;
+}
+
+static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
+                                    char *pude_pl, void *priv)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+       struct mlxsw_sp_port *mlxsw_sp_port;
+       enum mlxsw_reg_pude_oper_status status;
+       u8 local_port;
+
+       local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+       mlxsw_sp_port = mlxsw_sp->ports[local_port];
+       if (!mlxsw_sp_port) {
+               dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+                        local_port);
+               return;
+       }
+
+       status = mlxsw_reg_pude_oper_status_get(pude_pl);
+       if (status == MLXSW_PORT_OPER_STATUS_UP) {
+               netdev_info(mlxsw_sp_port->dev, "link up\n");
+               netif_carrier_on(mlxsw_sp_port->dev);
+       } else {
+               netdev_info(mlxsw_sp_port->dev, "link down\n");
+               netif_carrier_off(mlxsw_sp_port->dev);
+       }
+}
+
+static struct mlxsw_event_listener mlxsw_sp_pude_event = {
+       .func = mlxsw_sp_pude_event_func,
+       .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
+                                  enum mlxsw_event_trap_id trap_id)
+{
+       struct mlxsw_event_listener *el;
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int err;
+
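+       /* Only the PUDE (port up / down event) trap is currently used. */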
+       switch (trap_id) {
+       case MLXSW_TRAP_ID_PUDE:
+               el = &mlxsw_sp_pude_event;
+               break;
+       }
+       err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
+       if (err)
+               return err;
+
+       mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+       if (err)
+               goto err_event_trap_set;
+
+       return 0;
+
+err_event_trap_set:
+       mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+       return err;
+}
+
+static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
+                                     enum mlxsw_event_trap_id trap_id)
+{
+       struct mlxsw_event_listener *el;
+
+       switch (trap_id) {
+       case MLXSW_TRAP_ID_PUDE:
+               el = &mlxsw_sp_pude_event;
+               break;
+       }
+       mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+}
+
+static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
+                                     void *priv)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+       struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+       struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+       if (unlikely(!mlxsw_sp_port)) {
+               dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+                                    local_port);
+               return;
+       }
+
+       skb->dev = mlxsw_sp_port->dev;
+
+       pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+       u64_stats_update_begin(&pcpu_stats->syncp);
+       pcpu_stats->rx_packets++;
+       pcpu_stats->rx_bytes += skb->len;
+       u64_stats_update_end(&pcpu_stats->syncp);
+
+       skb->protocol = eth_type_trans(skb, skb->dev);
+       netif_receive_skb(skb);
+}
+
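+/* Packets matching the traps below are sent to the CPU port and injected
+ * into the kernel Rx path by mlxsw_sp_rx_listener_func().
+ */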
+static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_FDB_MC,
+       },
+       /* Traps for specific L2 packet types, not trapped as FDB MC */
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_STP,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_LACP,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_EAPOL,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_LLDP,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_MMRP,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_MVRP,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_RPVST,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_DHCP,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+       },
+       {
+               .func = mlxsw_sp_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+       },
+};
+
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+       char htgt_pl[MLXSW_REG_HTGT_LEN];
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int i;
+       int err;
+
+       mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+       if (err)
+               return err;
+
+       for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+               err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
+                                                     &mlxsw_sp_rx_listener[i],
+                                                     mlxsw_sp);
+               if (err)
+                       goto err_rx_listener_register;
+
+               mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+                                   mlxsw_sp_rx_listener[i].trap_id);
+               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+               if (err)
+                       goto err_rx_trap_set;
+       }
+       return 0;
+
+err_rx_trap_set:
+       mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+                                         &mlxsw_sp_rx_listener[i],
+                                         mlxsw_sp);
+err_rx_listener_register:
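+       /* Roll back: restore the forward action for already configured
+        * traps and unregister their listeners.
+        */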
+       for (i--; i >= 0; i--) {
+               mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+                                   mlxsw_sp_rx_listener[i].trap_id);
+               mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+               mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+                                                 &mlxsw_sp_rx_listener[i],
+                                                 mlxsw_sp);
+       }
+       return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+               mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+                                   mlxsw_sp_rx_listener[i].trap_id);
+               mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+               mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+                                                 &mlxsw_sp_rx_listener[i],
+                                                 mlxsw_sp);
+       }
+}
+
+static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
+                                enum mlxsw_reg_sfgc_type type,
+                                enum mlxsw_reg_sfgc_bridge_type bridge_type)
+{
+       enum mlxsw_flood_table_type table_type;
+       enum mlxsw_sp_flood_table flood_table;
+       char sfgc_pl[MLXSW_REG_SFGC_LEN];
+
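+       /* vFID bridges share a single FID-indexed flood table, while
+        * 802.1Q bridges use FID-offset tables: one for unknown unicast
+        * and a second one for broadcast and unregistered multicast.
+        */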
+       if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
+               table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
+               flood_table = 0;
+       } else {
+               table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+               if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
+                       flood_table = MLXSW_SP_FLOOD_TABLE_UC;
+               else
+                       flood_table = MLXSW_SP_FLOOD_TABLE_BM;
+       }
+
+       mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
+                           flood_table);
+       return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
+}
+
+static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
+{
+       int type, err;
+
+       /* For non-offloaded netdevs, flood all traffic types to the CPU
+        * port.
+        */
+       for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+               if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+                       continue;
+
+               err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+                                           MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
+               if (err)
+                       return err;
+       }
+
+       /* For bridged ports, use one flooding table for unknown unicast
+        * traffic and a second table for unregistered multicast and
+        * broadcast.
+        */
+       for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+               if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+                       continue;
+
+               err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+                                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
+                        const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+       int err;
+
+       mlxsw_sp->core = mlxsw_core;
+       mlxsw_sp->bus_info = mlxsw_bus_info;
+
+       err = mlxsw_sp_base_mac_get(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
+               return err;
+       }
+
+       err = mlxsw_sp_ports_create(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
+               goto err_ports_create;
+       }
+
+       err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
+               goto err_event_register;
+       }
+
+       err = mlxsw_sp_traps_init(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
+               goto err_rx_listener_register;
+       }
+
+       err = mlxsw_sp_flood_init(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
+               goto err_flood_init;
+       }
+
+       err = mlxsw_sp_buffers_init(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
+               goto err_buffers_init;
+       }
+
+       err = mlxsw_sp_switchdev_init(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
+               goto err_switchdev_init;
+       }
+
+       return 0;
+
+err_switchdev_init:
+err_buffers_init:
+err_flood_init:
+       mlxsw_sp_traps_fini(mlxsw_sp);
+err_rx_listener_register:
+       mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+       mlxsw_sp_ports_remove(mlxsw_sp);
+err_ports_create:
+       mlxsw_sp_vfids_fini(mlxsw_sp);
+       return err;
+}
+
+static void mlxsw_sp_fini(void *priv)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+
+       mlxsw_sp_switchdev_fini(mlxsw_sp);
+       mlxsw_sp_traps_fini(mlxsw_sp);
+       mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+       mlxsw_sp_ports_remove(mlxsw_sp);
+       mlxsw_sp_vfids_fini(mlxsw_sp);
+}
+
+static struct mlxsw_config_profile mlxsw_sp_config_profile = {
+       .used_max_vepa_channels         = 1,
+       .max_vepa_channels              = 0,
+       .used_max_lag                   = 1,
+       .max_lag                        = 64,
+       .used_max_port_per_lag          = 1,
+       .max_port_per_lag               = 16,
+       .used_max_mid                   = 1,
+       .max_mid                        = 7000,
+       .used_max_pgt                   = 1,
+       .max_pgt                        = 0,
+       .used_max_system_port           = 1,
+       .max_system_port                = 64,
+       .used_max_vlan_groups           = 1,
+       .max_vlan_groups                = 127,
+       .used_max_regions               = 1,
+       .max_regions                    = 400,
+       .used_flood_tables              = 1,
+       .used_flood_mode                = 1,
+       .flood_mode                     = 3,
+       .max_fid_offset_flood_tables    = 2,
+       .fid_offset_flood_table_size    = VLAN_N_VID - 1,
+       .max_fid_flood_tables           = 1,
+       .fid_flood_table_size           = VLAN_N_VID,
+       .used_max_ib_mc                 = 1,
+       .max_ib_mc                      = 0,
+       .used_max_pkey                  = 1,
+       .max_pkey                       = 0,
+       .swid_config                    = {
+               {
+                       .used_type      = 1,
+                       .type           = MLXSW_PORT_SWID_TYPE_ETH,
+               }
+       },
+};
+
+static struct mlxsw_driver mlxsw_sp_driver = {
+       .kind                   = MLXSW_DEVICE_KIND_SPECTRUM,
+       .owner                  = THIS_MODULE,
+       .priv_size              = sizeof(struct mlxsw_sp),
+       .init                   = mlxsw_sp_init,
+       .fini                   = mlxsw_sp_fini,
+       .txhdr_construct        = mlxsw_sp_txhdr_construct,
+       .txhdr_len              = MLXSW_TXHDR_LEN,
+       .profile                = &mlxsw_sp_config_profile,
+};
+
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+       return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct net_device *dev = mlxsw_sp_port->dev;
+       int err;
+
+       /* When a port is not bridged, untagged packets are tagged with
+        * PVID=VID=1, thereby creating an implicit VLAN interface in the
+        * device. Remove it and let the bridge code take care of its
+        * own VLANs.
+        */
+       err = mlxsw_sp_port_kill_vid(dev, 0, 1);
+       if (err)
+               netdev_err(dev, "Failed to remove VID 1\n");
+
+       return err;
+}
+
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct net_device *dev = mlxsw_sp_port->dev;
+       int err;
+
+       /* Add an implicit VLAN interface to the device, so that untagged
+        * packets will be classified to the default vFID.
+        */
+       err = mlxsw_sp_port_add_vid(dev, 0, 1);
+       if (err)
+               netdev_err(dev, "Failed to add VID 1\n");
+
+       return err;
+}
+
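+/* Only a single bridge device can be offloaded; additional bridges are
+ * rejected at PRECHANGEUPPER time.
+ */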
+static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
+                                        struct net_device *br_dev)
+{
+       return !mlxsw_sp->master_bridge.dev ||
+              mlxsw_sp->master_bridge.dev == br_dev;
+}
+
+static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
+                                      struct net_device *br_dev)
+{
+       mlxsw_sp->master_bridge.dev = br_dev;
+       mlxsw_sp->master_bridge.ref_count++;
+}
+
+static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
+                                      struct net_device *br_dev)
+{
+       if (--mlxsw_sp->master_bridge.ref_count == 0)
+               mlxsw_sp->master_bridge.dev = NULL;
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+                                   unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct netdev_notifier_changeupper_info *info;
+       struct mlxsw_sp_port *mlxsw_sp_port;
+       struct net_device *upper_dev;
+       struct mlxsw_sp *mlxsw_sp;
+       int err;
+
+       if (!mlxsw_sp_port_dev_check(dev))
+               return NOTIFY_DONE;
+
+       mlxsw_sp_port = netdev_priv(dev);
+       mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       info = ptr;
+
+       switch (event) {
+       case NETDEV_PRECHANGEUPPER:
+               upper_dev = info->upper_dev;
+               /* A HW limitation forbids putting ports in multiple bridges. */
+               if (info->master && info->linking &&
+                   netif_is_bridge_master(upper_dev) &&
+                   !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
+                       return NOTIFY_BAD;
+               break;
+       case NETDEV_CHANGEUPPER:
+               upper_dev = info->upper_dev;
+               if (info->master &&
+                   netif_is_bridge_master(upper_dev)) {
+                       if (info->linking) {
+                               err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
+                               if (err)
+                                       netdev_err(dev, "Failed to join bridge\n");
+                               mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
+                               mlxsw_sp_port->bridged = true;
+                       } else {
+                               err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+                               if (err)
+                                       netdev_err(dev, "Failed to leave bridge\n");
+                               mlxsw_sp_port->bridged = false;
+                               mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
+                       }
+               }
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
+       .notifier_call = mlxsw_sp_netdevice_event,
+};
+
+static int __init mlxsw_sp_module_init(void)
+{
+       int err;
+
+       register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+       err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+       if (err)
+               goto err_core_driver_register;
+       return 0;
+
+err_core_driver_register:
+       unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+       return err;
+}
+
+static void __exit mlxsw_sp_module_exit(void)
+{
+       mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+       unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+}
+
+module_init(mlxsw_sp_module_init);
+module_exit(mlxsw_sp_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Spectrum driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
new file mode 100644 (file)
index 0000000..fc00749
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_H
+#define _MLXSW_SPECTRUM_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+
+#include "core.h"
+
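+/* vFIDs backing VLAN interfaces are numbered starting right above the
+ * 4K range of regular VLAN FIDs.
+ */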
+#define MLXSW_SP_VFID_BASE VLAN_N_VID
+
+struct mlxsw_sp_port;
+
+struct mlxsw_sp {
+       unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+       unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+       struct mlxsw_sp_port **ports;
+       struct mlxsw_core *core;
+       const struct mlxsw_bus_info *bus_info;
+       unsigned char base_mac[ETH_ALEN];
+       struct {
+               struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+               unsigned int interval; /* ms */
+       } fdb_notify;
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+       u32 ageing_time;
+       struct {
+               struct net_device *dev;
+               unsigned int ref_count;
+       } master_bridge;
+};
+
+struct mlxsw_sp_port_pcpu_stats {
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     tx_packets;
+       u64                     tx_bytes;
+       struct u64_stats_sync   syncp;
+       u32                     tx_dropped;
+};
+
+struct mlxsw_sp_port {
+       struct net_device *dev;
+       struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
+       struct mlxsw_sp *mlxsw_sp;
+       u8 local_port;
+       u8 stp_state;
+       u8 learning:1;
+       u8 learning_sync:1;
+       u16 pvid;
+       bool bridged;
+       /* 802.1Q bridge VLANs */
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+       /* VLAN interfaces */
+       unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+       u16 nr_vfids;
+};
+
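+/* Flood tables: UC covers unknown unicast; BM presumably covers
+ * broadcast/multicast traffic.
+ */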
+enum mlxsw_sp_flood_table {
+       MLXSW_SP_FLOOD_TABLE_UC,
+       MLXSW_SP_FLOOD_TABLE_BM,
+};
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+                                u16 vid);
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+                          u16 vid_end, bool is_member, bool untagged);
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+                         u16 vid);
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+                          __be16 __always_unused proto, u16 vid);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
new file mode 100644 (file)
index 0000000..d59195e
--- /dev/null
@@ -0,0 +1,422 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "port.h"
+#include "reg.h"
+
+struct mlxsw_sp_pb {
+       u8 index;
+       u16 size;
+};
+
+#define MLXSW_SP_PB(_index, _size)     \
+       {                               \
+               .index = _index,        \
+               .size = _size,          \
+       }
+
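+/* Default port buffer configuration: lossy buffers 0-7 and 9, each sized
+ * 208 cells (~20000 bytes at 96 bytes per cell); buffer index 8 is not
+ * configured here.
+ */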
+static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
+       MLXSW_SP_PB(0, 208),
+       MLXSW_SP_PB(1, 208),
+       MLXSW_SP_PB(2, 208),
+       MLXSW_SP_PB(3, 208),
+       MLXSW_SP_PB(4, 208),
+       MLXSW_SP_PB(5, 208),
+       MLXSW_SP_PB(6, 208),
+       MLXSW_SP_PB(7, 208),
+       MLXSW_SP_PB(9, 208),
+};
+
+#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+
+static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       char pbmc_pl[MLXSW_REG_PBMC_LEN];
+       int i;
+
+       mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
+                           0xffff, 0xffff / 2);
+       for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
+               const struct mlxsw_sp_pb *pb;
+
+               pb = &mlxsw_sp_pbs[i];
+               mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+       }
+       return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+                              MLXSW_REG(pbmc), pbmc_pl);
+}
+
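+/* Shared buffer sizes are expressed in 96-byte cells; the pool size
+ * macros below turn a byte budget, minus per-port reservations, into a
+ * cell count.
+ */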
+#define MLXSW_SP_SB_BYTES_PER_CELL 96
+
+struct mlxsw_sp_sb_pool {
+       u8 pool;
+       enum mlxsw_reg_sbpr_dir dir;
+       enum mlxsw_reg_sbpr_mode mode;
+       u32 size;
+};
+
+#define MLXSW_SP_SB_POOL_INGRESS_SIZE                          \
+       ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) /      \
+        MLXSW_SP_SB_BYTES_PER_CELL)
+#define MLXSW_SP_SB_POOL_EGRESS_SIZE                           \
+       ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) /       \
+        MLXSW_SP_SB_BYTES_PER_CELL)
+
+#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size)            \
+       {                                                       \
+               .pool = _pool,                                  \
+               .dir = _dir,                                    \
+               .mode = _mode,                                  \
+               .size = _size,                                  \
+       }
+
+#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size)                 \
+       MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS,     \
+                        MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size)                  \
+       MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS,      \
+                        MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
+       MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
+       MLXSW_SP_SB_POOL_INGRESS(1, 0),
+       MLXSW_SP_SB_POOL_INGRESS(2, 0),
+       MLXSW_SP_SB_POOL_INGRESS(3, 0),
+       MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+       MLXSW_SP_SB_POOL_EGRESS(1, 0),
+       MLXSW_SP_SB_POOL_EGRESS(2, 0),
+       MLXSW_SP_SB_POOL_EGRESS(3, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+};
+
+#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+
+static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+{
+       char sbpr_pl[MLXSW_REG_SBPR_LEN];
+       int i;
+       int err;
+
+       for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
+               const struct mlxsw_sp_sb_pool *pool;
+
+               pool = &mlxsw_sp_sb_pools[i];
+               mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
+                                   pool->mode, pool->size);
+               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
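+/* Shared buffer binding per {port, ingress PG / egress TC}: min_buff is a
+ * guaranteed quota in cells, max_buff is a cell count or a dynamic
+ * threshold depending on the pool mode, and pool selects the pool to
+ * draw from.
+ */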
+struct mlxsw_sp_sb_cm {
+       union {
+               u8 pg;
+               u8 tc;
+       } u;
+       enum mlxsw_reg_sbcm_dir dir;
+       u32 min_buff;
+       u32 max_buff;
+       u8 pool;
+};
+
+#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool)      \
+       {                                                               \
+               .u.pg = _pg_tc,                                         \
+               .dir = _dir,                                            \
+               .min_buff = _min_buff,                                  \
+               .max_buff = _max_buff,                                  \
+               .pool = _pool,                                          \
+       }
+
+#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff)              \
+       MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS,                 \
+                      _min_buff, _max_buff, 0)
+
+#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff)               \
+       MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS,                  \
+                      _min_buff, _max_buff, 0)
+
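+/* CPU port egress quota: a min_buff of 104 cells corresponds to roughly
+ * 10000 bytes at 96 bytes per cell, drawn from egress pool 3.
+ */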
+#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc)                            \
+       MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
+       MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8),
+       MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
+       MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
+       MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
+       MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
+       MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
+       MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
+       MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
+       MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff),
+       MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+       MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+       MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+       MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+       MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+       MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+       MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+       MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+       MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
+       MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
+       MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
+       MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
+       MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
+       MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
+       MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
+       MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
+       MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+};
+
+#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
+       MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+};
+
+#define MLXSW_SP_CPU_PORT_SB_CMS_LEN \
+       ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
+
+static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                               const struct mlxsw_sp_sb_cm *cms,
+                               size_t cms_len)
+{
+       char sbcm_pl[MLXSW_REG_SBCM_LEN];
+       int i;
+       int err;
+
+       for (i = 0; i < cms_len; i++) {
+               const struct mlxsw_sp_sb_cm *cm;
+
+               cm = &cms[i];
+               mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
+                                   cm->min_buff, cm->max_buff, cm->pool);
+               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+                                   mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
+                                   MLXSW_SP_SB_CMS_LEN);
+}
+
+static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
+{
+       return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
+                                   MLXSW_SP_CPU_PORT_SB_CMS_LEN);
+}
+
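+/* Per-port quotas on each shared buffer pool (SBPM register). */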
+struct mlxsw_sp_sb_pm {
+       u8 pool;
+       enum mlxsw_reg_sbpm_dir dir;
+       u32 min_buff;
+       u32 max_buff;
+};
+
+#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff)      \
+       {                                                       \
+               .pool = _pool,                                  \
+               .dir = _dir,                                    \
+               .min_buff = _min_buff,                          \
+               .max_buff = _max_buff,                          \
+       }
+
+#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff)    \
+       MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS,       \
+                      _min_buff, _max_buff)
+
+#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff)     \
+       MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS,        \
+                      _min_buff, _max_buff)
+
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
+       MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
+       MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
+       MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
+       MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
+       MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
+       MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
+       MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
+       MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+};
+
+#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       char sbpm_pl[MLXSW_REG_SBPM_LEN];
+       int i;
+       int err;
+
+       for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+               const struct mlxsw_sp_sb_pm *pm;
+
+               pm = &mlxsw_sp_sb_pms[i];
+               mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
+                                   pm->pool, pm->dir,
+                                   pm->min_buff, pm->max_buff);
+               err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+                                     MLXSW_REG(sbpm), sbpm_pl);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
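+/* Shared buffer quotas for multicast traffic, per switch priority
+ * (SBMM register).
+ */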
+struct mlxsw_sp_sb_mm {
+       u8 prio;
+       u32 min_buff;
+       u32 max_buff;
+       u8 pool;
+};
+
+#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool)     \
+       {                                                       \
+               .prio = _prio,                                  \
+               .min_buff = _min_buff,                          \
+               .max_buff = _max_buff,                          \
+               .pool = _pool,                                  \
+       }
+
+static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
+       MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+       MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
+
+static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
+{
+       char sbmm_pl[MLXSW_REG_SBMM_LEN];
+       int i;
+       int err;
+
+       for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
+               const struct mlxsw_sp_sb_mm *mc;
+
+               mc = &mlxsw_sp_sb_mms[i];
+               mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+                                   mc->max_buff, mc->pool);
+               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
+{
+       int err;
+
+       err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+       if (err)
+               return err;
+       err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
+       if (err)
+               return err;
+       err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+
+       return err;
+}
+
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       int err;
+
+       err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+       if (err)
+               return err;
+       err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
+       if (err)
+               return err;
+       err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+
+       return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
new file mode 100644 (file)
index 0000000..c39b7a1
--- /dev/null
@@ -0,0 +1,863 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <net/switchdev.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
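+/* switchdev attribute get: the parent ID is derived from the switch base
+ * MAC, so all ports of the ASIC report the same physical switch
+ * identifier.
+ */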
+static int mlxsw_sp_port_attr_get(struct net_device *dev,
+                                 struct switchdev_attr *attr)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+               attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
+               memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
+                      attr->u.ppid.id_len);
+               break;
+       case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+               attr->u.brport_flags =
+                       (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
+                       (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
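+/* Program the port's per-VID STP state (SPMS register) for every active
+ * VLAN. The bridge's five STP states are collapsed into the three states
+ * the device distinguishes.
+ */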
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                      u8 state)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       enum mlxsw_reg_spms_state spms_state;
+       char *spms_pl;
+       u16 vid;
+       int err;
+
+       switch (state) {
+       case BR_STATE_DISABLED: /* fall-through */
+       case BR_STATE_FORWARDING:
+               spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+               break;
+       case BR_STATE_LISTENING: /* fall-through */
+       case BR_STATE_LEARNING:
+               spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+               break;
+       case BR_STATE_BLOCKING:
+               spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+               break;
+       default:
+               BUG();
+       }
+
+       spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+       if (!spms_pl)
+               return -ENOMEM;
+       mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+       for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+               mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+       kfree(spms_pl);
+       return err;
+}
+
+static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                           struct switchdev_trans *trans,
+                                           u8 state)
+{
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       mlxsw_sp_port->stp_state = state;
+       return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
+}
+
+static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                          struct switchdev_trans *trans,
+                                          unsigned long brport_flags)
+{
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
+       mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+       return 0;
+}
+
+static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
+{
+       char sfdat_pl[MLXSW_REG_SFDAT_LEN];
+       int err;
+
+       mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
+       if (err)
+               return err;
+       mlxsw_sp->ageing_time = ageing_time;
+       return 0;
+}
+
+static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                           struct switchdev_trans *trans,
+                                           unsigned long ageing_jiffies)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
+}
+
+static int mlxsw_sp_port_attr_set(struct net_device *dev,
+                                 const struct switchdev_attr *attr,
+                                 struct switchdev_trans *trans)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       int err = 0;
+
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+               err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+                                                      attr->u.stp_state);
+               break;
+       case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+               err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+                                                     attr->u.brport_flags);
+               break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+               err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
+                                                      attr->u.ageing_time);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
+static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char spvid_pl[MLXSW_REG_SPVID_LEN];
+
+       mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+       char sfmr_pl[MLXSW_REG_SFMR_LEN];
+       int err;
+
+       mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+
+       if (err)
+               return err;
+
+       set_bit(fid, mlxsw_sp->active_fids);
+       return 0;
+}
+
+static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+       char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+       clear_bit(fid, mlxsw_sp->active_fids);
+
+       mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+                           fid, fid);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+       enum mlxsw_reg_svfa_mt mt;
+
+       if (mlxsw_sp_port->nr_vfids)
+               mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+       else
+               mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+       return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
+}
+
+static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+       enum mlxsw_reg_svfa_mt mt;
+
+       if (!mlxsw_sp_port->nr_vfids)
+               return 0;
+
+       mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+       return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
+}
+
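+/* Set or clear the port's membership in the per-FID unicast flood table
+ * and, unless only_uc is set, in the BM (broadcast/multicast) table as
+ * well.
+ */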
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                    u16 fid, bool set, bool only_uc)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char *sftr_pl;
+       int err;
+
+       sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+       if (!sftr_pl)
+               return -ENOMEM;
+
+       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid,
+                           MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
+                           mlxsw_sp_port->local_port, set);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+       if (err)
+               goto buffer_out;
+
+       /* Flooding control allows one to decide whether a given port will
+        * flood unicast traffic for which there is no FDB entry.
+        */
+       if (only_uc)
+               goto buffer_out;
+
+       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid,
+                           MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
+                           mlxsw_sp_port->local_port, set);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+buffer_out:
+       kfree(sftr_pl);
+       return err;
+}
+
+static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
+                                 u16 vid_end)
+{
+       u16 vid;
+       int err;
+
+       for (vid = vid_begin; vid <= vid_end; vid++) {
+               err = mlxsw_sp_port_add_vid(dev, 0, vid);
+               if (err)
+                       goto err_port_add_vid;
+       }
+       return 0;
+
+err_port_add_vid:
+       for (vid--; vid >= vid_begin; vid--)
+               mlxsw_sp_port_kill_vid(dev, 0, vid);
+       return err;
+}
+
+static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+                                    u16 vid_begin, u16 vid_end,
+                                    bool flag_untagged, bool flag_pvid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct net_device *dev = mlxsw_sp_port->dev;
+       enum mlxsw_reg_svfa_mt mt;
+       u16 vid, vid_e;
+       int err;
+
+       /* If this is invoked with BRIDGE_FLAGS_SELF and the port is
+        * not bridged, then packets ingressing through the port with
+        * the specified VIDs will be directed to the CPU.
+        */
+       if (!mlxsw_sp_port->bridged)
+               return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
+
+       for (vid = vid_begin; vid <= vid_end; vid++) {
+               if (!test_bit(vid, mlxsw_sp->active_fids)) {
+                       err = mlxsw_sp_fid_create(mlxsw_sp, vid);
+                       if (err) {
+                               netdev_err(dev, "Failed to create FID=%d\n",
+                                          vid);
+                               return err;
+                       }
+
+                       /* When creating a FID, we set a VID to FID mapping
+                        * regardless of the port's mode.
+                        */
+                       mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+                       err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
+                                                          true, vid, vid);
+                       if (err) {
+                               netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
+                                          vid);
+                               return err;
+                       }
+               }
+
+               /* Set FID mapping according to port's mode */
+               err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
+               if (err) {
+                       netdev_err(dev, "Failed to map FID=%d", vid);
+                       return err;
+               }
+
+               err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, true,
+                                               false);
+               if (err) {
+                       netdev_err(dev, "Failed to set flooding for FID=%d",
+                                  vid);
+                       return err;
+               }
+       }
+
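+       /* VLAN membership is written in batches of at most
+        * MLXSW_REG_SPVM_REC_MAX_COUNT records per register access.
+        */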
+       for (vid = vid_begin; vid <= vid_end;
+            vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+               vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+                           vid_end);
+
+               err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
+                                            flag_untagged);
+               if (err) {
+                       netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
+                                  vid, vid_e);
+                       return err;
+               }
+       }
+
+       vid = vid_begin;
+       if (flag_pvid && mlxsw_sp_port->pvid != vid) {
+               err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+               if (err) {
+                       netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
+                                  vid);
+                       return err;
+               }
+               mlxsw_sp_port->pvid = vid;
+       }
+
+       /* Change activity bits only if the HW operation succeeded. */
+       for (vid = vid_begin; vid <= vid_end; vid++)
+               set_bit(vid, mlxsw_sp_port->active_vlans);
+
+       return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
+                                          mlxsw_sp_port->stp_state);
+}
+
+static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  const struct switchdev_obj_port_vlan *vlan,
+                                  struct switchdev_trans *trans)
+{
+       bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+       bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+                                        vlan->vid_begin, vlan->vid_end,
+                                        untagged_flag, pvid_flag);
+}
+
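+/* Write or remove a single unicast FDB record (SFD register). Dynamic
+ * entries are subject to ageing, static ones are not.
+ */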
+static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
+                               const char *mac, u16 vid, bool adding,
+                               bool dynamic)
+{
+       enum mlxsw_reg_sfd_rec_policy policy;
+       enum mlxsw_reg_sfd_op op;
+       char *sfd_pl;
+       int err;
+
+       if (!vid)
+               vid = mlxsw_sp_port->pvid;
+
+       sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+       if (!sfd_pl)
+               return -ENOMEM;
+
+       policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
+                          MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+       op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
+                     MLXSW_REG_SFD_OP_WRITE_REMOVE;
+       mlxsw_reg_sfd_pack(sfd_pl, op, 0);
+       mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
+                             mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
+                             mlxsw_sp_port->local_port);
+       err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
+                             sfd_pl);
+       kfree(sfd_pl);
+
+       return err;
+}
+
+static int
+mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
+                            const struct switchdev_obj_port_fdb *fdb,
+                            struct switchdev_trans *trans)
+{
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+                                   true, false);
+}
+
+static int mlxsw_sp_port_obj_add(struct net_device *dev,
+                                const struct switchdev_obj *obj,
+                                struct switchdev_trans *trans)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       int err = 0;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+                                             SWITCHDEV_OBJ_PORT_VLAN(obj),
+                                             trans);
+               break;
+       case SWITCHDEV_OBJ_ID_PORT_FDB:
+               err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
+                                                  SWITCHDEV_OBJ_PORT_FDB(obj),
+                                                  trans);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
+static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
+                                  u16 vid_end)
+{
+       u16 vid;
+       int err;
+
+       for (vid = vid_begin; vid <= vid_end; vid++) {
+               err = mlxsw_sp_port_kill_vid(dev, 0, vid);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+                                    u16 vid_begin, u16 vid_end, bool init)
+{
+       struct net_device *dev = mlxsw_sp_port->dev;
+       u16 vid, vid_e;
+       int err;
+
+       /* If this is invoked with BRIDGE_FLAGS_SELF and the port is
+        * not bridged, prevent packets ingressing through the port
+        * with the specified VIDs from being trapped to the CPU.
+        */
+       if (!init && !mlxsw_sp_port->bridged)
+               return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+
+       for (vid = vid_begin; vid <= vid_end;
+            vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+               vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+                           vid_end);
+               err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
+                                            false);
+               if (err) {
+                       netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
+                                  vid, vid_e);
+                       return err;
+               }
+       }
+
+       if ((mlxsw_sp_port->pvid >= vid_begin) &&
+           (mlxsw_sp_port->pvid <= vid_end)) {
+               /* Default VLAN is always 1 */
+               mlxsw_sp_port->pvid = 1;
+               err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
+                                            mlxsw_sp_port->pvid);
+               if (err) {
+                       netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n",
+                                  vid);
+                       return err;
+               }
+       }
+
+       if (init)
+               goto out;
+
+       for (vid = vid_begin; vid <= vid_end; vid++) {
+               err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, false,
+                                               false);
+               if (err) {
+                       netdev_err(dev, "Failed to clear flooding for FID=%d",
+                                  vid);
+                       return err;
+               }
+
+               /* Remove FID mapping in case of Virtual mode */
+               err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+               if (err) {
+                       netdev_err(dev, "Failed to unmap FID=%d", vid);
+                       return err;
+               }
+       }
+
+out:
+       /* Change activity bits only if the HW operation succeeded. */
+       for (vid = vid_begin; vid <= vid_end; vid++)
+               clear_bit(vid, mlxsw_sp_port->active_vlans);
+
+       return 0;
+}
+
+static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  const struct switchdev_obj_port_vlan *vlan)
+{
+       return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+                                        vlan->vid_begin, vlan->vid_end, false);
+}
+
+static int
+mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
+                            const struct switchdev_obj_port_fdb *fdb)
+{
+       return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+                                   false, false);
+}
+
+static int mlxsw_sp_port_obj_del(struct net_device *dev,
+                                const struct switchdev_obj *obj)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       int err = 0;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+                                             SWITCHDEV_OBJ_PORT_VLAN(obj));
+               break;
+       case SWITCHDEV_OBJ_ID_PORT_FDB:
+               err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
+                                                  SWITCHDEV_OBJ_PORT_FDB(obj));
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
+static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+                                 struct switchdev_obj_port_fdb *fdb,
+                                 switchdev_obj_dump_cb_t *cb)
+{
+       char *sfd_pl;
+       char mac[ETH_ALEN];
+       u16 vid;
+       u8 local_port;
+       u8 num_rec;
+       int stored_err = 0;
+       int i;
+       int err;
+
+       sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+       if (!sfd_pl)
+               return -ENOMEM;
+
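+       /* Each query returns at most MLXSW_REG_SFD_REC_MAX_COUNT records;
+        * a partial batch marks the end of the dump session.
+        */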
+       mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
+       do {
+               mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
+               err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
+                                     MLXSW_REG(sfd), sfd_pl);
+               if (err)
+                       goto out;
+
+               num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+
+               /* Even in case of error, we have to run the dump to the
+                * end, so that the session in the firmware is finished.
+                */
+               if (stored_err)
+                       continue;
+
+               for (i = 0; i < num_rec; i++) {
+                       switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
+                       case MLXSW_REG_SFD_REC_TYPE_UNICAST:
+                               mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
+                                                       &local_port);
+                               if (local_port == mlxsw_sp_port->local_port) {
+                                       ether_addr_copy(fdb->addr, mac);
+                                       fdb->ndm_state = NUD_REACHABLE;
+                                       fdb->vid = vid;
+                                       err = cb(&fdb->obj);
+                                       if (err)
+                                               stored_err = err;
+                               }
+                       }
+               }
+       } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
+
+out:
+       kfree(sfd_pl);
+       return stored_err ? stored_err : err;
+}
+
+static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  struct switchdev_obj_port_vlan *vlan,
+                                  switchdev_obj_dump_cb_t *cb)
+{
+       u16 vid;
+       int err = 0;
+
+       for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+               vlan->flags = 0;
+               if (vid == mlxsw_sp_port->pvid)
+                       vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+               vlan->vid_begin = vid;
+               vlan->vid_end = vid;
+               err = cb(&vlan->obj);
+               if (err)
+                       break;
+       }
+       return err;
+}
+
+static int mlxsw_sp_port_obj_dump(struct net_device *dev,
+                                 struct switchdev_obj *obj,
+                                 switchdev_obj_dump_cb_t *cb)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       int err = 0;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
+                                             SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
+               break;
+       case SWITCHDEV_OBJ_ID_PORT_FDB:
+               err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
+                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
+const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
+       .switchdev_port_attr_get        = mlxsw_sp_port_attr_get,
+       .switchdev_port_attr_set        = mlxsw_sp_port_attr_set,
+       .switchdev_port_obj_add         = mlxsw_sp_port_obj_add,
+       .switchdev_port_obj_del         = mlxsw_sp_port_obj_del,
+       .switchdev_port_obj_dump        = mlxsw_sp_port_obj_dump,
+};
+
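+/* Process a single learned/aged-out MAC notification: mirror it into the
+ * device FDB (respecting the learning flag) and, if learning_sync is set,
+ * propagate it to the bridge via the switchdev notifier chain.
+ */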
+static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
+                                           char *sfn_pl, int rec_index,
+                                           bool adding)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port;
+       char mac[ETH_ALEN];
+       u8 local_port;
+       u16 vid;
+       int err;
+
+       mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
+       mlxsw_sp_port = mlxsw_sp->ports[local_port];
+       if (!mlxsw_sp_port) {
+               dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
+               return;
+       }
+
+       err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
+                                  adding && mlxsw_sp_port->learning, true);
+       if (err) {
+               if (net_ratelimit())
+                       netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+               return;
+       }
+
+       if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
+               struct switchdev_notifier_fdb_info info;
+               unsigned long notifier_type;
+
+               info.addr = mac;
+               info.vid = vid;
+               notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
+               call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
+                                        &info.info);
+       }
+}
+
+static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
+                                           char *sfn_pl, int rec_index)
+{
+       switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
+       case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
+               mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+                                               rec_index, true);
+               break;
+       case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
+               mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+                                               rec_index, false);
+               break;
+       }
+}
+
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+       schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
+                             msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+}
+
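+/* Delayed work that polls the device for FDB notifications (SFN register),
+ * processes any learned/aged-out records and re-arms itself according to
+ * the configured interval.
+ */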
+static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+{
+       struct mlxsw_sp *mlxsw_sp;
+       char *sfn_pl;
+       u8 num_rec;
+       int i;
+       int err;
+
+       sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
+       if (!sfn_pl)
+               return;
+
+       mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+
+       do {
+               mlxsw_reg_sfn_pack(sfn_pl);
+               err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+               if (err) {
+                       dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+                       break;
+               }
+               num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+               for (i = 0; i < num_rec; i++)
+                       mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+
+       } while (num_rec);
+
+       kfree(sfn_pl);
+       mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+}
+
+static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
+{
+       int err;
+
+       err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
+               return err;
+       }
+       INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
+       mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
+       mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+       return 0;
+}
+
+static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
+}
+
+static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       u16 fid;
+
+       for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
+               mlxsw_sp_fid_destroy(mlxsw_sp, fid);
+}
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+       return mlxsw_sp_fdb_init(mlxsw_sp);
+}
+
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       mlxsw_sp_fdb_fini(mlxsw_sp);
+       mlxsw_sp_fids_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct net_device *dev = mlxsw_sp_port->dev;
+       int err;
+
+       /* Allow only untagged packets to ingress and tag them internally
+        * with VID 1.
+        */
+       mlxsw_sp_port->pvid = 1;
+       err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
+       if (err) {
+               netdev_err(dev, "Unable to init VLANs\n");
+               return err;
+       }
+
+       /* Add an implicit VLAN interface to the device, so that untagged
+        * packets are classified to the default vFID.
+        */
+       err = mlxsw_sp_port_add_vid(dev, 0, 1);
+       if (err)
+               netdev_err(dev, "Failed to configure default vFID\n");
+
+       return err;
+}
+
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
+}
+
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+}
index d448431bbc838d2ef261a68322b5da1fa23daa31..50e29c4879dbe7fb20967ba46f0bdb25c4e5e8ee 100644 (file)
@@ -57,13 +57,11 @@ static const char mlxsw_sx_driver_version[] = "1.0";
 
 struct mlxsw_sx_port;
 
-#define MLXSW_SW_HW_ID_LEN 6
-
 struct mlxsw_sx {
        struct mlxsw_sx_port **ports;
        struct mlxsw_core *core;
        const struct mlxsw_bus_info *bus_info;
-       u8 hw_id[MLXSW_SW_HW_ID_LEN];
+       u8 hw_id[ETH_ALEN];
 };
 
 struct mlxsw_sx_port_pcpu_stats {
@@ -925,7 +923,8 @@ static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
        if (!spms_pl)
                return -ENOMEM;
-       mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+       mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
+       mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
        err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
        kfree(spms_pl);
        return err;
@@ -1069,9 +1068,9 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
        return 0;
 
 err_register_netdev:
-err_port_admin_status_set:
 err_port_mac_learning_mode_set:
 err_port_stp_state_set:
+err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_set:
 err_port_swid_set:
@@ -1178,8 +1177,7 @@ static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
        if (err)
                return err;
 
-       mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
-                           MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+       mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
        err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
        if (err)
                goto err_event_trap_set;
@@ -1212,9 +1210,8 @@ static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
        struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
 
        if (unlikely(!mlxsw_sx_port)) {
-               if (net_ratelimit())
-                       dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
-                                local_port);
+               dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+                                    local_port);
                return;
        }
 
@@ -1316,6 +1313,11 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
        if (err)
                return err;
 
+       mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+       if (err)
+               return err;
+
        for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
                err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
                                                      &mlxsw_sx_rx_listener[i],
@@ -1324,7 +1326,6 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
                        goto err_rx_listener_register;
 
                mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
-                                   MLXSW_REG_HTGT_TRAP_GROUP_RX,
                                    mlxsw_sx_rx_listener[i].trap_id);
                err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
                if (err)
@@ -1339,7 +1340,6 @@ err_rx_trap_set:
 err_rx_listener_register:
        for (i--; i >= 0; i--) {
                mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
-                                   MLXSW_REG_HTGT_TRAP_GROUP_RX,
                                    mlxsw_sx_rx_listener[i].trap_id);
                mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
 
@@ -1357,7 +1357,6 @@ static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
 
        for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
                mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
-                                   MLXSW_REG_HTGT_TRAP_GROUP_RX,
                                    mlxsw_sx_rx_listener[i].trap_id);
                mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
 
@@ -1371,25 +1370,15 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
 {
        char sfgc_pl[MLXSW_REG_SFGC_LEN];
        char sgcr_pl[MLXSW_REG_SGCR_LEN];
-       char *smid_pl;
        char *sftr_pl;
        int err;
 
-       /* Due to FW bug, we must configure SMID. */
-       smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
-       if (!smid_pl)
-               return -ENOMEM;
-       mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
-       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
-       kfree(smid_pl);
-       if (err)
-               return err;
-
        /* Configure a flooding table, which includes only CPU port. */
        sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
        if (!sftr_pl)
                return -ENOMEM;
-       mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+       mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
+                           MLXSW_PORT_CPU_PORT, true);
        err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
        kfree(sftr_pl);
        if (err)
index 06fc46c78a0b85b4a59c638798097291cdca4a03..fdf94720ca62554a3bc28341dcb8f857af490e97 100644 (file)
@@ -38,6 +38,7 @@
 
 #define MLXSW_TXHDR_LEN 0x10
 #define MLXSW_TXHDR_VERSION_0 0
+#define MLXSW_TXHDR_VERSION_1 1
 
 enum {
        MLXSW_TXHDR_ETH_CTL,
index e1329d9c2accb22b2ba93f5b14657f35da30bf71..bf08ce2baf8d1028ef44853f4a460aaecfd1ca5b 100644 (file)
@@ -617,10 +617,10 @@ static int encx24j600_hw_init(struct encx24j600_priv *priv)
                   (eidled & REVID_MASK) >> REVID_SHIFT);
 
        /* PHY Leds: link status,
-        * LEDA: Link + transmit/receive events
-        * LEDB: Link State + colision events
+        * LEDA: Link State + collision events
+        * LEDB: Link State + transmit/receive events
         */
-       encx24j600_update_reg(priv, EIDLED, 0xbc00, 0xbc00);
+       encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00);
 
        /* Loopback disabled */
        encx24j600_write_reg(priv, MACON1, 0x9);
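The fix swaps the LEDA/LEDB descriptions and widens the update mask: 0xbc00 did not cover every bit of the two LED function fields, so stale bits could survive the update, while 0xff00 rewrites the whole high byte before the new value 0xcb00 is applied. Assuming encx24j600_update_reg(priv, reg, mask, val) performs a masked read-modify-write, the call is roughly equivalent to:

    /* Hedged expansion; read_reg() is a hypothetical stand-in for the
     * driver's register read helper. */
    u16 eidled = read_reg(priv, EIDLED);
    eidled = (eidled & ~0xff00) | 0xcb00;  /* LEDA/LEDB function fields */
    encx24j600_write_reg(priv, EIDLED, eidled);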
index 2d1b9427407982b43673e96a085c7a9ff69e0a53..9ba975853ec6c712f0463d24a48a837d9c7b0152 100644 (file)
@@ -5389,8 +5389,6 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
        strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
        strlcpy(info->version, s2io_driver_version, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
-       info->regdump_len = XENA_REG_SPACE;
-       info->eedump_len = XENA_EEPROM_SPACE;
 }
 
 /**
index be916eb2f2e7304dbbfa35d8df22b618298ca3bc..9a2967016c18aa15f307e46c1542c7c1804e3217 100644 (file)
@@ -105,10 +105,6 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
        strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
-       info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
-                               * vdev->no_of_vpath;
-
-       info->n_stats = STAT_LEN;
 }
 
 /**
index 66fd868152e579a4ba3483fd4726f0e9d9662436..b159ef8303cc3e65d1e374367d19ca590d934901 100644 (file)
@@ -476,13 +476,12 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
        mac[5] = tmp >> 8;
 }
 
-static void __lpc_eth_clock_enable(struct netdata_local *pldat,
-                                  bool enable)
+static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
 {
        if (enable)
-               clk_enable(pldat->clk);
+               clk_prepare_enable(pldat->clk);
        else
-               clk_disable(pldat->clk);
+               clk_disable_unprepare(pldat->clk);
 }
 
 static void __lpc_params_setup(struct netdata_local *pldat)
@@ -1494,7 +1493,7 @@ err_out_free_irq:
 err_out_iounmap:
        iounmap(pldat->net_base);
 err_out_disable_clocks:
-       clk_disable(pldat->clk);
+       clk_disable_unprepare(pldat->clk);
        clk_put(pldat->clk);
 err_out_free_dev:
        free_netdev(ndev);
@@ -1519,7 +1518,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
        iounmap(pldat->net_base);
        mdiobus_unregister(pldat->mii_bus);
        mdiobus_free(pldat->mii_bus);
-       clk_disable(pldat->clk);
+       clk_disable_unprepare(pldat->clk);
        clk_put(pldat->clk);
        free_netdev(ndev);
 
@@ -1540,7 +1539,7 @@ static int lpc_eth_drv_suspend(struct platform_device *pdev,
                if (netif_running(ndev)) {
                        netif_device_detach(ndev);
                        __lpc_eth_shutdown(pldat);
-                       clk_disable(pldat->clk);
+                       clk_disable_unprepare(pldat->clk);
 
                        /*
                         * Reset again now clock is disable to be sure
index 7bf9c028d8d7fea824859142d81307d87e056fdd..c177c7cec13b462b80a2001a9d8a272b606710b1 100644 (file)
@@ -1344,10 +1344,6 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
-       info->n_stats = 0;
-       info->testinfo_len = 0;
-       info->regdump_len = 0;
-       info->eedump_len = 0;
 }
 
 static int octeon_mgmt_get_settings(struct net_device *netdev,
index f6fcf7450352631ad34f7c052fbe5ac960238297..b19be7c6c1f41efe0d7210493ec572d2812066e0 100644 (file)
@@ -164,7 +164,6 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
 }
 
 /**
index f1f0108c275d7017fff5a5ad39eefeb9beddeb70..30a6f246dfc9f004ac68f4b2cc84658186dce9e6 100644 (file)
@@ -91,4 +91,15 @@ config NETXEN_NIC
        ---help---
          This enables the support for NetXen's Gigabit Ethernet card.
 
+config QED
+       tristate "QLogic QED 25/40/100Gb core driver"
+       depends on PCI
+       ---help---
+         This enables the support for ...
+
+config QEDE
+       tristate "QLogic QED 25/40/100Gb Ethernet NIC"
+       depends on QED
+       ---help---
+         This enables the support for ...
 endif # NET_VENDOR_QLOGIC
index b2a283d9ae60b0a767aaa0353394a494fe40d9d3..cee90e05beb8fec1ee856787833802cb996a9916 100644 (file)
@@ -6,3 +6,5 @@ obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 obj-$(CONFIG_QLCNIC) += qlcnic/
 obj-$(CONFIG_QLGE) += qlge/
 obj-$(CONFIG_NETXEN_NIC) += netxen/
+obj-$(CONFIG_QED) += qed/
+obj-$(CONFIG_QEDE) += qede/
index 87e073c6e291609793106bfc5646c4695b9698d2..f9034467736c3bb578b97661eebdee37d71054b7 100644 (file)
@@ -93,8 +93,6 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
-       drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
 }
 
 static int
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
new file mode 100644 (file)
index 0000000..5c2fd57
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_QED) := qed.o
+
+qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
+        qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
new file mode 100644 (file)
index 0000000..ac17d86
--- /dev/null
@@ -0,0 +1,496 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_H
+#define _QED_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/zlib.h>
+#include <linux/hashtable.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+
+extern const struct qed_common_ops qed_common_ops_pass;
+#define DRV_MODULE_VERSION "8.4.0.0"
+
+#define MAX_HWFNS_PER_DEVICE    (4)
+#define NAME_SIZE 16
+#define VER_SIZE 16
+
+/* CAU states */
+enum qed_coalescing_mode {
+       QED_COAL_MODE_DISABLE,
+       QED_COAL_MODE_ENABLE
+};
+
+struct qed_eth_cb_ops;
+struct qed_dev_info;
+
+/* helpers */
+static inline u32 qed_db_addr(u32 cid, u32 DEMS)
+{
+       u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+                     FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+       return db_addr;
+}
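qed_db_addr() composes a doorbell offset from the connection ID and the DEMS selector. A usage sketch (DQ_DEMS_LEGACY is assumed to be the legacy selector from the firmware HSI headers; DOORBELL() is defined further down in this file):

    u32 db_offset = qed_db_addr(cid, DQ_DEMS_LEGACY);
    DOORBELL(p_hwfn->cdev, db_offset, prod_val);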
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)                                \
+       ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+        ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
+
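ALIGNED_TYPE_SIZE() rounds a structure size up to the device cache line implied by cache_shift. A worked example, assuming the default cache_shift of 7 (128-byte lines):

    /* sizeof == 300 -> (300 + 127) & ~127 = 384
     * sizeof == 128 -> (128 + 127) & ~127 = 128 (already aligned)
     */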
+#define for_each_hwfn(cdev, i)  for (i = 0; i < cdev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+       (val == (cond1) ? true1 :                     \
+        (val == (cond2) ? true2 : def))
+
+/* forward */
+struct qed_ptt_pool;
+struct qed_spq;
+struct qed_sb_info;
+struct qed_sb_attn_info;
+struct qed_cxt_mngr;
+struct qed_sb_sp_info;
+struct qed_mcp_info;
+
+struct qed_rt_data {
+       u32 init_val;
+       bool b_valid;
+};
+
+/* The PCI personality is not quite synonymous with the protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may also support the RoCE protocol
+ */
+enum qed_pci_personality {
+       QED_PCI_ETH,
+       QED_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct qed_qm_iids {
+       u32 cids;
+       u32 vf_cids;
+       u32 tids;
+};
+
+enum QED_RESOURCES {
+       QED_SB,
+       QED_L2_QUEUE,
+       QED_VPORT,
+       QED_RSS_ENG,
+       QED_PQ,
+       QED_RL,
+       QED_MAC,
+       QED_VLAN,
+       QED_ILT,
+       QED_MAX_RESC,
+};
+
+enum QED_FEATURE {
+       QED_PF_L2_QUE,
+       QED_MAX_FEATURES,
+};
+
+enum QED_PORT_MODE {
+       QED_PORT_MODE_DE_2X40G,
+       QED_PORT_MODE_DE_2X50G,
+       QED_PORT_MODE_DE_1X100G,
+       QED_PORT_MODE_DE_4X10G_F,
+       QED_PORT_MODE_DE_4X10G_E,
+       QED_PORT_MODE_DE_4X20G,
+       QED_PORT_MODE_DE_1X40G,
+       QED_PORT_MODE_DE_2X25G,
+       QED_PORT_MODE_DE_1X25G
+};
+
+struct qed_hw_info {
+       /* PCI personality */
+       enum qed_pci_personality        personality;
+
+       /* Resource Allocation scheme results */
+       u32                             resc_start[QED_MAX_RESC];
+       u32                             resc_num[QED_MAX_RESC];
+       u32                             feat_num[QED_MAX_FEATURES];
+
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+       u8                              num_tc;
+       u8                              offload_tc;
+       u8                              non_offload_tc;
+
+       u32                             concrete_fid;
+       u16                             opaque_fid;
+       u16                             ovlan;
+       u32                             part_num[4];
+
+       u32                             vendor_id;
+       u32                             device_id;
+
+       unsigned char                   hw_mac_addr[ETH_ALEN];
+
+       struct qed_igu_info             *p_igu_info;
+
+       u32                             port_mode;
+       u32                             hw_mode;
+};
+
+struct qed_hw_cid_data {
+       u32     cid;
+       bool    b_cid_allocated;
+
+       /* Additional identifiers */
+       u16     opaque_fid;
+       u8      vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE        0x2000
+
+struct qed_dmae_info {
+       /* Mutex for synchronizing access to functions */
+       struct mutex    mutex;
+
+       u8              channel;
+
+       dma_addr_t      completion_word_phys_addr;
+
+       /* The memory location where the DMAE writes the completion
+        * value when an operation is finished on this context.
+        */
+       u32             *p_completion_word;
+
+       dma_addr_t      intermediate_buffer_phys_addr;
+
+       /* An intermediate buffer for DMAE operations that use virtual
+        * addresses - data is DMA'd to/from this buffer and then
+        * memcpy'd to/from the virtual address
+        */
+       u32             *p_intermediate_buffer;
+
+       dma_addr_t      dmae_cmd_phys_addr;
+       struct dmae_cmd *p_dmae_cmd;
+};
+
+struct qed_qm_info {
+       struct init_qm_pq_params        *qm_pq_params;
+       struct init_qm_vport_params     *qm_vport_params;
+       struct init_qm_port_params      *qm_port_params;
+       u16                             start_pq;
+       u8                              start_vport;
+       u8                              pure_lb_pq;
+       u8                              offload_pq;
+       u8                              pure_ack_pq;
+       u8                              vf_queues_offset;
+       u16                             num_pqs;
+       u16                             num_vf_pqs;
+       u8                              num_vports;
+       u8                              max_phys_tcs_per_port;
+       bool                            pf_rl_en;
+       bool                            pf_wfq_en;
+       bool                            vport_rl_en;
+       bool                            vport_wfq_en;
+       u8                              pf_wfq;
+       u32                             pf_rl;
+};
+
+struct storm_stats {
+       u32     address;
+       u32     len;
+};
+
+struct qed_storm_stats {
+       struct storm_stats mstats;
+       struct storm_stats pstats;
+       struct storm_stats tstats;
+       struct storm_stats ustats;
+};
+
+struct qed_fw_data {
+       struct fw_ver_info      *fw_ver_info;
+       const u8                *modes_tree_buf;
+       union init_op           *init_ops;
+       const u32               *arr_data;
+       u32                     init_ops_size;
+};
+
+struct qed_simd_fp_handler {
+       void    *token;
+       void    (*func)(void *);
+};
+
+struct qed_hwfn {
+       struct qed_dev                  *cdev;
+       u8                              my_id;          /* ID inside the PF */
+#define IS_LEAD_HWFN(edev)              (!((edev)->my_id))
+       u8                              rel_pf_id;      /* Relative to engine */
+       u8                              abs_pf_id;
+#define QED_PATH_ID(_p_hwfn)           ((_p_hwfn)->abs_pf_id & 1)
+       u8                              port_id;
+       bool                            b_active;
+
+       u32                             dp_module;
+       u8                              dp_level;
+       char                            name[NAME_SIZE];
+
+       bool                            first_on_engine;
+       bool                            hw_init_done;
+
+       /* BAR access */
+       void __iomem                    *regview;
+       void __iomem                    *doorbells;
+       u64                             db_phys_addr;
+       unsigned long                   db_size;
+
+       /* PTT pool */
+       struct qed_ptt_pool             *p_ptt_pool;
+
+       /* HW info */
+       struct qed_hw_info              hw_info;
+
+       /* rt_array (for init-tool) */
+       struct qed_rt_data              *rt_data;
+
+       /* SPQ */
+       struct qed_spq                  *p_spq;
+
+       /* EQ */
+       struct qed_eq                   *p_eq;
+
+       /* Consolidate Q */
+       struct qed_consq                *p_consq;
+
+       /* Slow-Path definitions */
+       struct tasklet_struct           *sp_dpc;
+       bool                            b_sp_dpc_enabled;
+
+       struct qed_ptt                  *p_main_ptt;
+       struct qed_ptt                  *p_dpc_ptt;
+
+       struct qed_sb_sp_info           *p_sp_sb;
+       struct qed_sb_attn_info         *p_sb_attn;
+
+       /* Protocol related */
+       struct qed_pf_params            pf_params;
+
+       /* Array of sb_info of all status blocks */
+       struct qed_sb_info              *sbs_info[MAX_SB_PER_PF_MIMD];
+       u16                             num_sbs;
+
+       struct qed_cxt_mngr             *p_cxt_mngr;
+
+       /* Flag indicating whether interrupts are enabled or not */
+       bool                            b_int_enabled;
+
+       struct qed_mcp_info             *mcp_info;
+
+       struct qed_hw_cid_data          *p_tx_cids;
+       struct qed_hw_cid_data          *p_rx_cids;
+
+       struct qed_dmae_info            dmae_info;
+
+       /* QM init */
+       struct qed_qm_info              qm_info;
+       struct qed_storm_stats          storm_stats;
+
+       /* Buffer for unzipping firmware data */
+       void                            *unzip_buf;
+
+       struct qed_simd_fp_handler      simd_proto_handler[64];
+
+       struct z_stream_s               *stream;
+};
+
+struct pci_params {
+       int             pm_cap;
+
+       unsigned long   mem_start;
+       unsigned long   mem_end;
+       unsigned int    irq;
+       u8              pf_num;
+};
+
+struct qed_int_param {
+       u32     int_mode;
+       u8      num_vectors;
+       u8      min_msix_cnt; /* for minimal functionality */
+};
+
+struct qed_int_params {
+       struct qed_int_param    in;
+       struct qed_int_param    out;
+       struct msix_entry       *msix_table;
+       bool                    fp_initialized;
+       u8                      fp_msix_base;
+       u8                      fp_msix_cnt;
+};
+
+struct qed_dev {
+       u32     dp_module;
+       u8      dp_level;
+       char    name[NAME_SIZE];
+
+       u8      type;
+#define QED_DEV_TYPE_BB_A0      (0 << 0)
+#define QED_DEV_TYPE_MASK       (0x3)
+#define QED_DEV_TYPE_SHIFT      (0)
+
+       u16     chip_num;
+#define CHIP_NUM_MASK                   0xffff
+#define CHIP_NUM_SHIFT                  16
+
+       u16     chip_rev;
+#define CHIP_REV_MASK                   0xf
+#define CHIP_REV_SHIFT                  12
+
+       u16                             chip_metal;
+#define CHIP_METAL_MASK                 0xff
+#define CHIP_METAL_SHIFT                4
+
+       u16                             chip_bond_id;
+#define CHIP_BOND_ID_MASK               0xf
+#define CHIP_BOND_ID_SHIFT              0
+
+       u8                              num_engines;
+       u8                              num_ports_in_engines;
+       u8                              num_funcs_in_port;
+
+       u8                              path_id;
+       enum mf_mode                    mf_mode;
+#define IS_MF(_p_hwfn)          (((_p_hwfn)->cdev)->mf_mode != SF)
+#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
+#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+
+       int                             pcie_width;
+       int                             pcie_speed;
+       u8                              ver_str[VER_SIZE];
+
+       /* Add MF related configuration */
+       u8                              mcp_rev;
+       u8                              boot_mode;
+
+       u8                              wol;
+
+       u32                             int_mode;
+       enum qed_coalescing_mode        int_coalescing_mode;
+       u8                              rx_coalesce_usecs;
+       u8                              tx_coalesce_usecs;
+
+       /* Start Bar offset of first hwfn */
+       void __iomem                    *regview;
+       void __iomem                    *doorbells;
+       u64                             db_phys_addr;
+       unsigned long                   db_size;
+
+       /* PCI */
+       u8                              cache_shift;
+
+       /* Init */
+       const struct iro                *iro_arr;
+#define IRO (p_hwfn->cdev->iro_arr)
+
+       /* HW functions */
+       u8                              num_hwfns;
+       struct qed_hwfn                 hwfns[MAX_HWFNS_PER_DEVICE];
+
+       u32                             drv_type;
+
+       struct qed_eth_stats            *reset_stats;
+       struct qed_fw_data              *fw_data;
+
+       u32                             mcp_nvm_resp;
+
+       /* Linux specific here */
+       struct  qede_dev                *edev;
+       struct  pci_dev                 *pdev;
+       int                             msg_enable;
+
+       struct pci_params               pci_params;
+
+       struct qed_int_params           int_params;
+
+       u8                              protocol;
+#define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)
+
+       /* Callbacks to protocol driver */
+       union {
+               struct qed_common_cb_ops        *common;
+               struct qed_eth_cb_ops           *eth;
+       } protocol_ops;
+       void                            *ops_cookie;
+
+       const struct firmware           *firmware;
+};
+
+#define QED_GET_TYPE(dev)       (((dev)->type & QED_DEV_TYPE_MASK) >> \
+                                QED_DEV_TYPE_SHIFT)
+#define QED_IS_BB_A0(dev)       (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
+#define QED_IS_BB(dev)  (QED_IS_BB_A0(dev))
+
+#define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
+#define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB
+
+/**
+ * @brief qed_concrete_to_sw_fid - get the sw function id from
+ *        the concrete value.
+ *
+ * @param concrete_fid
+ *
+ * @return inline u8
+ */
+static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+                                       u32 concrete_fid)
+{
+       u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+
+       return pfid;
+}
+
+#define PURE_LB_TC 8
+
+#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
+
+/* Other Linux specific common definitions */
+#define DP_NAME(cdev) ((cdev)->name)
+
+#define REG_ADDR(cdev, offset)          (void __iomem *)((u8 __iomem *)\
+                                               (cdev->regview) + \
+                                                        (offset))
+
+#define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
+#define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
+#define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))
+
+#define DOORBELL(cdev, db_addr, val)                    \
+       writel((u32)val, (void __iomem *)((u8 __iomem *)\
+                                         (cdev->doorbells) + (db_addr)))
+
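REG_RD()/REG_WR() wrap readl()/writel() over the mapped register BAR, and DOORBELL() does the same over the doorbell BAR. A read-modify-write sketch (the offset name is hypothetical):

    u32 val = REG_RD(cdev, EXAMPLE_REG_OFFSET);  /* hypothetical offset */
    REG_WR(cdev, EXAMPLE_REG_OFFSET, val | 0x1);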
+/* Prototypes */
+int qed_fill_dev_info(struct qed_dev *cdev,
+                     struct qed_dev_info *dev_info);
+void qed_link_update(struct qed_hwfn *hwfn);
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
+                  u32 input_len, u8 *input_buf,
+                  u32 max_size, u8 *unzip_buf);
+
+#define QED_ETH_INTERFACE_VERSION       300
+
+#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
new file mode 100644 (file)
index 0000000..7ccdb46
--- /dev/null
@@ -0,0 +1,847 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES         PROTOCOLID_COMMON
+#define NUM_TASK_TYPES         2
+#define NUM_TASK_PF_SEGMENTS   4
+
+/* QM constants */
+#define QM_PQ_ELEMENT_SIZE     4 /* in bytes */
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT         4
+#define DQ_RANGE_ALIGN         BIT(DQ_RANGE_SHIFT)
+
+/* ILT constants */
+#define ILT_DEFAULT_HW_P_SIZE          3
+#define ILT_PAGE_IN_BYTES(hw_p_size)   (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg)  PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK                0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT       0
+#define ILT_ENTRY_VALID_MASK           0x1ULL
+#define ILT_ENTRY_VALID_SHIFT          52
+#define ILT_ENTRY_IN_REGS              2
+#define ILT_REG_SIZE_IN_BYTES          4
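These masks describe the 64-bit ILT entry: the physical address shifted right by 12 (4 KiB pages) occupies bits 0-51 and the valid flag sits at bit 52, with each entry written to hardware as two 32-bit registers. Packing one entry, mirroring the SET_FIELD() usage in qed_ilt_init_pf() later in this file (dma_addr stands in for the line's physical address):

    u64 ilt_hw_entry = 0;
    SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
    SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, dma_addr >> 12);  /* page frame */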
+
+/* connection context union */
+union conn_context {
+       struct core_conn_context core_ctx;
+       struct eth_conn_context eth_ctx;
+};
+
+#define CONN_CXT_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+/* PF per protocol configuration object */
+struct qed_conn_type_cfg {
+       u32 cid_count;
+       u32 cid_start;
+};
+
+/* ILT Client configuration, Per connection type (protocol) resources. */
+#define ILT_CLI_PF_BLOCKS      (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define CDUC_BLK               (0)
+
+enum ilt_clients {
+       ILT_CLI_CDUC,
+       ILT_CLI_QM,
+       ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+       u32 reg;
+       u32 val;
+};
+
+struct qed_ilt_cli_blk {
+       u32 total_size; /* 0 means not active */
+       u32 real_size_in_page;
+       u32 start_line;
+};
+
+struct qed_ilt_client_cfg {
+       bool active;
+
+       /* ILT boundaries */
+       struct ilt_cfg_pair first;
+       struct ilt_cfg_pair last;
+       struct ilt_cfg_pair p_size;
+
+       /* ILT client blocks for PF */
+       struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+       u32 pf_total_lines;
+};
+
+/* Per Path -
+ *      ILT shadow table
+ *      Protocol acquired CID lists
+ *      PF start line in ILT
+ */
+struct qed_dma_mem {
+       dma_addr_t p_phys;
+       void *p_virt;
+       size_t size;
+};
+
+struct qed_cid_acquired_map {
+       u32             start_cid;
+       u32             max_count;
+       unsigned long   *cid_map;
+};
+
+struct qed_cxt_mngr {
+       /* Per protocol configuration */
+       struct qed_conn_type_cfg        conn_cfg[MAX_CONN_TYPES];
+
+       /* computed ILT structure */
+       struct qed_ilt_client_cfg       clients[ILT_CLI_MAX];
+
+       /* Acquired CIDs */
+       struct qed_cid_acquired_map     acquired[MAX_CONN_TYPES];
+
+       /* ILT  shadow table */
+       struct qed_dma_mem              *ilt_shadow;
+       u32                             pf_start_line;
+};
+
+static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
+{
+       u32 type, pf_cids = 0;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++)
+               pf_cids += p_mngr->conn_cfg[type].cid_count;
+
+       return pf_cids;
+}
+
+static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
+                           struct qed_qm_iids *iids)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       int type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++)
+               iids->cids += p_mngr->conn_cfg[type].cid_count;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+}
+
+/* set the iids count per protocol */
+static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
+                                       enum protocol_type type,
+                                       u32 cid_count)
+{
+       struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+       struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+       p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+}
+
+static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
+                                struct qed_ilt_cli_blk *p_blk,
+                                u32 start_line, u32 total_size,
+                                u32 elem_size)
+{
+       u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+       /* verify that it's called only once for each block */
+       if (p_blk->total_size)
+               return;
+
+       p_blk->total_size = total_size;
+       p_blk->real_size_in_page = 0;
+       if (elem_size)
+               p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+       p_blk->start_line = start_line;
+}
+
+static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
+                                struct qed_ilt_client_cfg *p_cli,
+                                struct qed_ilt_cli_blk *p_blk,
+                                u32 *p_line, enum ilt_clients client_id)
+{
+       if (!p_blk->total_size)
+               return;
+
+       if (!p_cli->active)
+               p_cli->first.val = *p_line;
+
+       p_cli->active = true;
+       *p_line += DIV_ROUND_UP(p_blk->total_size,
+                               p_blk->real_size_in_page);
+       p_cli->last.val = *p_line - 1;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                  "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+                  client_id, p_cli->first.val,
+                  p_cli->last.val, p_blk->total_size,
+                  p_blk->real_size_in_page, p_blk->start_line);
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct qed_ilt_client_cfg *p_cli;
+       struct qed_ilt_cli_blk *p_blk;
+       u32 curr_line, total, pf_cids;
+       struct qed_qm_iids qm_iids;
+
+       memset(&qm_iids, 0, sizeof(qm_iids));
+
+       p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                  "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+                  p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+       /* CDUC */
+       p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+       curr_line = p_mngr->pf_start_line;
+       p_cli->pf_total_lines = 0;
+
+       /* get the counters for the CDUC and QM clients  */
+       pf_cids = qed_cxt_cdu_iids(p_mngr);
+
+       p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+       total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+       qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                            total, CONN_CXT_SIZE(p_hwfn));
+
+       qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+       p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+       /* QM */
+       p_cli = &p_mngr->clients[ILT_CLI_QM];
+       p_blk = &p_cli->pf_blks[0];
+
+       qed_cxt_qm_iids(p_hwfn, &qm_iids);
+       total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
+                                  p_hwfn->qm_info.num_pqs, 0);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                  "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
+                  qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+
+       qed_ilt_cli_blk_fill(p_cli, p_blk,
+                            curr_line, total * 0x1000,
+                            QM_PQ_ELEMENT_SIZE);
+
+       qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+       p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+       if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+           RESC_NUM(p_hwfn, QED_ILT)) {
+               DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+                      curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+#define for_each_ilt_valid_client(pos, clients)        \
+               for (pos = 0; pos < ILT_CLI_MAX; pos++)
+
+/* Total number of ILT lines used by this PF */
+static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
+{
+       u32 size = 0;
+       u32 i;
+
+       for_each_ilt_valid_client(i, ilt_clients) {
+               if (!ilt_clients[i].active)
+                       continue;
+               size += (ilt_clients[i].last.val -
+                        ilt_clients[i].first.val + 1);
+       }
+
+       return size;
+}
+
+static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 ilt_size, i;
+
+       ilt_size = qed_cxt_ilt_shadow_size(p_cli);
+
+       for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+               struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+               if (p_dma->p_virt)
+                       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                         p_dma->size, p_dma->p_virt,
+                                         p_dma->p_phys);
+               p_dma->p_virt = NULL;
+       }
+       kfree(p_mngr->ilt_shadow);
+}
+
+static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+                            struct qed_ilt_cli_blk *p_blk,
+                            enum ilt_clients ilt_client,
+                            u32 start_line_offset)
+{
+       struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+       u32 lines, line, sz_left;
+
+       if (!p_blk->total_size)
+               return 0;
+
+       sz_left = p_blk->total_size;
+       lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
+       line = p_blk->start_line + start_line_offset -
+              p_hwfn->p_cxt_mngr->pf_start_line;
+
+       for (; lines; lines--) {
+               dma_addr_t p_phys;
+               void *p_virt;
+               u32 size;
+
+               size = min_t(u32, sz_left,
+                            p_blk->real_size_in_page);
+               p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                           size,
+                                           &p_phys,
+                                           GFP_KERNEL);
+               if (!p_virt)
+                       return -ENOMEM;
+               memset(p_virt, 0, size);
+
+               ilt_shadow[line].p_phys = p_phys;
+               ilt_shadow[line].p_virt = p_virt;
+               ilt_shadow[line].size = size;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                          "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
+                           line, (u64)p_phys, p_virt, size);
+
+               sz_left -= size;
+               line++;
+       }
+
+       return 0;
+}
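Each ILT line is a DMA-coherent block zeroed immediately after allocation; on kernels of this era the two steps could equivalently be collapsed into dma_zalloc_coherent(). A sketch of the equivalent allocation for one line:

    p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, &p_phys,
                                 GFP_KERNEL);
    if (!p_virt)
            return -ENOMEM;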
+
+static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct qed_ilt_client_cfg *clients = p_mngr->clients;
+       struct qed_ilt_cli_blk *p_blk;
+       u32 size, i, j;
+       int rc;
+
+       size = qed_cxt_ilt_shadow_size(clients);
+       p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+                                    GFP_KERNEL);
+       if (!p_mngr->ilt_shadow) {
+               DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
+               rc = -ENOMEM;
+               goto ilt_shadow_fail;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                  "Allocated 0x%x bytes for ilt shadow\n",
+                  (u32)(size * sizeof(struct qed_dma_mem)));
+
+       for_each_ilt_valid_client(i, clients) {
+               if (!clients[i].active)
+                       continue;
+               for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+                       p_blk = &clients[i].pf_blks[j];
+                       rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+                       if (rc != 0)
+                               goto ilt_shadow_fail;
+               }
+       }
+
+       return 0;
+
+ilt_shadow_fail:
+       qed_ilt_shadow_free(p_hwfn);
+       return rc;
+}
+
+static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               kfree(p_mngr->acquired[type].cid_map);
+               p_mngr->acquired[type].max_count = 0;
+               p_mngr->acquired[type].start_cid = 0;
+       }
+}
+
+static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 start_cid = 0;
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+               u32 size;
+
+               if (cid_cnt == 0)
+                       continue;
+
+               size = DIV_ROUND_UP(cid_cnt,
+                                   sizeof(unsigned long) * BITS_PER_BYTE) *
+                      sizeof(unsigned long);
+               p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
+               if (!p_mngr->acquired[type].cid_map)
+                       goto cid_map_fail;
+
+               p_mngr->acquired[type].max_count = cid_cnt;
+               p_mngr->acquired[type].start_cid = start_cid;
+
+               p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+                          "Type %08x start: %08x count %08x\n",
+                          type, p_mngr->acquired[type].start_cid,
+                          p_mngr->acquired[type].max_count);
+               start_cid += cid_cnt;
+       }
+
+       return 0;
+
+cid_map_fail:
+       qed_cid_map_free(p_hwfn);
+       return -ENOMEM;
+}
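The bitmap sizing above is the open-coded form of the kernel's BITS_TO_LONGS() helper; an equivalent sketch:

    size = BITS_TO_LONGS(cid_cnt) * sizeof(unsigned long);
    p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);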
+
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr;
+       u32 i;
+
+       p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
+       if (!p_mngr) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+               return -ENOMEM;
+       }
+
+       /* Initialize ILT client registers */
+       p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+       p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+       p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+       p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+       /* default ILT page size for all clients is 32K */
+       for (i = 0; i < ILT_CLI_MAX; i++)
+               p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+       /* Set the cxt manager pointer prior to further allocations */
+       p_hwfn->p_cxt_mngr = p_mngr;
+
+       return 0;
+}
+
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
+{
+       int rc;
+
+       /* Allocate the ILT shadow table */
+       rc = qed_ilt_shadow_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+               goto tables_alloc_fail;
+       }
+
+       /* Allocate and initialize the acquired cids bitmaps */
+       rc = qed_cid_map_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+               goto tables_alloc_fail;
+       }
+
+       return 0;
+
+tables_alloc_fail:
+       qed_cxt_mngr_free(p_hwfn);
+       return rc;
+}
+
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn->p_cxt_mngr)
+               return;
+
+       qed_cid_map_free(p_hwfn);
+       qed_ilt_shadow_free(p_hwfn);
+       kfree(p_hwfn->p_cxt_mngr);
+
+       p_hwfn->p_cxt_mngr = NULL;
+}
+
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       int type;
+
+       /* Reset acquired cids */
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+
+               if (cid_cnt == 0)
+                       continue;
+
+               memset(p_mngr->acquired[type].cid_map, 0,
+                      DIV_ROUND_UP(cid_cnt,
+                                   sizeof(unsigned long) * BITS_PER_BYTE) *
+                      sizeof(unsigned long));
+       }
+}
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+       CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+       (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+       CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+       (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT        \
+       CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+       (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
+{
+       u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+       /* CDUC - connection configuration */
+       page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+       cxt_size = CONN_CXT_SIZE(p_hwfn);
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+       SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+       SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+}
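With the default ILT page size (p_size = 3, so pages of 1 << (3 + 12) = 32 KiB), the CDU parameters fall out of simple division. A worked example with a hypothetical context size:

    /* If CONN_CXT_SIZE(p_hwfn) were 320 bytes:
     * elems_per_page = 32768 / 320 = 102 contexts per page
     * block_waste    = 32768 - 102 * 320 = 128 unused bytes per page
     */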
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_pf_rt_init_params params;
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct qed_qm_iids iids;
+
+       memset(&iids, 0, sizeof(iids));
+       qed_cxt_qm_iids(p_hwfn, &iids);
+
+       memset(&params, 0, sizeof(params));
+       params.port_id = p_hwfn->port_id;
+       params.pf_id = p_hwfn->rel_pf_id;
+       params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+       params.is_first_pf = p_hwfn->first_on_engine;
+       params.num_pf_cids = iids.cids;
+       params.start_pq = qm_info->start_pq;
+       params.num_pf_pqs = qm_info->num_pqs;
+       params.start_vport = qm_info->num_vports;
+       params.pf_wfq = qm_info->pf_wfq;
+       params.pf_rl = qm_info->pf_rl;
+       params.pq_params = qm_info->qm_pq_params;
+       params.vport_params = qm_info->qm_vport_params;
+
+       qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+}
+
+/* CM PF */
+static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+{
+       union qed_qm_pq_params pq_params;
+       u16 pq;
+
+       /* XCM pure-LB queue */
+       memset(&pq_params, 0, sizeof(pq_params));
+       pq_params.core.tc = LB_TC;
+       pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+       STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
+
+       return 0;
+}
+
+/* DQ PF */
+static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 dq_pf_max_cid = 0;
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+       dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+       /* 5 - PF */
+       dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+}
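Each DORQ_REG_PF_MAX_ICID_N register is loaded with a running total of the per-protocol CID counts in DQ_RANGE_ALIGN (16-CID) units. A worked example with hypothetical counts:

    /* If conn_cfg[0].cid_count = 32 and conn_cfg[1].cid_count = 64:
     * MAX_ICID_0 = 32 >> 4 = 2
     * MAX_ICID_1 = 2 + (64 >> 4) = 6   (cumulative, in 16-CID units)
     */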
+
+static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *ilt_clients;
+       int i;
+
+       ilt_clients = p_hwfn->p_cxt_mngr->clients;
+       for_each_ilt_valid_client(i, ilt_clients) {
+               if (!ilt_clients[i].active)
+                       continue;
+               STORE_RT_REG(p_hwfn,
+                            ilt_clients[i].first.reg,
+                            ilt_clients[i].first.val);
+               STORE_RT_REG(p_hwfn,
+                            ilt_clients[i].last.reg,
+                            ilt_clients[i].last.val);
+               STORE_RT_REG(p_hwfn,
+                            ilt_clients[i].p_size.reg,
+                            ilt_clients[i].p_size.val);
+       }
+}
+
+/* ILT (PSWRQ2) PF */
+static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *clients;
+       struct qed_cxt_mngr *p_mngr;
+       struct qed_dma_mem *p_shdw;
+       u32 line, rt_offst, i;
+
+       qed_ilt_bounds_init(p_hwfn);
+
+       p_mngr = p_hwfn->p_cxt_mngr;
+       p_shdw = p_mngr->ilt_shadow;
+       clients = p_hwfn->p_cxt_mngr->clients;
+
+       for_each_ilt_valid_client(i, clients) {
+               if (!clients[i].active)
+                       continue;
+
+               /** The client's first val and the RT array are absolute;
+                *  ILT shadow lines are relative.
+                */
+               line = clients[i].first.val - p_mngr->pf_start_line;
+               rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+                          clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+               for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+                    line++, rt_offst += ILT_ENTRY_IN_REGS) {
+                       u64 ilt_hw_entry = 0;
+
+                       /** p_virt could be NULL in case of dynamic
+                        *  allocation
+                        */
+                       if (p_shdw[line].p_virt) {
+                               SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+                               SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+                                         (p_shdw[line].p_phys >> 12));
+
+                               DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+                                          "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
+                                          rt_offst, line, i,
+                                          (u64)(p_shdw[line].p_phys >> 12));
+                       }
+
+                       STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+               }
+       }
+}
+
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
+{
+       qed_cdu_init_common(p_hwfn);
+}
+
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+{
+       qed_qm_init_pf(p_hwfn);
+       qed_cm_init_pf(p_hwfn);
+       qed_dq_init_pf(p_hwfn);
+       qed_ilt_init_pf(p_hwfn);
+}
+
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+                       enum protocol_type type,
+                       u32 *p_cid)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 rel_cid;
+
+       if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+               DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+               return -EINVAL;
+       }
+
+       rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
+                                     p_mngr->acquired[type].max_count);
+
+       if (rel_cid >= p_mngr->acquired[type].max_count) {
+               DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
+                         type);
+               return -EINVAL;
+       }
+
+       __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+
+       *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+
+       return 0;
+}
+
+static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
+                                     u32 cid,
+                                     enum protocol_type *p_type)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct qed_cid_acquired_map *p_map;
+       enum protocol_type p;
+       u32 rel_cid;
+
+       /* Iterate over protocols and find matching cid range */
+       for (p = 0; p < MAX_CONN_TYPES; p++) {
+               p_map = &p_mngr->acquired[p];
+
+               if (!p_map->cid_map)
+                       continue;
+               if (cid >= p_map->start_cid &&
+                   cid < p_map->start_cid + p_map->max_count)
+                       break;
+       }
+       *p_type = p;
+
+       if (p == MAX_CONN_TYPES) {
+               DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
+               return false;
+       }
+
+       rel_cid = cid - p_map->start_cid;
+       if (!test_bit(rel_cid, p_map->cid_map)) {
+               DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
+               return false;
+       }
+       return true;
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+                        u32 cid)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       enum protocol_type type;
+       bool b_acquired;
+       u32 rel_cid;
+
+       /* Test acquired and find matching per-protocol map */
+       b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+
+       if (!b_acquired)
+               return;
+
+       rel_cid = cid - p_mngr->acquired[type].start_cid;
+       __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+}
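Acquire and release bracket a connection's lifetime: acquire finds and sets the first clear bit in the per-protocol map, release clears it again. A usage sketch (PROTOCOLID_ETH is the same enum value used by qed_cxt_set_pf_params() below):

    u32 cid;
    int rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid);
    if (rc)
            return rc;
    /* program and run the connection, then hand the CID back */
    qed_cxt_release_cid(p_hwfn, cid);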
+
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+                        struct qed_cxt_info *p_info)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+       enum protocol_type type;
+       bool b_acquired;
+
+       /* Test acquired and find matching per-protocol map */
+       b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+       if (!b_acquired)
+               return -EINVAL;
+
+       /* set the protocol type */
+       p_info->type = type;
+
+       /* compute context virtual pointer */
+       hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+       conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+       cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+       line = p_info->iid / cxts_per_p;
+
+       /* Make sure context is allocated (dynamic allocation) */
+       if (!p_mngr->ilt_shadow[line].p_virt)
+               return -EINVAL;
+
+       p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+                       p_info->iid % cxts_per_p * conn_cxt_size;
+
+       DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
+                  "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+                  p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
+
+       return 0;
+}
+
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
+       struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+
+       /* Set the number of required CORE connections */
+       u32 core_cids = 1; /* SPQ */
+
+       qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+
+       qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+                                   p_params->num_cons);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
new file mode 100644 (file)
index 0000000..c8e1f5e
--- /dev/null
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CXT_H
+#define _QED_CXT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+#include "qed.h"
+
+struct qed_cxt_info {
+       void                    *p_cxt;
+       u32                     iid;
+       enum protocol_type      type;
+};
+
+/**
+ * @brief qed_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+                       enum protocol_type type,
+                       u32 *p_cid);
+
+/**
+ * @brief qed_cxt_get_cid_info - Returns the context info for a specific cid
+ *
+ *
+ * @param p_hwfn
+ * @param p_info in/out
+ *
+ * @return int
+ */
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+                        struct qed_cxt_info *p_info);
+
+enum qed_cxt_elem_type {
+       QED_ELEM_CXT,
+       QED_ELEM_TASK
+};
+
+/**
+ * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_release_cid - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+                        u32 cid);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
new file mode 100644 (file)
index 0000000..b9b7b7e
--- /dev/null
@@ -0,0 +1,1797 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/* API common to all protocols */
+void qed_init_dp(struct qed_dev *cdev,
+                u32 dp_module, u8 dp_level)
+{
+       u32 i;
+
+       cdev->dp_level = dp_level;
+       cdev->dp_module = dp_module;
+       for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               p_hwfn->dp_level = dp_level;
+               p_hwfn->dp_module = dp_module;
+       }
+}
+
+void qed_init_struct(struct qed_dev *cdev)
+{
+       u8 i;
+
+       for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               p_hwfn->cdev = cdev;
+               p_hwfn->my_id = i;
+               p_hwfn->b_active = false;
+
+               mutex_init(&p_hwfn->dmae_info.mutex);
+       }
+
+       /* hwfn 0 is always active */
+       cdev->hwfns[0].b_active = true;
+
+       /* set the default cache alignment to 128 */
+       cdev->cache_shift = 7;
+}
+
+static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       kfree(qm_info->qm_pq_params);
+       qm_info->qm_pq_params = NULL;
+       kfree(qm_info->qm_vport_params);
+       qm_info->qm_vport_params = NULL;
+       kfree(qm_info->qm_port_params);
+       qm_info->qm_port_params = NULL;
+}
+
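+/* Tear down everything qed_resc_alloc() has acquired */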
+void qed_resc_free(struct qed_dev *cdev)
+{
+       int i;
+
+       kfree(cdev->fw_data);
+       cdev->fw_data = NULL;
+
+       kfree(cdev->reset_stats);
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               kfree(p_hwfn->p_tx_cids);
+               p_hwfn->p_tx_cids = NULL;
+               kfree(p_hwfn->p_rx_cids);
+               p_hwfn->p_rx_cids = NULL;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               qed_cxt_mngr_free(p_hwfn);
+               qed_qm_info_free(p_hwfn);
+               qed_spq_free(p_hwfn);
+               qed_eq_free(p_hwfn, p_hwfn->p_eq);
+               qed_consq_free(p_hwfn, p_hwfn->p_consq);
+               qed_int_free(p_hwfn);
+               qed_dmae_info_free(p_hwfn);
+       }
+}
+
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct init_qm_port_params *p_qm_port;
+       u8 num_vports, i, vport_id, num_ports;
+       u16 num_pqs, multi_cos_tcs = 1;
+
+       memset(qm_info, 0, sizeof(*qm_info));
+
+       num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+       num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+
+       /* Sanity checking that setup requires legal number of resources */
+       if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
+               DP_ERR(p_hwfn,
+                      "Need too many physical queues - 0x%04x when only 0x%04x are available\n",
+                      num_pqs, RESC_NUM(p_hwfn, QED_PQ));
+               return -EINVAL;
+       }
+
+       /* PQs are arranged as follows: first the per-TC PQs, then the
+        * pure-LB queue.
+        */
+       qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+                                       num_pqs, GFP_ATOMIC);
+       if (!qm_info->qm_pq_params)
+               goto alloc_err;
+
+       qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+                                          num_vports, GFP_ATOMIC);
+       if (!qm_info->qm_vport_params)
+               goto alloc_err;
+
+       qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+                                         MAX_NUM_PORTS, GFP_ATOMIC);
+       if (!qm_info->qm_port_params)
+               goto alloc_err;
+
+       vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+       /* First init per-TC PQs */
+       for (i = 0; i < multi_cos_tcs; i++) {
+               struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+
+               params->vport_id = vport_id;
+               params->tc_id = p_hwfn->hw_info.non_offload_tc;
+               params->wrr_group = 1;
+       }
+
+       /* Then init pure-LB PQ */
+       qm_info->pure_lb_pq = i;
+       qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+       qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
+       qm_info->qm_pq_params[i].wrr_group = 1;
+       i++;
+
+       qm_info->offload_pq = 0;
+       qm_info->num_pqs = num_pqs;
+       qm_info->num_vports = num_vports;
+
+       /* Initialize qm port parameters */
+       num_ports = p_hwfn->cdev->num_ports_in_engines;
+       for (i = 0; i < num_ports; i++) {
+               p_qm_port = &qm_info->qm_port_params[i];
+               p_qm_port->active = 1;
+               p_qm_port->num_active_phys_tcs = 4;
+               p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+               p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+       }
+
+       qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+       qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+
+       qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+       qm_info->pf_wfq = 0;
+       qm_info->pf_rl = 0;
+       qm_info->vport_rl_en = 1;
+
+       return 0;
+
+alloc_err:
+       DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+       kfree(qm_info->qm_pq_params);
+       kfree(qm_info->qm_vport_params);
+       kfree(qm_info->qm_port_params);
+
+       return -ENOMEM;
+}
+
+int qed_resc_alloc(struct qed_dev *cdev)
+{
+       struct qed_consq *p_consq;
+       struct qed_eq *p_eq;
+       int i, rc = 0;
+
+       cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
+       if (!cdev->fw_data)
+               return -ENOMEM;
+
+       /* Allocate Memory for the Queue->CID mapping */
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               int tx_size = sizeof(struct qed_hw_cid_data) *
+                                    RESC_NUM(p_hwfn, QED_L2_QUEUE);
+               int rx_size = sizeof(struct qed_hw_cid_data) *
+                                    RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+               p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
+               if (!p_hwfn->p_tx_cids) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to allocate memory for Tx Cids\n");
+                       rc = -ENOMEM;
+                       goto alloc_err;
+               }
+
+               p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
+               if (!p_hwfn->p_rx_cids) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to allocate memory for Rx Cids\n");
+                       rc = -ENOMEM;
+                       goto alloc_err;
+               }
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               /* First allocate the context manager structure */
+               rc = qed_cxt_mngr_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* Set the HW cid/tid numbers (in the context manager)
+                * Must be done prior to any further computations.
+                */
+               rc = qed_cxt_set_pf_params(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* Prepare and process QM requirements */
+               rc = qed_init_qm_info(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* Compute the ILT client partition */
+               rc = qed_cxt_cfg_ilt_compute(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* CID map / ILT shadow table / T2
+                * The table sizes are determined by the computations above
+                */
+               rc = qed_cxt_tables_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* SPQ, must follow ILT because initializes SPQ context */
+               rc = qed_spq_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* SP status block allocation */
+               p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
+                                                        RESERVED_PTT_DPC);
+
+               rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+               if (rc)
+                       goto alloc_err;
+
+               /* EQ */
+               p_eq = qed_eq_alloc(p_hwfn, 256);
+               if (!p_eq) {
+                       rc = -ENOMEM;
+                       goto alloc_err;
+               }
+               p_hwfn->p_eq = p_eq;
+
+               p_consq = qed_consq_alloc(p_hwfn);
+               if (!p_consq) {
+                       rc = -ENOMEM;
+                       goto alloc_err;
+               }
+               p_hwfn->p_consq = p_consq;
+
+               /* DMA info initialization */
+               rc = qed_dmae_info_alloc(p_hwfn);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to allocate memory for dmae_info structure\n");
+                       goto alloc_err;
+               }
+       }
+
+       cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
+       if (!cdev->reset_stats) {
+               DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
+               rc = -ENOMEM;
+               goto alloc_err;
+       }
+
+       return 0;
+
+alloc_err:
+       qed_resc_free(cdev);
+       return rc;
+}
+
+void qed_resc_setup(struct qed_dev *cdev)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               qed_cxt_mngr_setup(p_hwfn);
+               qed_spq_setup(p_hwfn);
+               qed_eq_setup(p_hwfn, p_hwfn->p_eq);
+               qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+               /* Read shadow of current MFW mailbox */
+               qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+               memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+                      p_hwfn->mcp_info->mfw_mb_cur,
+                      p_hwfn->mcp_info->mfw_mb_length);
+
+               qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+       }
+}
+
+#define FINAL_CLEANUP_CMD_OFFSET       (0)
+#define FINAL_CLEANUP_CMD              (0x1)
+#define FINAL_CLEANUP_VALID_OFFSET     (6)
+#define FINAL_CLEANUP_VFPF_ID_SHIFT    (7)
+#define FINAL_CLEANUP_COMP             (0x2)
+#define FINAL_CLEANUP_POLL_CNT         (100)
+#define FINAL_CLEANUP_POLL_TIME        (10)
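+
+/* Build an SDM op_gen 'final cleanup' command for the given PF/VF id and
+ * poll the USTORM ack address until the firmware confirms the cleanup.
+ */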
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u16 id)
+{
+       u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+       int rc = -EBUSY;
+
+       addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+
+       command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
+       command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
+       command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
+       command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+       /* Make sure notification is not set before initiating final cleanup */
+       if (REG_RD(p_hwfn, addr)) {
+               DP_NOTICE(p_hwfn,
+                         "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+               REG_WR(p_hwfn, addr, 0);
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "Sending final cleanup for PFVF[%d] [Command %08x]\n",
+                  id, command);
+
+       qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+       /* Poll until completion */
+       while (!REG_RD(p_hwfn, addr) && count--)
+               msleep(FINAL_CLEANUP_POLL_TIME);
+
+       if (REG_RD(p_hwfn, addr))
+               rc = 0;
+       else
+               DP_NOTICE(p_hwfn,
+                         "Failed to receive FW final cleanup notification\n");
+
+       /* Cleanup afterwards */
+       REG_WR(p_hwfn, addr, 0);
+
+       return rc;
+}
+
+static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+{
+       int hw_mode = 0;
+
+       hw_mode = (1 << MODE_BB_A0);
+
+       switch (p_hwfn->cdev->num_ports_in_engines) {
+       case 1:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+               break;
+       case 2:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+               break;
+       case 4:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
+                         p_hwfn->cdev->num_ports_in_engines);
+               return;
+       }
+
+       switch (p_hwfn->cdev->mf_mode) {
+       case SF:
+               hw_mode |= 1 << MODE_SF;
+               break;
+       case MF_OVLAN:
+               hw_mode |= 1 << MODE_MF_SD;
+               break;
+       case MF_NPAR:
+               hw_mode |= 1 << MODE_MF_SI;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
+               hw_mode |= 1 << MODE_SF;
+       }
+
+       hw_mode |= 1 << MODE_ASIC;
+
+       p_hwfn->hw_info.hw_mode = hw_mode;
+}
+
+/* Init run time data for all PFs on an engine. */
+static void qed_init_cau_rt_data(struct qed_dev *cdev)
+{
+       u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+       int i, sb_id;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_igu_info *p_igu_info;
+               struct qed_igu_block *p_block;
+               struct cau_sb_entry sb_entry;
+
+               p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+               for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
+                    sb_id++) {
+                       p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+                       if (!p_block->is_pf)
+                               continue;
+
+                       qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+                                             p_block->function_id,
+                                             0, 0);
+                       STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
+                                        sb_entry);
+               }
+       }
+}
+
+static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             int hw_mode)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct qed_qm_common_rt_init_params params;
+       struct qed_dev *cdev = p_hwfn->cdev;
+       int rc = 0;
+
+       qed_init_cau_rt_data(cdev);
+
+       /* Program GTT windows */
+       qed_gtt_init(p_hwfn);
+
+       if (p_hwfn->mcp_info) {
+               if (p_hwfn->mcp_info->func_info.bandwidth_max)
+                       qm_info->pf_rl_en = 1;
+               if (p_hwfn->mcp_info->func_info.bandwidth_min)
+                       qm_info->pf_wfq_en = 1;
+       }
+
+       memset(&params, 0, sizeof(params));
+       params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+       params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+       params.pf_rl_en = qm_info->pf_rl_en;
+       params.pf_wfq_en = qm_info->pf_wfq_en;
+       params.vport_rl_en = qm_info->vport_rl_en;
+       params.vport_wfq_en = qm_info->vport_wfq_en;
+       params.port_params = qm_info->qm_port_params;
+
+       qed_qm_common_rt_init(p_hwfn, &params);
+
+       qed_cxt_hw_init_common(p_hwfn);
+
+       /* Close gate from NIG to BRB/Storm; By default they are open, but
+        * we close them to prevent NIG from passing data to reset blocks.
+        * Should have been done in the ENGINE phase, but init-tool lacks
+        * proper port-pretend capabilities.
+        */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+       qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+       qed_port_unpretend(p_hwfn, p_ptt);
+
+       rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+       if (rc != 0)
+               return rc;
+
+       qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+       qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+       /* Disable relaxed ordering in the PCI config space */
+       qed_wr(p_hwfn, p_ptt, 0x20b4,
+              qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+
+       return rc;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           int hw_mode)
+{
+       int rc = 0;
+
+       rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+                         hw_mode);
+       return rc;
+}
+
+static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         int hw_mode,
+                         bool b_hw_start,
+                         enum qed_int_mode int_mode,
+                         bool allow_npar_tx_switch)
+{
+       u8 rel_pf_id = p_hwfn->rel_pf_id;
+       int rc = 0;
+
+       if (p_hwfn->mcp_info) {
+               struct qed_mcp_function_info *p_info;
+
+               p_info = &p_hwfn->mcp_info->func_info;
+               if (p_info->bandwidth_min)
+                       p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+               /* Update the rate limit once we actually have a link */
+               p_hwfn->qm_info.pf_rl = 100;
+       }
+
+       qed_cxt_hw_init_pf(p_hwfn);
+
+       qed_int_igu_init_rt(p_hwfn);
+
+       /* Set VLAN in NIG if needed */
+       if (hw_mode & (1 << MODE_MF_SD)) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+                            p_hwfn->hw_info.ovlan);
+       }
+
+       /* Enable classification by MAC if needed */
+       if (hw_mode & MODE_MF_SI) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "Configuring TAGMAC_CLS_TYPE\n");
+               STORE_RT_REG(p_hwfn,
+                            NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
+       }
+
+       /* Protocol configuration */
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+       /* Cleanup chip from previous driver if such remains exist */
+       rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+       if (rc != 0)
+               return rc;
+
+       /* PF Init sequence */
+       rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+       if (rc)
+               return rc;
+
+       /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+       rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+       if (rc)
+               return rc;
+
+       /* Pure runtime initializations - directly to the HW  */
+       qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+       if (b_hw_start) {
+               /* enable interrupts */
+               qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+               /* send function start command */
+               rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+               if (rc)
+                       DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+       }
+       return rc;
+}
+
+static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt,
+                              u8 enable)
+{
+       u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+
+       /* Change PF in PXP */
+       qed_wr(p_hwfn, p_ptt,
+              PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+       /* wait until value is set - try for 1 second every 50us */
+       for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+               val = qed_rd(p_hwfn, p_ptt,
+                            PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+               if (val == set_val)
+                       break;
+
+               usleep_range(50, 60);
+       }
+
+       if (val != set_val) {
+               DP_NOTICE(p_hwfn,
+                         "PFID_ENABLE_MASTER wasn't changed after a second\n");
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
+static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_main_ptt)
+{
+       /* Read shadow of current MFW mailbox */
+       qed_mcp_read_mb(p_hwfn, p_main_ptt);
+       memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+              p_hwfn->mcp_info->mfw_mb_cur,
+              p_hwfn->mcp_info->mfw_mb_length);
+}
+
+int qed_hw_init(struct qed_dev *cdev,
+               bool b_hw_start,
+               enum qed_int_mode int_mode,
+               bool allow_npar_tx_switch,
+               const u8 *bin_fw_data)
+{
+       struct qed_storm_stats *p_stat;
+       u32 load_code, param;
+       int rc, mfw_rc, i;
+       u8 fw_vport = 0;
+
+       rc = qed_init_fw_data(cdev, bin_fw_data);
+       if (rc != 0)
+               return rc;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
+               if (rc != 0)
+                       return rc;
+
+               /* Enable DMAE in PXP */
+               rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+               if (rc != 0)
+                       return rc;
+
+               qed_calc_hw_mode(p_hwfn);
+
+               rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+                                     &load_code);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+                       return rc;
+               }
+
+               qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
+                          rc, load_code);
+
+               p_hwfn->first_on_engine = (load_code ==
+                                          FW_MSG_CODE_DRV_LOAD_ENGINE);
+
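+               /* The MFW load response determines how much of the init
+                * sequence runs here: ENGINE also performs the PORT and
+                * FUNCTION phases, and PORT also performs the FUNCTION phase.
+                */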
+               switch (load_code) {
+               case FW_MSG_CODE_DRV_LOAD_ENGINE:
+                       rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+                                               p_hwfn->hw_info.hw_mode);
+                       if (rc)
+                               break;
+               /* Fall through */
+               case FW_MSG_CODE_DRV_LOAD_PORT:
+                       rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+                                             p_hwfn->hw_info.hw_mode);
+                       if (rc)
+                               break;
+
+               /* Fall through */
+               case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+                       rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+                                           p_hwfn->hw_info.hw_mode,
+                                           b_hw_start, int_mode,
+                                           allow_npar_tx_switch);
+                       break;
+               default:
+                       rc = -EINVAL;
+                       break;
+               }
+
+               if (rc)
+                       DP_NOTICE(p_hwfn,
+                                 "init phase failed for loadcode 0x%x (rc %d)\n",
+                                  load_code, rc);
+
+               /* ACK mfw regardless of success or failure of initialization */
+               mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                    DRV_MSG_CODE_LOAD_DONE,
+                                    0, &load_code, &param);
+               if (rc)
+                       return rc;
+               if (mfw_rc) {
+                       DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
+                       return mfw_rc;
+               }
+
+               p_hwfn->hw_init_done = true;
+
+               /* init PF stats */
+               p_stat = &p_hwfn->storm_stats;
+               p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
+                                        MSTORM_QUEUE_STAT_OFFSET(fw_vport);
+               p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+
+               p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
+                                        USTORM_QUEUE_STAT_OFFSET(fw_vport);
+               p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+
+               p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
+                                        PSTORM_QUEUE_STAT_OFFSET(fw_vport);
+               p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+
+               p_address = &p_stat->tstats.address;
+               *p_address = BAR0_MAP_REG_TSDM_RAM +
+                            TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+               p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
+       }
+
+       return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+int qed_hw_stop(struct qed_dev *cdev)
+{
+       int rc = 0, t_rc;
+       int i, j;
+
+       for_each_hwfn(cdev, j) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+               struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+               DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+
+               /* mark the hw as uninitialized... */
+               p_hwfn->hw_init_done = false;
+
+               rc = qed_sp_pf_stop(p_hwfn);
+               if (rc)
+                       return rc;
+
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+               qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+               qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
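+               /* Wait for the timers block to finish any pending
+                * connection/task scans before tearing the PF down.
+                */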
+               for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+                       if ((!qed_rd(p_hwfn, p_ptt,
+                                    TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+                           (!qed_rd(p_hwfn, p_ptt,
+                                    TM_REG_PF_SCAN_ACTIVE_TASK)))
+                               break;
+
+                       usleep_range(1000, 2000);
+               }
+               if (i == QED_HW_STOP_RETRY_LIMIT)
+                       DP_NOTICE(p_hwfn,
+                                 "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+                                 (u8)qed_rd(p_hwfn, p_ptt,
+                                            TM_REG_PF_SCAN_ACTIVE_CONN),
+                                 (u8)qed_rd(p_hwfn, p_ptt,
+                                            TM_REG_PF_SCAN_ACTIVE_TASK));
+
+               /* Disable Attention Generation */
+               qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+               qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+               qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+
+               qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+
+               /* Need to wait 1ms to guarantee SBs are cleared */
+               usleep_range(1000, 2000);
+       }
+
+       /* Disable DMAE in PXP - in CMT, this should only be done for
+        * first hw-function, and only after all transactions have
+        * stopped for all active hw-functions.
+        */
+       t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+                                  cdev->hwfns[0].p_main_ptt,
+                                  false);
+       if (t_rc != 0)
+               rc = t_rc;
+
+       return rc;
+}
+
+void qed_hw_stop_fastpath(struct qed_dev *cdev)
+{
+       int i, j;
+
+       for_each_hwfn(cdev, j) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+               struct qed_ptt *p_ptt   = p_hwfn->p_main_ptt;
+
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_IFDOWN,
+                          "Shutting down the fastpath\n");
+
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+               qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+               qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+               for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+                       if ((!qed_rd(p_hwfn, p_ptt,
+                                    TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+                           (!qed_rd(p_hwfn, p_ptt,
+                                    TM_REG_PF_SCAN_ACTIVE_TASK)))
+                               break;
+
+                       usleep_range(1000, 2000);
+               }
+               if (i == QED_HW_STOP_RETRY_LIMIT)
+                       DP_NOTICE(p_hwfn,
+                                 "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+                                 (u8)qed_rd(p_hwfn, p_ptt,
+                                            TM_REG_PF_SCAN_ACTIVE_CONN),
+                                 (u8)qed_rd(p_hwfn, p_ptt,
+                                            TM_REG_PF_SCAN_ACTIVE_TASK));
+
+               qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+
+               /* Need to wait 1ms to guarantee SBs are cleared */
+               usleep_range(1000, 2000);
+       }
+}
+
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
+{
+       /* Re-open incoming traffic */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+              NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+static int qed_reg_assert(struct qed_hwfn *hwfn,
+                         struct qed_ptt *ptt, u32 reg,
+                         bool expected)
+{
+       u32 assert_val = qed_rd(hwfn, ptt, reg);
+
+       if (assert_val != expected) {
+               DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+                         reg, expected);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int qed_hw_reset(struct qed_dev *cdev)
+{
+       int rc = 0;
+       u32 unload_resp, unload_param;
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
+
+               /* Check for incorrect states */
+               qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+                              QM_REG_USG_CNT_PF_TX, 0);
+               qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+                              QM_REG_USG_CNT_PF_OTHER, 0);
+
+               /* Disable PF in HW blocks */
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      TCFC_REG_STRONG_ENABLE_PF, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      CCFC_REG_STRONG_ENABLE_PF, 0);
+
+               /* Send unload command to MCP */
+               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                DRV_MSG_CODE_UNLOAD_REQ,
+                                DRV_MB_PARAM_UNLOAD_WOL_MCP,
+                                &unload_resp, &unload_param);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
+                       unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+               }
+
+               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                DRV_MSG_CODE_UNLOAD_DONE,
+                                0, &unload_resp, &unload_param);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
+                       return rc;
+               }
+       }
+
+       return rc;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
+{
+       qed_ptt_pool_free(p_hwfn);
+       kfree(p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+{
+       int rc;
+
+       /* Allocate PTT pool */
+       rc = qed_ptt_pool_alloc(p_hwfn);
+       if (rc)
+               return rc;
+
+       /* Allocate the main PTT */
+       p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+       /* clear indirect access */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+       /* Clean Previous errors if such exist */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+              PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+              1 << p_hwfn->abs_pf_id);
+
+       /* enable internal target-read */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+              PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+       return 0;
+}
+
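+/* Derive the function's ids from the ME register; the concrete FID encodes
+ * both the absolute/relative PF id and the port.
+ */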
+static void get_function_id(struct qed_hwfn *p_hwfn)
+{
+       /* ME Register */
+       p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+
+       p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+       p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+       p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+                                     PXP_CONCRETE_FID_PFID);
+       p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+                                   PXP_CONCRETE_FID_PORT);
+}
+
+static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
+{
+       u32 *feat_num = p_hwfn->hw_info.feat_num;
+       int num_features = 1;
+
+       feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
+                                               num_features,
+                                       RESC_NUM(p_hwfn, QED_L2_QUEUE));
+       DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+                  "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+                  feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
+                  num_features);
+}
+
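+/* Statically split the chip resources evenly between the functions; each
+ * PF's share of a resource starts at resc_num * rel_pf_id.
+ */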
+static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+{
+       u32 *resc_start = p_hwfn->hw_info.resc_start;
+       u32 *resc_num = p_hwfn->hw_info.resc_num;
+       int num_funcs, i;
+
+       num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
+                                 : p_hwfn->cdev->num_ports_in_engines;
+
+       resc_num[QED_SB] = min_t(u32,
+                                (MAX_SB_PER_PATH_BB / num_funcs),
+                                qed_int_get_num_sbs(p_hwfn, NULL));
+       resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
+       resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
+       resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+       resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
+       resc_num[QED_RL] = 8;
+       resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+       resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
+                            num_funcs;
+       resc_num[QED_ILT] = 950;
+
+       for (i = 0; i < QED_MAX_RESC; i++)
+               resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+
+       qed_hw_set_feat(p_hwfn);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+                  "The numbers for each resource are:\n"
+                  "SB = %d start = %d\n"
+                  "L2_QUEUE = %d start = %d\n"
+                  "VPORT = %d start = %d\n"
+                  "PQ = %d start = %d\n"
+                  "RL = %d start = %d\n"
+                  "MAC = %d start = %d\n"
+                  "VLAN = %d start = %d\n"
+                  "ILT = %d start = %d\n",
+                  p_hwfn->hw_info.resc_num[QED_SB],
+                  p_hwfn->hw_info.resc_start[QED_SB],
+                  p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
+                  p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
+                  p_hwfn->hw_info.resc_num[QED_VPORT],
+                  p_hwfn->hw_info.resc_start[QED_VPORT],
+                  p_hwfn->hw_info.resc_num[QED_PQ],
+                  p_hwfn->hw_info.resc_start[QED_PQ],
+                  p_hwfn->hw_info.resc_num[QED_RL],
+                  p_hwfn->hw_info.resc_start[QED_RL],
+                  p_hwfn->hw_info.resc_num[QED_MAC],
+                  p_hwfn->hw_info.resc_start[QED_MAC],
+                  p_hwfn->hw_info.resc_num[QED_VLAN],
+                  p_hwfn->hw_info.resc_start[QED_VLAN],
+                  p_hwfn->hw_info.resc_num[QED_ILT],
+                  p_hwfn->hw_info.resc_start[QED_ILT]);
+}
+
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt)
+{
+       u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+       u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+       struct qed_mcp_link_params *link;
+
+       /* Read global nvm_cfg address */
+       nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+       /* Verify MCP has initialized it */
+       if (!nvm_cfg_addr) {
+               DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+               return -EINVAL;
+       }
+
+       /* Read nvm_cfg1 (Note: this is just the offset, not the offsize (TBD)) */
+       nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+       /* Read Vendor Id / Device Id */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+              offsetof(struct nvm_cfg1, glob) +
+              offsetof(struct nvm_cfg1_glob, pci_id);
+       p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
+                                   NVM_CFG1_GLOB_VENDOR_ID_MASK;
+
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+              offsetof(struct nvm_cfg1, glob) +
+              offsetof(struct nvm_cfg1_glob, core_cfg);
+
+       core_cfg = qed_rd(p_hwfn, p_ptt, addr);
+
+       switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+               NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
+                         core_cfg);
+               break;
+       }
+
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+              offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
+              offsetof(struct nvm_cfg1_func, device_id);
+       val = qed_rd(p_hwfn, p_ptt, addr);
+
+       if (IS_MF(p_hwfn)) {
+               p_hwfn->hw_info.device_id =
+                       (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
+                       NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
+       } else {
+               p_hwfn->hw_info.device_id =
+                       (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
+                       NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
+       }
+
+       /* Read default link configuration */
+       link = &p_hwfn->mcp_info->link_input;
+       port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+                       offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+       link_temp = qed_rd(p_hwfn, p_ptt,
+                          port_cfg_addr +
+                          offsetof(struct nvm_cfg1_port, speed_cap_mask));
+       link->speed.advertised_speeds =
+               link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+
+       p_hwfn->mcp_info->link_capabilities.speed_capabilities =
+                                               link->speed.advertised_speeds;
+
+       link_temp = qed_rd(p_hwfn, p_ptt,
+                          port_cfg_addr +
+                          offsetof(struct nvm_cfg1_port, link_settings));
+       switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+               NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+               link->speed.autoneg = true;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+               link->speed.forced_speed = 1000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+               link->speed.forced_speed = 10000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+               link->speed.forced_speed = 25000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+               link->speed.forced_speed = 40000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+               link->speed.forced_speed = 50000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+               link->speed.forced_speed = 100000;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
+                         link_temp);
+       }
+
+       link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+       link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+       link->pause.autoneg = !!(link_temp &
+                                NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+       link->pause.forced_rx = !!(link_temp &
+                                  NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+       link->pause.forced_tx = !!(link_temp &
+                                  NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+       link->loopback_mode = 0;
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                  "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+                  link->speed.forced_speed, link->speed.advertised_speeds,
+                  link->speed.autoneg, link->pause.autoneg);
+
+       /* Read Multi-function information from shmem */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+              offsetof(struct nvm_cfg1, glob) +
+              offsetof(struct nvm_cfg1_glob, generic_cont0);
+
+       generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+
+       mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+                 NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+       switch (mf_mode) {
+       case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+               p_hwfn->cdev->mf_mode = MF_OVLAN;
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+               p_hwfn->cdev->mf_mode = MF_NPAR;
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
+               p_hwfn->cdev->mf_mode = SF;
+               break;
+       }
+       DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+               p_hwfn->cdev->mf_mode);
+
+       return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+               struct qed_ptt *p_ptt,
+               enum qed_pci_personality personality)
+{
+       u32 port_mode;
+       int rc;
+
+       /* Read the port mode */
+       port_mode = qed_rd(p_hwfn, p_ptt,
+                          CNIG_REG_NW_PORT_MODE_BB_B0);
+
+       if (port_mode < 3) {
+               p_hwfn->cdev->num_ports_in_engines = 1;
+       } else if (port_mode <= 5) {
+               p_hwfn->cdev->num_ports_in_engines = 2;
+       } else {
+               DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
+                         p_hwfn->cdev->num_ports_in_engines);
+
+               /* Default num_ports_in_engines to something */
+               p_hwfn->cdev->num_ports_in_engines = 1;
+       }
+
+       rc = qed_hw_get_nvm_info(p_hwfn, p_ptt);
+       if (rc)
+               return rc;
+
+       rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
+       if (rc)
+               return rc;
+
+       if (qed_mcp_is_init(p_hwfn))
+               ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
+                               p_hwfn->mcp_info->func_info.mac);
+       else
+               eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
+
+       if (qed_mcp_is_init(p_hwfn)) {
+               if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
+                       p_hwfn->hw_info.ovlan =
+                               p_hwfn->mcp_info->func_info.ovlan;
+
+               qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+       }
+
+       if (qed_mcp_is_init(p_hwfn)) {
+               enum qed_pci_personality protocol;
+
+               protocol = p_hwfn->mcp_info->func_info.protocol;
+               p_hwfn->hw_info.personality = protocol;
+       }
+
+       qed_hw_get_resc(p_hwfn);
+
+       return rc;
+}
+
+static void qed_get_dev_info(struct qed_dev *cdev)
+{
+       u32 tmp;
+
+       cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+                                    MISCS_REG_CHIP_NUM);
+       cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+                                    MISCS_REG_CHIP_REV);
+       MASK_FIELD(CHIP_REV, cdev->chip_rev);
+
+       /* Learn number of HW-functions */
+       tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+                    MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+       if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+               DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
+               cdev->num_hwfns = 2;
+       } else {
+               cdev->num_hwfns = 1;
+       }
+
+       cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+                                   MISCS_REG_CHIP_TEST_REG) >> 4;
+       MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
+       cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+                                      MISCS_REG_CHIP_METAL);
+       MASK_FIELD(CHIP_METAL, cdev->chip_metal);
+
+       DP_INFO(cdev->hwfns,
+               "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+               cdev->chip_num, cdev->chip_rev,
+               cdev->chip_bond_id, cdev->chip_metal);
+}
+
+static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+                                void __iomem *p_regview,
+                                void __iomem *p_doorbells,
+                                enum qed_pci_personality personality)
+{
+       int rc = 0;
+
+       /* Split PCI bars evenly between hwfns */
+       p_hwfn->regview = p_regview;
+       p_hwfn->doorbells = p_doorbells;
+
+       /* Validate that chip access is feasible */
+       if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+               DP_ERR(p_hwfn,
+                      "Reading the ME register returns all Fs; Preventing further chip access\n");
+               return -EINVAL;
+       }
+
+       get_function_id(p_hwfn);
+
+       rc = qed_hw_hwfn_prepare(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+               goto err0;
+       }
+
+       /* First hwfn learns basic information, e.g., number of hwfns */
+       if (!p_hwfn->my_id)
+               qed_get_dev_info(p_hwfn->cdev);
+
+       /* Initialize MCP structure */
+       rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
+               goto err1;
+       }
+
+       /* Read the device configuration information from the HW and SHMEM */
+       rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to get HW information\n");
+               goto err2;
+       }
+
+       /* Allocate the init RT array and initialize the init-ops engine */
+       rc = qed_init_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+               goto err2;
+       }
+
+       return rc;
+err2:
+       qed_mcp_free(p_hwfn);
+err1:
+       qed_hw_hwfn_free(p_hwfn);
+err0:
+       return rc;
+}
+
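+/* BAR0 holds the register view and the second BAR the doorbells; in CMT
+ * each hw-function receives an even share of the relevant bar.
+ */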
+static u32 qed_hw_bar_size(struct qed_dev *cdev,
+                          u8 bar_id)
+{
+       u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
+
+       return size / cdev->num_hwfns;
+}
+
+int qed_hw_prepare(struct qed_dev *cdev,
+                  int personality)
+{
+       int rc, i;
+
+       /* Store the precompiled init data ptrs */
+       qed_init_iro_array(cdev);
+
+       /* Initialize the first hwfn - will learn number of hwfns */
+       rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
+                                  cdev->doorbells, personality);
+       if (rc)
+               return rc;
+
+       personality = cdev->hwfns[0].hw_info.personality;
+
+       /* Initialize the rest of the hwfns */
+       for (i = 1; i < cdev->num_hwfns; i++) {
+               void __iomem *p_regview, *p_doorbell;
+
+               p_regview = cdev->regview +
+                           i * qed_hw_bar_size(cdev, 0);
+               p_doorbell = cdev->doorbells +
+                            i * qed_hw_bar_size(cdev, 1);
+               rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
+                                          p_doorbell, personality);
+               if (rc) {
+                       /* Cleanup previously initialized hwfns */
+                       while (--i >= 0) {
+                               qed_init_free(&cdev->hwfns[i]);
+                               qed_mcp_free(&cdev->hwfns[i]);
+                               qed_hw_hwfn_free(&cdev->hwfns[i]);
+                       }
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+
+void qed_hw_remove(struct qed_dev *cdev)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               qed_init_free(p_hwfn);
+               qed_hw_hwfn_free(p_hwfn);
+               qed_mcp_free(p_hwfn);
+       }
+}
+
+int qed_chain_alloc(struct qed_dev *cdev,
+                   enum qed_chain_use_mode intended_use,
+                   enum qed_chain_mode mode,
+                   u16 num_elems,
+                   size_t elem_size,
+                   struct qed_chain *p_chain)
+{
+       dma_addr_t p_pbl_phys = 0;
+       void *p_pbl_virt = NULL;
+       dma_addr_t p_phys = 0;
+       void *p_virt = NULL;
+       u16 page_cnt = 0;
+       size_t size;
+
+       if (mode == QED_CHAIN_MODE_SINGLE)
+               page_cnt = 1;
+       else
+               page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+       size = page_cnt * QED_CHAIN_PAGE_SIZE;
+       p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+                                   size, &p_phys, GFP_KERNEL);
+       if (!p_virt) {
+               DP_NOTICE(cdev, "Failed to allocate chain mem\n");
+               goto nomem;
+       }
+
+       if (mode == QED_CHAIN_MODE_PBL) {
+               size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+               p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+                                               size, &p_pbl_phys,
+                                               GFP_KERNEL);
+               if (!p_pbl_virt) {
+                       DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
+                       goto nomem;
+               }
+
+               qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
+                                  (u8)elem_size, intended_use,
+                                  p_pbl_phys, p_pbl_virt);
+       } else {
+               qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
+                              (u8)elem_size, intended_use, mode);
+       }
+
+       return 0;
+
+nomem:
+       if (p_virt)
+               dma_free_coherent(&cdev->pdev->dev,
+                                 page_cnt * QED_CHAIN_PAGE_SIZE,
+                                 p_virt, p_phys);
+       if (p_pbl_virt)
+               dma_free_coherent(&cdev->pdev->dev,
+                                 page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
+                                 p_pbl_virt, p_pbl_phys);
+
+       return -ENOMEM;
+}
+
+void qed_chain_free(struct qed_dev *cdev,
+                   struct qed_chain *p_chain)
+{
+       size_t size;
+
+       if (!p_chain->p_virt_addr)
+               return;
+
+       if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+               size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+               dma_free_coherent(&cdev->pdev->dev, size,
+                                 p_chain->pbl.p_virt_table,
+                                 p_chain->pbl.p_phys_table);
+       }
+
+       size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
+       dma_free_coherent(&cdev->pdev->dev, size,
+                         p_chain->p_virt_addr,
+                         p_chain->p_phys_addr);
+}
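+
+/* Illustrative usage sketch (ring size hypothetical; enum values assumed
+ * from include/linux/qed/qed_chain.h):
+ *
+ *     struct qed_chain chain;
+ *
+ *     rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
+ *                          QED_CHAIN_MODE_PBL, 256,
+ *                          sizeof(union event_ring_element), &chain);
+ *     if (!rc)
+ *             ... produce into the chain, then qed_chain_free(cdev, &chain);
+ */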
+
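+/* Aggregate the per-queue storm statistics and the MFW port statistics of
+ * all hw-functions into a single qed_eth_stats block.
+ */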
+static void __qed_get_vport_stats(struct qed_dev *cdev,
+                                 struct qed_eth_stats  *stats)
+{
+       int i, j;
+
+       memset(stats, 0, sizeof(*stats));
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct eth_mstorm_per_queue_stat mstats;
+               struct eth_ustorm_per_queue_stat ustats;
+               struct eth_pstorm_per_queue_stat pstats;
+               struct tstorm_per_port_stat tstats;
+               struct port_stats port_stats;
+               struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+               if (!p_ptt) {
+                       DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+                       continue;
+               }
+
+               memset(&mstats, 0, sizeof(mstats));
+               qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+                               p_hwfn->storm_stats.mstats.address,
+                               p_hwfn->storm_stats.mstats.len);
+
+               memset(&ustats, 0, sizeof(ustats));
+               qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+                               p_hwfn->storm_stats.ustats.address,
+                               p_hwfn->storm_stats.ustats.len);
+
+               memset(&pstats, 0, sizeof(pstats));
+               qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+                               p_hwfn->storm_stats.pstats.address,
+                               p_hwfn->storm_stats.pstats.len);
+
+               memset(&tstats, 0, sizeof(tstats));
+               qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+                               p_hwfn->storm_stats.tstats.address,
+                               p_hwfn->storm_stats.tstats.len);
+
+               memset(&port_stats, 0, sizeof(port_stats));
+
+               if (p_hwfn->mcp_info)
+                       qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+                                       p_hwfn->mcp_info->port_addr +
+                                       offsetof(struct public_port, stats),
+                                       sizeof(port_stats));
+               qed_ptt_release(p_hwfn, p_ptt);
+
+               stats->no_buff_discards +=
+                       HILO_64_REGPAIR(mstats.no_buff_discard);
+               stats->packet_too_big_discard +=
+                       HILO_64_REGPAIR(mstats.packet_too_big_discard);
+               stats->ttl0_discard +=
+                       HILO_64_REGPAIR(mstats.ttl0_discard);
+               stats->tpa_coalesced_pkts +=
+                       HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+               stats->tpa_coalesced_events +=
+                       HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+               stats->tpa_aborts_num +=
+                       HILO_64_REGPAIR(mstats.tpa_aborts_num);
+               stats->tpa_coalesced_bytes +=
+                       HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+
+               stats->rx_ucast_bytes +=
+                       HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+               stats->rx_mcast_bytes +=
+                       HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+               stats->rx_bcast_bytes +=
+                       HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+               stats->rx_ucast_pkts +=
+                       HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+               stats->rx_mcast_pkts +=
+                       HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+               stats->rx_bcast_pkts +=
+                       HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+
+               stats->mftag_filter_discards +=
+                       HILO_64_REGPAIR(tstats.mftag_filter_discard);
+               stats->mac_filter_discards +=
+                       HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+
+               stats->tx_ucast_bytes +=
+                       HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+               stats->tx_mcast_bytes +=
+                       HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+               stats->tx_bcast_bytes +=
+                       HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+               stats->tx_ucast_pkts +=
+                       HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+               stats->tx_mcast_pkts +=
+                       HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+               stats->tx_bcast_pkts +=
+                       HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+               stats->tx_err_drop_pkts +=
+                       HILO_64_REGPAIR(pstats.error_drop_pkts);
+               stats->rx_64_byte_packets += port_stats.pmm.r64;
+               stats->rx_127_byte_packets += port_stats.pmm.r127;
+               stats->rx_255_byte_packets += port_stats.pmm.r255;
+               stats->rx_511_byte_packets += port_stats.pmm.r511;
+               stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+               stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+               stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+               stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+               stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+               stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+               stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+               stats->rx_crc_errors += port_stats.pmm.rfcs;
+               stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+               stats->rx_pause_frames += port_stats.pmm.rxpf;
+               stats->rx_pfc_frames += port_stats.pmm.rxpp;
+               stats->rx_align_errors += port_stats.pmm.raln;
+               stats->rx_carrier_errors += port_stats.pmm.rfcr;
+               stats->rx_oversize_packets += port_stats.pmm.rovr;
+               stats->rx_jabbers += port_stats.pmm.rjbr;
+               stats->rx_undersize_packets += port_stats.pmm.rund;
+               stats->rx_fragments += port_stats.pmm.rfrg;
+               stats->tx_64_byte_packets += port_stats.pmm.t64;
+               stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+               stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+               stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+               stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+               stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+               stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+               stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+               stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+               stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+               stats->tx_pause_frames += port_stats.pmm.txpf;
+               stats->tx_pfc_frames += port_stats.pmm.txpp;
+               stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+               stats->tx_total_collisions += port_stats.pmm.tncl;
+               stats->rx_mac_bytes += port_stats.pmm.rbyte;
+               stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+               stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+               stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+               stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+               stats->tx_mac_bytes += port_stats.pmm.tbyte;
+               stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+               stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+               stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+               stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+
+               for (j = 0; j < 8; j++) {
+                       stats->brb_truncates += port_stats.brb.brb_truncate[j];
+                       stats->brb_discards += port_stats.brb.brb_discard[j];
+               }
+       }
+}
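
Each counter gathered above arrives from the firmware as a struct regpair holding two little-endian 32-bit halves, which HILO_64_REGPAIR splices back into one 64-bit value. A minimal sketch of that recombination, assuming the { __le32 lo; __le32 hi; } layout from common_hsi.h (the helper name below is hypothetical, not part of the driver):

        /* Hypothetical equivalent of HILO_64_REGPAIR with the byte order
         * made explicit; rp.lo and rp.hi are the firmware's LE halves.
         */
        static inline u64 regpair_to_u64(struct regpair rp)
        {
                return ((u64)le32_to_cpu(rp.hi) << 32) | le32_to_cpu(rp.lo);
        }
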
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+                        struct qed_eth_stats *stats)
+{
+       u32 i;
+
+       if (!cdev) {
+               memset(stats, 0, sizeof(*stats));
+               return;
+       }
+
+       __qed_get_vport_stats(cdev, stats);
+
+       if (!cdev->reset_stats)
+               return;
+
+       /* Reduce the statistics baseline */
+       for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+               ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
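
The subtraction loop above walks the stats structure as a flat array of u64, which is sound only while qed_eth_stats consists purely of u64 counters with no padding. A compile-time guard pinning down that assumption could look like the following sketch (hypothetical, not in the driver; it would sit inside qed_get_vport_stats):

        /* Hypothetical build-time check for the flat-u64 layout assumed
         * by the baseline-subtraction loop.
         */
        BUILD_BUG_ON(sizeof(struct qed_eth_stats) % sizeof(u64) != 0);
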
+
+/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct eth_mstorm_per_queue_stat mstats;
+               struct eth_ustorm_per_queue_stat ustats;
+               struct eth_pstorm_per_queue_stat pstats;
+               struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+               if (!p_ptt) {
+                       DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+                       continue;
+               }
+
+               memset(&mstats, 0, sizeof(mstats));
+               qed_memcpy_to(p_hwfn, p_ptt,
+                             p_hwfn->storm_stats.mstats.address,
+                             &mstats,
+                             p_hwfn->storm_stats.mstats.len);
+
+               memset(&ustats, 0, sizeof(ustats));
+               qed_memcpy_to(p_hwfn, p_ptt,
+                             p_hwfn->storm_stats.ustats.address,
+                             &ustats,
+                             p_hwfn->storm_stats.ustats.len);
+
+               memset(&pstats, 0, sizeof(pstats));
+               qed_memcpy_to(p_hwfn, p_ptt,
+                             p_hwfn->storm_stats.pstats.address,
+                             &pstats,
+                             p_hwfn->storm_stats.pstats.len);
+
+               qed_ptt_release(p_hwfn, p_ptt);
+       }
+
+       /* PORT statistics are not necessarily reset, so we need to
+        * read and create a baseline for future statistics.
+        */
+       if (!cdev->reset_stats)
+               DP_INFO(cdev, "Reset stats not allocated\n");
+       else
+               __qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+                   u16 src_id, u16 *dst_id)
+{
+       if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+               u16 min, max;
+
+               min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+               max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE) - 1;
+               DP_NOTICE(p_hwfn,
+                         "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+                         src_id, min, max);
+
+               return -EINVAL;
+       }
+
+       *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
+
+       return 0;
+}
+
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+                u8 src_id, u8 *dst_id)
+{
+       if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
+               u8 min, max;
+
+               min = (u8)RESC_START(p_hwfn, QED_VPORT);
+               max = min + RESC_NUM(p_hwfn, QED_VPORT) - 1;
+               DP_NOTICE(p_hwfn,
+                         "vport id [%d] is not valid, available indices [%d - %d]\n",
+                         src_id, min, max);
+
+               return -EINVAL;
+       }
+
+       *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
+
+       return 0;
+}
+
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+                  u8 src_id, u8 *dst_id)
+{
+       if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
+               u8 min, max;
+
+               min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
+               max = min + RESC_NUM(p_hwfn, QED_RSS_ENG) - 1;
+               DP_NOTICE(p_hwfn,
+                         "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+                         src_id, min, max);
+
+               return -EINVAL;
+       }
+
+       *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
+
+       return 0;
+}
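
All three helpers above apply the same translation: a function-relative src_id is valid when it is below RESC_NUM and maps to RESC_START + src_id. A usage sketch with illustrative values (16 and 8 are made up, not hardware numbers):

        /* If RESC_START(p_hwfn, QED_RSS_ENG) were 16 and RESC_NUM were 8,
         * relative IDs 0..7 would map to absolute IDs 16..23.
         */
        u8 abs_rss_eng;
        int rc = qed_fw_rss_eng(p_hwfn, 3, &abs_rss_eng);

        /* rc == 0 and abs_rss_eng == 19 under the values above;
         * src_id 8 would instead fail with -EINVAL.
         */
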
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
new file mode 100644 (file)
index 0000000..e29a3ba
--- /dev/null
@@ -0,0 +1,283 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEV_API_H
+#define _QED_DEV_API_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed_int.h"
+
+/**
+ * @brief qed_init_dp - initialize the debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+void qed_init_dp(struct qed_dev *cdev,
+                u32 dp_module,
+                u8 dp_level);
+
+/**
+ * @brief qed_init_struct - initialize the device structure to
+ *        its defaults
+ *
+ * @param cdev
+ */
+void qed_init_struct(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_free - free all resources allocated by qed_resc_alloc
+ *
+ * @param cdev
+ */
+void qed_resc_free(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_alloc - allocate the device's resources
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_resc_alloc(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_setup - set up the previously-allocated resources
+ *
+ * @param cdev
+ */
+void qed_resc_setup(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_init - initialize the HW and, when b_hw_start is set, start it
+ *
+ * @param cdev
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ *       for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ *                     Pass NULL if not using binary fw file.
+ *
+ * @return int
+ */
+int qed_hw_init(struct qed_dev *cdev,
+               bool b_hw_start,
+               enum qed_int_mode int_mode,
+               bool allow_npar_tx_switch,
+               const u8 *bin_fw_data);
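
A call sketch for the prototype above; fw_data is an illustrative pointer into a loaded firmware image, and the interrupt-mode enumerator is assumed from qed_int.h:

        /* Illustrative call: start the HW with MSI-X interrupts.
         * fw_data may be NULL when no binary fw file is used.
         */
        rc = qed_hw_init(cdev, true /* b_hw_start */, QED_INT_MODE_MSIX,
                         true /* allow_npar_tx_switch */, fw_data);
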
+
+/**
+ * @brief qed_hw_stop - stop the HW
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_stop(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_stop_fastpath - should be called in case
+ *             slowpath is still required for the device,
+ *             but fastpath is not.
+ *
+ * @param cdev
+ *
+ */
+void qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_start_fastpath - restart fastpath traffic,
+ *             only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_hw_reset - reset the HW
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_reset(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_prepare - prepare the device for HW initialization
+ *
+ * @param cdev
+ * @param personality - personality to initialize
+ *
+ * @return int
+ */
+int qed_hw_prepare(struct qed_dev *cdev,
+                  int personality);
+
+/**
+ * @brief qed_hw_remove - undo qed_hw_prepare
+ *
+ * @param cdev
+ */
+void qed_hw_remove(struct qed_dev *cdev);
+
+/**
+ * @brief qed_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_ptt
+ */
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_ptt
+ */
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt);
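
The acquire/release pairing described above is the pattern used throughout this patch (see __qed_get_vport_stats earlier in the diff). A condensed sketch; the error code is an illustrative choice:

        struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

        if (!p_ptt)
                return -EBUSY;  /* PTT windows are a limited resource */

        /* ... access registers through the acquired window ... */

        qed_ptt_release(p_hwfn, p_ptt);
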
+void qed_get_vport_stats(struct qed_dev *cdev,
+                        struct qed_eth_stats   *stats);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+enum qed_dmae_address_type_t {
+       QED_DMAE_ADDRESS_HOST_VIRT,
+       QED_DMAE_ADDRESS_HOST_PHYS,
+       QED_DMAE_ADDRESS_GRC
+};
+
+/* value of flags If QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to destination address
+ * using DMA
+ */
+#define QED_DMAE_FLAG_RW_REPL_SRC       0x00000001
+#define QED_DMAE_FLAG_COMPLETION_DST    0x00000008
+
+struct qed_dmae_params {
+       u32     flags; /* consists of QED_DMAE_FLAG_* values */
+};
+
+/**
+ * @brief qed_dmae_host2grc - copy data from source addr to
+ * dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+int
+qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt,
+                 u64 source_addr,
+                 u32 grc_addr,
+                 u32 size_in_dwords,
+                 u32 flags);
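
Combined with QED_DMAE_FLAG_RW_REPL_SRC described above, this routine can spray a small zeroed host buffer across a larger GRC region. A hedged sketch; the wrapper is hypothetical, and zero_phys is assumed to point at a zero-filled DMA-coherent block of DMAE_MAX_RW_SIZE:

        /* Hypothetical wrapper: zero 'dwords' dwords of GRC space at
         * grc_addr by letting DMAE replicate the small zeroed source.
         */
        static int qed_grc_zero(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                                dma_addr_t zero_phys, u32 grc_addr, u32 dwords)
        {
                return qed_dmae_host2grc(p_hwfn, p_ptt, (u64)zero_phys,
                                         grc_addr, dwords,
                                         QED_DMAE_FLAG_RW_REPL_SRC);
        }
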
+
+/**
+ * @brief qed_chain_alloc - Allocate and initialize a chain
+ *
+ * @param cdev
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return int
+ */
+int
+qed_chain_alloc(struct qed_dev *cdev,
+               enum qed_chain_use_mode intended_use,
+               enum qed_chain_mode mode,
+               u16 num_elems,
+               size_t elem_size,
+               struct qed_chain *p_chain);
+
+/**
+ * @brief qed_chain_free - Free chain DMA memory
+ *
+ * @param cdev
+ * @param p_chain
+ */
+void qed_chain_free(struct qed_dev *cdev,
+                   struct qed_chain *p_chain);
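
A sketch of the chain life cycle tying the two prototypes together; the element type and ring size are illustrative:

        struct qed_chain ring;
        int rc;

        rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
                             QED_CHAIN_MODE_PBL, 256 /* illustrative */,
                             sizeof(struct my_elem) /* hypothetical type */,
                             &ring);
        if (rc)
                return rc;

        /* ... produce and consume ring elements ... */

        qed_chain_free(cdev, &ring);
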
+
+/**
+ * @brief qed_fw_l2_queue - Get absolute L2 queue ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return int
+ */
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+                   u16 src_id,
+                   u16 *dst_id);
+
+/**
+ * @brief qed_fw_vport - Get absolute vport ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return int
+ */
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+                u8 src_id,
+                u8 *dst_id);
+
+/**
+ * @brief qed_fw_rss_eng - Get absolute RSS engine ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return int
+ */
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+                  u8 src_id,
+                  u8 *dst_id);
+
+/**
+ * @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ *
+ * @return int
+ */
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u16 id);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
new file mode 100644 (file)
index 0000000..b2f8e85
--- /dev/null
@@ -0,0 +1,5291 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HSI_H
+#define _QED_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+
+struct qed_hwfn;
+struct qed_ptt;
+/********************************/
+/* Add include to common target */
+/********************************/
+
+/* opcodes for the event ring */
+enum common_event_opcode {
+       COMMON_EVENT_PF_START,
+       COMMON_EVENT_PF_STOP,
+       COMMON_EVENT_RESERVED,
+       COMMON_EVENT_RESERVED2,
+       COMMON_EVENT_RESERVED3,
+       COMMON_EVENT_RESERVED4,
+       COMMON_EVENT_RESERVED5,
+       MAX_COMMON_EVENT_OPCODE
+};
+
+/* Common Ramrod Command IDs */
+enum common_ramrod_cmd_id {
+       COMMON_RAMROD_UNUSED,
+       COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+       COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+       COMMON_RAMROD_RESERVED,
+       COMMON_RAMROD_RESERVED2,
+       COMMON_RAMROD_RESERVED3,
+       MAX_COMMON_RAMROD_CMD_ID
+};
+
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+       __le32          spq_base_lo /* SPQ Ring Base Address low dword */;
+       __le32          spq_base_hi /* SPQ Ring Base Address high dword */;
+       struct regpair  consolid_base_addr;
+       __le16          spq_cons /* SPQ Ring Consumer */;
+       __le16          consolid_cons /* Consolidation Ring Consumer */;
+       __le32          reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct xstorm_core_conn_ag_ctx {
+       u8      reserved0 /* cdu_validation */;
+       u8      core_state /* state */;
+       u8      flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1   /* bit6 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1   /* bit7 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
+       u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1   /* bit8 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1   /* bit9 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1   /* bit10 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1   /* bit11 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1   /* bit12 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1   /* bit13 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1   /* bit14 */
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1   /* bit15 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
+       u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3   /* timer0cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3   /* timer1cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3   /* timer2cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
+       u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3   /* cf4 */
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3   /* cf5 */
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3   /* cf6 */
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3   /* cf7 */
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
+       u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3   /* cf8 */
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3   /* cf9 */
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3   /* cf10 */
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3   /* cf11 */
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
+       u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3   /* cf12 */
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3   /* cf13 */
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3   /* cf14 */
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3   /* cf15 */
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
+       u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3   /* cf16 */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3   /* cf18 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3   /* cf19 */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
+       u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3   /* cf20 */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3   /* cf21 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3   /* cf22 */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1   /* cf0en */
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1   /* cf1en */
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
+       u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1   /* cf2en */
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1   /* cf3en */
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1   /* cf4en */
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1   /* cf5en */
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1   /* cf6en */
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1   /* cf7en */
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1   /* cf8en */
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1   /* cf9en */
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
+       u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1   /* cf10en */
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1   /* cf11en */
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1   /* cf12en */
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1   /* cf13en */
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1   /* cf14en */
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1   /* cf15en */
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1   /* cf16en */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
+       u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1   /* cf18en */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1   /* cf19en */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1   /* cf20en */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1   /* cf21en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1   /* cf22en */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1   /* cf23en */
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1   /* rule0en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1   /* rule1en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
+       u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1   /* rule2en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1   /* rule3en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1   /* rule4en */
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1   /* rule5en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1   /* rule6en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1   /* rule7en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1   /* rule8en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1   /* rule9en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
+       u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1   /* rule10en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1   /* rule11en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1   /* rule12en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1   /* rule13en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1   /* rule14en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1   /* rule15en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1   /* rule16en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1   /* rule17en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
+       u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1   /* rule18en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1   /* rule19en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1   /* rule20en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1   /* rule21en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1   /* rule22en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1   /* rule23en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1   /* rule24en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1   /* rule25en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
+       u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1   /* bit16 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1   /* bit17 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1   /* bit18 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1   /* bit19 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1   /* bit20 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1   /* bit21 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3   /* cf23 */
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
+       u8      byte2 /* byte2 */;
+       __le16  physical_q0 /* physical_q0 */;
+       __le16  consolid_prod /* physical_q1 */;
+       __le16  reserved16 /* physical_q2 */;
+       __le16  tx_bd_cons /* word3 */;
+       __le16  tx_bd_or_spq_prod /* word4 */;
+       __le16  word5 /* word5 */;
+       __le16  conn_dpi /* conn_dpi */;
+       u8      byte3 /* byte3 */;
+       u8      byte4 /* byte4 */;
+       u8      byte5 /* byte5 */;
+       u8      byte6 /* byte6 */;
+       __le32  reg0 /* reg0 */;
+       __le32  reg1 /* reg1 */;
+       __le32  reg2 /* reg2 */;
+       __le32  reg3 /* reg3 */;
+       __le32  reg4 /* reg4 */;
+       __le32  reg5 /* cf_array0 */;
+       __le32  reg6 /* cf_array1 */;
+       __le16  word7 /* word7 */;
+       __le16  word8 /* word8 */;
+       __le16  word9 /* word9 */;
+       __le16  word10 /* word10 */;
+       __le32  reg7 /* reg7 */;
+       __le32  reg8 /* reg8 */;
+       __le32  reg9 /* reg9 */;
+       u8      byte7 /* byte7 */;
+       u8      byte8 /* byte8 */;
+       u8      byte9 /* byte9 */;
+       u8      byte10 /* byte10 */;
+       u8      byte11 /* byte11 */;
+       u8      byte12 /* byte12 */;
+       u8      byte13 /* byte13 */;
+       u8      byte14 /* byte14 */;
+       u8      byte15 /* byte15 */;
+       u8      byte16 /* byte16 */;
+       __le16  word11 /* word11 */;
+       __le32  reg10 /* reg10 */;
+       __le32  reg11 /* reg11 */;
+       __le32  reg12 /* reg12 */;
+       __le32  reg13 /* reg13 */;
+       __le32  reg14 /* reg14 */;
+       __le32  reg15 /* reg15 */;
+       __le32  reg16 /* reg16 */;
+       __le32  reg17 /* reg17 */;
+       __le32  reg18 /* reg18 */;
+       __le32  reg19 /* reg19 */;
+       __le16  word12 /* word12 */;
+       __le16  word13 /* word13 */;
+       __le16  word14 /* word14 */;
+       __le16  word15 /* word15 */;
+};
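
The flags bytes in these contexts are not manipulated bit-by-bit; each sub-field is reached through its paired _MASK/_SHIFT defines via the driver's generic GET_FIELD/SET_FIELD helpers (defined in qed.h elsewhere in this patch). A sketch of the access pattern, assuming those helpers and an illustrative context pointer:

        /* 'ctx' is assumed to be a struct xstorm_core_conn_ag_ctx *.
         * GET_FIELD extracts (value >> name##_SHIFT) & name##_MASK;
         * SET_FIELD clears the field and then ORs the new value in.
         */
        u8 tx_rule = GET_FIELD(ctx->flags1,
                               XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE);

        SET_FIELD(ctx->flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
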
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+       __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* core connection context */
+struct core_conn_context {
+       struct ystorm_core_conn_st_ctx  ystorm_st_context;
+       struct regpair                  ystorm_st_padding[2] /* padding */;
+       struct pstorm_core_conn_st_ctx  pstorm_st_context;
+       struct regpair                  pstorm_st_padding[2];
+       struct xstorm_core_conn_st_ctx  xstorm_st_context;
+       struct xstorm_core_conn_ag_ctx  xstorm_ag_context;
+       struct mstorm_core_conn_st_ctx  mstorm_st_context;
+       struct regpair                  mstorm_st_padding[2];
+       struct ustorm_core_conn_st_ctx  ustorm_st_context;
+       struct regpair                  ustorm_st_padding[2] /* padding */;
+};
+
+struct eth_mstorm_per_queue_stat {
+       struct regpair  ttl0_discard;
+       struct regpair  packet_too_big_discard;
+       struct regpair  no_buff_discard;
+       struct regpair  not_active_discard;
+       struct regpair  tpa_coalesced_pkts;
+       struct regpair  tpa_coalesced_events;
+       struct regpair  tpa_aborts_num;
+       struct regpair  tpa_coalesced_bytes;
+};
+
+struct eth_pstorm_per_queue_stat {
+       struct regpair  sent_ucast_bytes;
+       struct regpair  sent_mcast_bytes;
+       struct regpair  sent_bcast_bytes;
+       struct regpair  sent_ucast_pkts;
+       struct regpair  sent_mcast_pkts;
+       struct regpair  sent_bcast_pkts;
+       struct regpair  error_drop_pkts;
+};
+
+struct eth_ustorm_per_queue_stat {
+       struct regpair  rcv_ucast_bytes;
+       struct regpair  rcv_mcast_bytes;
+       struct regpair  rcv_bcast_bytes;
+       struct regpair  rcv_ucast_pkts;
+       struct regpair  rcv_mcast_pkts;
+       struct regpair  rcv_bcast_pkts;
+};
+
+/* Event Ring Next Page Address */
+struct event_ring_next_addr {
+       struct regpair  addr /* Next Page Address */;
+       __le32          reserved[2] /* Reserved */;
+};
+
+union event_ring_element {
+       struct event_ring_entry         entry /* Event Ring Entry */;
+       struct event_ring_next_addr     next_addr;
+};
+
+enum personality_type {
+       PERSONALITY_RESERVED,
+       PERSONALITY_RESERVED2,
+       PERSONALITY_RDMA_AND_ETH /* RoCE or iWARP */,
+       PERSONALITY_RESERVED3,
+       PERSONALITY_ETH /* Ethernet */,
+       PERSONALITY_RESERVED4,
+       MAX_PERSONALITY_TYPE
+};
+
+struct pf_start_tunnel_config {
+       u8      set_vxlan_udp_port_flg;
+       u8      set_geneve_udp_port_flg;
+       u8      tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+       u8      tx_enable_l2geneve;
+       u8      tx_enable_ipgeneve;
+       u8      tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+       u8      tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+       u8      tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+       u8      tunnel_clss_l2geneve;
+       u8      tunnel_clss_ipgeneve;
+       u8      tunnel_clss_l2gre;
+       u8      tunnel_clss_ipgre;
+       __le16  vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+       __le16  geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+};
+
+/* Ramrod data for PF start ramrod */
+struct pf_start_ramrod_data {
+       struct regpair                  event_ring_pbl_addr;
+       struct regpair                  consolid_q_pbl_addr;
+       struct pf_start_tunnel_config   tunnel_config;
+       __le16                          event_ring_sb_id;
+       u8                              base_vf_id;
+       u8                              num_vfs;
+       u8                              event_ring_num_pages;
+       u8                              event_ring_sb_index;
+       u8                              path_id;
+       u8                              warning_as_error;
+       u8                              dont_log_ramrods;
+       u8                              personality;
+       __le16                          log_type_mask;
+       u8                              mf_mode /* Multi function mode */;
+       u8                              integ_phase /* Integration phase */;
+       u8                              allow_npar_tx_switching;
+       u8                              inner_to_outer_pri_map[8];
+       u8                              pri_map_valid;
+       u32                             outer_tag;
+       u8                              reserved0[4];
+};
+
+enum ports_mode {
+       ENGX2_PORTX1 /* 2 engines x 1 port */,
+       ENGX2_PORTX2 /* 2 engines x 2 ports */,
+       ENGX1_PORTX1 /* 1 engine  x 1 port */,
+       ENGX1_PORTX2 /* 1 engine  x 2 ports */,
+       ENGX1_PORTX4 /* 1 engine  x 4 ports */,
+       MAX_PORTS_MODE
+};
+
+/* Ramrod Header of SPQE */
+struct ramrod_header {
+       __le32  cid /* Slowpath Connection CID */;
+       u8      cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+       u8      protocol_id /* Ramrod Protocol ID */;
+       __le16  echo /* Ramrod echo */;
+};
+
+/* Slowpath Element (SPQE) */
+struct slow_path_element {
+       struct ramrod_header    hdr /* Ramrod Header */;
+       struct regpair          data_ptr;
+};
+
+struct tstorm_per_port_stat {
+       struct regpair  trunc_error_discard;
+       struct regpair  mac_error_discard;
+       struct regpair  mftag_filter_discard;
+       struct regpair  eth_mac_filter_discard;
+       struct regpair  ll2_mac_filter_discard;
+       struct regpair  ll2_conn_disabled_discard;
+       struct regpair  iscsi_irregular_pkt;
+       struct regpair  fcoe_irregular_pkt;
+       struct regpair  roce_irregular_pkt;
+       struct regpair  eth_irregular_pkt;
+       struct regpair  toe_irregular_pkt;
+       struct regpair  preroce_irregular_pkt;
+};
+
+struct atten_status_block {
+       __le32  atten_bits;
+       __le32  atten_ack;
+       __le16  reserved0;
+       __le16  sb_index /* status block running index */;
+       __le32  reserved1;
+};
+
+enum block_addr {
+       GRCBASE_GRC             = 0x50000,
+       GRCBASE_MISCS           = 0x9000,
+       GRCBASE_MISC            = 0x8000,
+       GRCBASE_DBU             = 0xa000,
+       GRCBASE_PGLUE_B         = 0x2a8000,
+       GRCBASE_CNIG            = 0x218000,
+       GRCBASE_CPMU            = 0x30000,
+       GRCBASE_NCSI            = 0x40000,
+       GRCBASE_OPTE            = 0x53000,
+       GRCBASE_BMB             = 0x540000,
+       GRCBASE_PCIE            = 0x54000,
+       GRCBASE_MCP             = 0xe00000,
+       GRCBASE_MCP2            = 0x52000,
+       GRCBASE_PSWHST          = 0x2a0000,
+       GRCBASE_PSWHST2         = 0x29e000,
+       GRCBASE_PSWRD           = 0x29c000,
+       GRCBASE_PSWRD2          = 0x29d000,
+       GRCBASE_PSWWR           = 0x29a000,
+       GRCBASE_PSWWR2          = 0x29b000,
+       GRCBASE_PSWRQ           = 0x280000,
+       GRCBASE_PSWRQ2          = 0x240000,
+       GRCBASE_PGLCS           = 0x0,
+       GRCBASE_PTU             = 0x560000,
+       GRCBASE_DMAE            = 0xc000,
+       GRCBASE_TCM             = 0x1180000,
+       GRCBASE_MCM             = 0x1200000,
+       GRCBASE_UCM             = 0x1280000,
+       GRCBASE_XCM             = 0x1000000,
+       GRCBASE_YCM             = 0x1080000,
+       GRCBASE_PCM             = 0x1100000,
+       GRCBASE_QM              = 0x2f0000,
+       GRCBASE_TM              = 0x2c0000,
+       GRCBASE_DORQ            = 0x100000,
+       GRCBASE_BRB             = 0x340000,
+       GRCBASE_SRC             = 0x238000,
+       GRCBASE_PRS             = 0x1f0000,
+       GRCBASE_TSDM            = 0xfb0000,
+       GRCBASE_MSDM            = 0xfc0000,
+       GRCBASE_USDM            = 0xfd0000,
+       GRCBASE_XSDM            = 0xf80000,
+       GRCBASE_YSDM            = 0xf90000,
+       GRCBASE_PSDM            = 0xfa0000,
+       GRCBASE_TSEM            = 0x1700000,
+       GRCBASE_MSEM            = 0x1800000,
+       GRCBASE_USEM            = 0x1900000,
+       GRCBASE_XSEM            = 0x1400000,
+       GRCBASE_YSEM            = 0x1500000,
+       GRCBASE_PSEM            = 0x1600000,
+       GRCBASE_RSS             = 0x238800,
+       GRCBASE_TMLD            = 0x4d0000,
+       GRCBASE_MULD            = 0x4e0000,
+       GRCBASE_YULD            = 0x4c8000,
+       GRCBASE_XYLD            = 0x4c0000,
+       GRCBASE_PRM             = 0x230000,
+       GRCBASE_PBF_PB1         = 0xda0000,
+       GRCBASE_PBF_PB2         = 0xda4000,
+       GRCBASE_RPB             = 0x23c000,
+       GRCBASE_BTB             = 0xdb0000,
+       GRCBASE_PBF             = 0xd80000,
+       GRCBASE_RDIF            = 0x300000,
+       GRCBASE_TDIF            = 0x310000,
+       GRCBASE_CDU             = 0x580000,
+       GRCBASE_CCFC            = 0x2e0000,
+       GRCBASE_TCFC            = 0x2d0000,
+       GRCBASE_IGU             = 0x180000,
+       GRCBASE_CAU             = 0x1c0000,
+       GRCBASE_UMAC            = 0x51000,
+       GRCBASE_XMAC            = 0x210000,
+       GRCBASE_DBG             = 0x10000,
+       GRCBASE_NIG             = 0x500000,
+       GRCBASE_WOL             = 0x600000,
+       GRCBASE_BMBN            = 0x610000,
+       GRCBASE_IPC             = 0x20000,
+       GRCBASE_NWM             = 0x800000,
+       GRCBASE_NWS             = 0x700000,
+       GRCBASE_MS              = 0x6a0000,
+       GRCBASE_PHY_PCIE        = 0x618000,
+       GRCBASE_MISC_AEU        = 0x8000,
+       GRCBASE_BAR0_MAP        = 0x1c00000,
+       MAX_BLOCK_ADDR
+};
+
+enum block_id {
+       BLOCK_GRC,
+       BLOCK_MISCS,
+       BLOCK_MISC,
+       BLOCK_DBU,
+       BLOCK_PGLUE_B,
+       BLOCK_CNIG,
+       BLOCK_CPMU,
+       BLOCK_NCSI,
+       BLOCK_OPTE,
+       BLOCK_BMB,
+       BLOCK_PCIE,
+       BLOCK_MCP,
+       BLOCK_MCP2,
+       BLOCK_PSWHST,
+       BLOCK_PSWHST2,
+       BLOCK_PSWRD,
+       BLOCK_PSWRD2,
+       BLOCK_PSWWR,
+       BLOCK_PSWWR2,
+       BLOCK_PSWRQ,
+       BLOCK_PSWRQ2,
+       BLOCK_PGLCS,
+       BLOCK_PTU,
+       BLOCK_DMAE,
+       BLOCK_TCM,
+       BLOCK_MCM,
+       BLOCK_UCM,
+       BLOCK_XCM,
+       BLOCK_YCM,
+       BLOCK_PCM,
+       BLOCK_QM,
+       BLOCK_TM,
+       BLOCK_DORQ,
+       BLOCK_BRB,
+       BLOCK_SRC,
+       BLOCK_PRS,
+       BLOCK_TSDM,
+       BLOCK_MSDM,
+       BLOCK_USDM,
+       BLOCK_XSDM,
+       BLOCK_YSDM,
+       BLOCK_PSDM,
+       BLOCK_TSEM,
+       BLOCK_MSEM,
+       BLOCK_USEM,
+       BLOCK_XSEM,
+       BLOCK_YSEM,
+       BLOCK_PSEM,
+       BLOCK_RSS,
+       BLOCK_TMLD,
+       BLOCK_MULD,
+       BLOCK_YULD,
+       BLOCK_XYLD,
+       BLOCK_PRM,
+       BLOCK_PBF_PB1,
+       BLOCK_PBF_PB2,
+       BLOCK_RPB,
+       BLOCK_BTB,
+       BLOCK_PBF,
+       BLOCK_RDIF,
+       BLOCK_TDIF,
+       BLOCK_CDU,
+       BLOCK_CCFC,
+       BLOCK_TCFC,
+       BLOCK_IGU,
+       BLOCK_CAU,
+       BLOCK_UMAC,
+       BLOCK_XMAC,
+       BLOCK_DBG,
+       BLOCK_NIG,
+       BLOCK_WOL,
+       BLOCK_BMBN,
+       BLOCK_IPC,
+       BLOCK_NWM,
+       BLOCK_NWS,
+       BLOCK_MS,
+       BLOCK_PHY_PCIE,
+       BLOCK_MISC_AEU,
+       BLOCK_BAR0_MAP,
+       MAX_BLOCK_ID
+};
+
+enum command_type_bit {
+       IGU_COMMAND_TYPE_NOP    = 0,
+       IGU_COMMAND_TYPE_SET    = 1,
+       MAX_COMMAND_TYPE_BIT
+};
+
+struct dmae_cmd {
+       __le32 opcode;
+#define DMAE_CMD_SRC_MASK              0x1
+#define DMAE_CMD_SRC_SHIFT             0
+#define DMAE_CMD_DST_MASK              0x3
+#define DMAE_CMD_DST_SHIFT             1
+#define DMAE_CMD_C_DST_MASK            0x1
+#define DMAE_CMD_C_DST_SHIFT           3
+#define DMAE_CMD_CRC_RESET_MASK        0x1
+#define DMAE_CMD_CRC_RESET_SHIFT       4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
+#define DMAE_CMD_DST_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
+#define DMAE_CMD_COMP_FUNC_MASK        0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT       7
+#define DMAE_CMD_COMP_WORD_EN_MASK     0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT    8
+#define DMAE_CMD_COMP_CRC_EN_MASK      0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT     9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK        0x1
+#define DMAE_CMD_RESERVED1_SHIFT       13
+#define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
+#define DMAE_CMD_ERR_HANDLING_MASK     0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT    16
+#define DMAE_CMD_PORT_ID_MASK          0x3
+#define DMAE_CMD_PORT_ID_SHIFT         18
+#define DMAE_CMD_SRC_PF_ID_MASK        0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT       20
+#define DMAE_CMD_DST_PF_ID_MASK        0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT       24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK        0x3
+#define DMAE_CMD_RESERVED2_SHIFT       30
+       __le32  src_addr_lo;
+       __le32  src_addr_hi;
+       __le32  dst_addr_lo;
+       __le32  dst_addr_hi;
+       __le16  length /* Length in DW */;
+       __le16  opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK        0xFF     /* Source VF id */
+#define DMAE_CMD_SRC_VF_ID_SHIFT       0
+#define DMAE_CMD_DST_VF_ID_MASK        0xFF     /* Destination VF id */
+#define DMAE_CMD_DST_VF_ID_SHIFT       8
+       __le32  comp_addr_lo /* PCIe completion address low or grc address */;
+       __le32  comp_addr_hi;
+       __le32  comp_val /* Value to write to completion address */;
+       __le32  crc32 /* crc32 result */;
+       __le32  crc_32_c /* crc32_c result */;
+       __le16  crc16 /* crc16 result */;
+       __le16  crc16_c /* crc16_c result */;
+       __le16  crc10 /* crc_t10 result */;
+       __le16  reserved;
+       __le16  xsum16 /* checksum16 result  */;
+       __le16  xsum8 /* checksum8 result  */;
+};
+
+struct igu_cleanup {
+       __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT    0
+#define IGU_CLEANUP_CLEANUP_SET_MASK   0x1 /* cleanup clear - 0, set - 1 */
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+       __le32 reserved1;
+};
+
+union igu_command {
+       struct igu_prod_cons_update     prod_cons_update;
+       struct igu_cleanup              cleanup;
+};
+
+struct igu_command_reg_ctrl {
+       __le16  opaque_fid;
+       __le16  igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+struct igu_mapping_line {
+       __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK            0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT           0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK         0x1      /* PF-1, VF-0 */
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT        17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK        0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
+#define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT        24
+};
+
+struct igu_msix_vector {
+       struct regpair  address;
+       __le32          data;
+       __le32          msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK      0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT     0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK     0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT    1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK  0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK     0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT    24
+};
+
+enum init_modes {
+       MODE_BB_A0,
+       MODE_RESERVED,
+       MODE_RESERVED2,
+       MODE_ASIC,
+       MODE_RESERVED3,
+       MODE_RESERVED4,
+       MODE_RESERVED5,
+       MODE_SF,
+       MODE_MF_SD,
+       MODE_MF_SI,
+       MODE_PORTS_PER_ENG_1,
+       MODE_PORTS_PER_ENG_2,
+       MODE_PORTS_PER_ENG_4,
+       MODE_40G,
+       MODE_100G,
+       MODE_EAGLE_ENG1_WORKAROUND,
+       MAX_INIT_MODES
+};
+
+enum init_phases {
+       PHASE_ENGINE,
+       PHASE_PORT,
+       PHASE_PF,
+       PHASE_RESERVED,
+       PHASE_QM_PF,
+       MAX_INIT_PHASES
+};
+
+struct mstorm_core_conn_ag_ctx {
+       u8      byte0 /* cdu_validation */;
+       u8      byte1 /* state */;
+       u8      flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1       /* exist_in_qm0 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1       /* exist_in_qm1 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3       /* cf0 */
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3       /* cf1 */
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3       /* cf2 */
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1       /* cf0en */
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1       /* cf1en */
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1       /* cf2en */
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1       /* rule0en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1       /* rule1en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1       /* rule2en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1       /* rule3en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1       /* rule4en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+       __le16  word0 /* word0 */;
+       __le16  word1 /* word1 */;
+       __le32  reg0 /* reg0 */;
+       __le32  reg1 /* reg1 */;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+       u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
+};
+
+enum pxp_tph_st_hint {
+       TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+       TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+       TPH_ST_HINT_TARGET,
+       TPH_ST_HINT_TARGET_PRIO,
+       MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+       u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK    0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT   0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK  0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK      0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT     2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK      0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT     3
+#define QM_RF_BYPASS_MASK_PFRL_MASK       0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT      4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK    0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT   5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK    0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT   6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK  0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+       __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT    0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT    1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK       0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT      2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK       0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT      3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK        0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT       4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT    5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT    6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK   0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT  7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK  0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK   0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT  9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+       u32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK          0x1         /* PQ active */
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT         0
+#define QM_RF_PQ_MAP_RL_ID_MASK             0xFF        /* RL ID */
+#define QM_RF_PQ_MAP_RL_ID_SHIFT            1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK          0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT         9
+#define QM_RF_PQ_MAP_VOQ_MASK               0x1F        /* VOQ */
+#define QM_RF_PQ_MAP_VOQ_SHIFT              18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK  0x3         /* WRR weight */
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK          0x1         /* RL active */
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT         25
+#define QM_RF_PQ_MAP_RESERVED_MASK          0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT         26
+};
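+
+/* Sketch (not in the HSI): packing a PQ map dword with the EX_SET_FIELD
+ * helper introduced above; the field layout follows the masks/shifts of
+ * struct qm_rf_pq_map, and the argument values are placeholders.
+ */
+static inline u32 ex_build_pq_map(u16 vp_pq_id, u8 rl_id, u8 voq)
+{
+	u32 reg = 0;
+
+	EX_SET_FIELD(reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+	EX_SET_FIELD(reg, QM_RF_PQ_MAP_RL_ID, rl_id);
+	EX_SET_FIELD(reg, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id);
+	EX_SET_FIELD(reg, QM_RF_PQ_MAP_VOQ, voq);
+	return reg;
+}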
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+       __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF      /* completion parameters 0-15 */
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK   0xF         /* completion type 16-19 */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT  16
+#define SDM_OP_GEN_RESERVED_MASK    0xFFF       /* reserved 20-31 */
+#define SDM_OP_GEN_RESERVED_SHIFT   20
+};
+
+struct tstorm_core_conn_ag_ctx {
+       u8      byte0 /* cdu_validation */;
+       u8      byte1 /* state */;
+       u8      flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1       /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1       /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1       /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1       /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1       /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1       /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3       /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
+       u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3       /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3       /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3       /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3       /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
+       u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3       /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3       /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3       /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3       /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
+       u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3       /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3       /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1       /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1       /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1       /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1       /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
+       u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1       /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1       /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1       /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1       /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1       /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1       /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1       /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1       /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+       u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1       /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1       /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1       /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1       /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1       /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1       /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1       /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1       /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+       __le32  reg0 /* reg0 */;
+       __le32  reg1 /* reg1 */;
+       __le32  reg2 /* reg2 */;
+       __le32  reg3 /* reg3 */;
+       __le32  reg4 /* reg4 */;
+       __le32  reg5 /* reg5 */;
+       __le32  reg6 /* reg6 */;
+       __le32  reg7 /* reg7 */;
+       __le32  reg8 /* reg8 */;
+       u8      byte2 /* byte2 */;
+       u8      byte3 /* byte3 */;
+       __le16  word0 /* word0 */;
+       u8      byte4 /* byte4 */;
+       u8      byte5 /* byte5 */;
+       __le16  word1 /* word1 */;
+       __le16  word2 /* conn_dpi */;
+       __le16  word3 /* word3 */;
+       __le32  reg9 /* reg9 */;
+       __le32  reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+       u8      reserved /* cdu_validation */;
+       u8      byte1 /* state */;
+       u8      flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1       /* exist_in_qm0 */
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1       /* exist_in_qm1 */
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3       /* timer0cf */
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3       /* timer1cf */
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3       /* timer2cf */
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3       /* timer_stop_all */
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3       /* cf4 */
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3       /* cf5 */
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3       /* cf6 */
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
+       u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1       /* cf0en */
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1       /* cf1en */
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1       /* cf2en */
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1       /* cf3en */
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1       /* cf4en */
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1       /* cf5en */
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1       /* cf6en */
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1       /* rule0en */
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+       u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1       /* rule1en */
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1       /* rule2en */
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1       /* rule3en */
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1       /* rule4en */
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1       /* rule5en */
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1       /* rule6en */
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1       /* rule7en */
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1       /* rule8en */
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+       u8      byte2 /* byte2 */;
+       u8      byte3 /* byte3 */;
+       __le16  word0 /* conn_dpi */;
+       __le16  word1 /* word1 */;
+       __le32  rx_producers /* reg0 */;
+       __le32  reg1 /* reg1 */;
+       __le32  reg2 /* reg2 */;
+       __le32  reg3 /* reg3 */;
+       __le16  word2 /* word2 */;
+       __le16  word3 /* word3 */;
+};
+
+struct ystorm_core_conn_ag_ctx {
+       u8      byte0 /* cdu_validation */;
+       u8      byte1 /* state */;
+       u8      flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1       /* exist_in_qm0 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1       /* exist_in_qm1 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3       /* cf0 */
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3       /* cf1 */
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3       /* cf2 */
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1       /* cf0en */
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1       /* cf1en */
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1       /* cf2en */
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1       /* rule0en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1       /* rule1en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1       /* rule2en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1       /* rule3en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1       /* rule4en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+       u8      byte2 /* byte2 */;
+       u8      byte3 /* byte3 */;
+       __le16  word0 /* word0 */;
+       __le32  reg0 /* reg0 */;
+       __le32  reg1 /* reg1 */;
+       __le16  word1 /* word1 */;
+       __le16  word2 /* word2 */;
+       __le16  word3 /* word3 */;
+       __le16  word4 /* word4 */;
+       __le32  reg2 /* reg2 */;
+       __le32  reg3 /* reg3 */;
+};
+
+/*********************************** Init ************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS                   23
+#define MAX_GRC_ADDR                    ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID                    0xffff
+
+/* init pattern size in bytes */
+#define INIT_PATTERN_SIZE_BITS  4
+#define MAX_INIT_PATTERN_SIZE  BIT(INIT_PATTERN_SIZE_BITS)
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE                 8192
+
+/* Global PXP window */
+#define NUM_OF_PXP_WIN                  19
+#define PXP_WIN_DWORD_SIZE_BITS 10
+#define PXP_WIN_DWORD_SIZE             BIT(PXP_WIN_DWORD_SIZE_BITS)
+#define PXP_WIN_BYTE_SIZE_BITS  (PXP_WIN_DWORD_SIZE_BITS + 2)
+#define PXP_WIN_BYTE_SIZE               (PXP_WIN_DWORD_SIZE * 4)
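+
+/* Worked numbers for the sizing above: PXP_WIN_DWORD_SIZE is 1 << 10 =
+ * 1024 dwords, so PXP_WIN_BYTE_SIZE is 1024 * 4 = 4096 bytes, i.e. each
+ * of the NUM_OF_PXP_WIN windows spans 4KB (the GTT_BAR0_MAP_* offsets
+ * further below advance in 0x1000 steps accordingly).
+ */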
+
+/********************************* GRC Dump **********************************/
+
+/* width of GRC dump register sequence length in bits */
+#define DUMP_SEQ_LEN_BITS                       8
+#define DUMP_SEQ_LEN_MAX_VAL            ((1 << DUMP_SEQ_LEN_BITS) - 1)
+
+/* width of GRC dump memory length in bits */
+#define DUMP_MEM_LEN_BITS                       18
+#define DUMP_MEM_LEN_MAX_VAL            ((1 << DUMP_MEM_LEN_BITS) - 1)
+
+/* width of register type ID in bits */
+#define REG_TYPE_ID_BITS                        6
+#define REG_TYPE_ID_MAX_VAL                     ((1 << REG_TYPE_ID_BITS) - 1)
+
+/* width of block ID in bits */
+#define BLOCK_ID_BITS                           8
+#define BLOCK_ID_MAX_VAL                        ((1 << BLOCK_ID_BITS) - 1)
+
+/******************************** Idle Check *********************************/
+
+/* max number of idle check predicate immediates */
+#define MAX_IDLE_CHK_PRED_IMM           3
+
+/* max number of idle check argument registers */
+#define MAX_IDLE_CHK_READ_REGS          3
+
+/* max number of idle check loops */
+#define MAX_IDLE_CHK_LOOPS                      0x10000
+
+/* max idle check address increment */
+#define MAX_IDLE_CHK_INCREMENT          0x10000
+
+/* indicates an undefined idle check line index */
+#define IDLE_CHK_UNDEFINED_LINE_IDX     0xffffff
+
+/* max number of register values following the idle check header */
+#define IDLE_CHK_MAX_DUMP_REGS          2
+
+/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
+#define IDLE_CHK_QM_RD_WR_PTR           0
+#define IDLE_CHK_QM_RD_WR_BANK          1
+
+/**************************************/
+/* HSI Functions constants and macros */
+/**************************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES                  8
+
+/* The MCP Trace metadata signature is duplicated in the Perl script that
+ * generates the NVRAM images.
+ */
+#define MCP_TRACE_META_IMAGE_SIGNATURE  0x669955aa
+
+/* Binary buffer header */
+struct bin_buffer_hdr {
+       u32     offset;
+       u32     length /* buffer length in bytes */;
+};
+
+/* binary buffer types */
+enum bin_buffer_type {
+       BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
+       BIN_BUF_INIT_CMD /* init commands */,
+       BIN_BUF_INIT_VAL /* init data */,
+       BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+       BIN_BUF_IRO /* internal RAM offsets array */,
+       MAX_BIN_BUFFER_TYPE
+};
+
+/* Chip IDs */
+enum chip_ids {
+       CHIP_BB_A0 /* BB A0 chip ID */,
+       CHIP_BB_B0 /* BB B0 chip ID */,
+       CHIP_K2 /* AH chip ID */,
+       MAX_CHIP_IDS
+};
+
+enum idle_chk_severity_types {
+       IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
+       IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+       IDLE_CHK_SEVERITY_WARNING,
+       MAX_IDLE_CHK_SEVERITY_TYPES
+};
+
+struct init_array_raw_hdr {
+       __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK    0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT   0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK  0xFFFFFFF       /* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+struct init_array_standard_hdr {
+       __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK  0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+struct init_array_zipped_hdr {
+       __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK         0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT        0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+struct init_array_pattern_hdr {
+       __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK          0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT         0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK  0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK   0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT  8
+};
+
+union init_array_hdr {
+       struct init_array_raw_hdr       raw /* raw init array header */;
+       struct init_array_standard_hdr  standard;
+       struct init_array_zipped_hdr    zipped /* zipped init array header */;
+       struct init_array_pattern_hdr   pattern /* pattern init array header */;
+};
+
+enum init_array_types {
+       INIT_ARR_STANDARD /* standard init array */,
+       INIT_ARR_ZIPPED /* zipped init array */,
+       INIT_ARR_PATTERN /* a repeated pattern */,
+       MAX_INIT_ARRAY_TYPES
+};
+
+/* init operation: callback */
+struct init_callback_op {
+       __le32  op_data;
+#define INIT_CALLBACK_OP_OP_MASK        0xF
+#define INIT_CALLBACK_OP_OP_SHIFT       0
+#define INIT_CALLBACK_OP_RESERVED_MASK  0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+       __le16  callback_id /* Callback ID */;
+       __le16  block_id /* Block ID */;
+};
+
+/* init comparison types */
+enum init_comparison_types {
+       INIT_COMPARISON_EQ /* equal comparison */,
+       INIT_COMPARISON_OR /* OR comparison */,
+       INIT_COMPARISON_AND /* AND comparison */,
+       MAX_INIT_COMPARISON_TYPES
+};
+
+/* init operation: delay */
+struct init_delay_op {
+       __le32  op_data;
+#define INIT_DELAY_OP_OP_MASK        0xF
+#define INIT_DELAY_OP_OP_SHIFT       0
+#define INIT_DELAY_OP_RESERVED_MASK  0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+       __le32  delay /* delay in us */;
+};
+
+/* init operation: if_mode */
+struct init_if_mode_op {
+       __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK          0xF
+#define INIT_IF_MODE_OP_OP_SHIFT         0
+#define INIT_IF_MODE_OP_RESERVED1_MASK   0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT  4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK  0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+       __le16  reserved2;
+       __le16  modes_buf_offset;
+};
+
+/*  init operation: if_phase */
+struct init_if_phase_op {
+       __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK           0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT          0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK  0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK    0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT   5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK   0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT  16
+       __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK        0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT       0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK    0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT   8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK     0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT    16
+};
+
+/* init mode operators */
+enum init_mode_ops {
+       INIT_MODE_OP_NOT /* init mode not operator */,
+       INIT_MODE_OP_OR /* init mode or operator */,
+       INIT_MODE_OP_AND /* init mode and operator */,
+       MAX_INIT_MODE_OPS
+};
+
+/* init operation: raw */
+struct init_raw_op {
+       __le32  op_data;
+#define INIT_RAW_OP_OP_MASK      0xF
+#define INIT_RAW_OP_OP_SHIFT     0
+#define INIT_RAW_OP_PARAM1_MASK  0xFFFFFFF      /* init param 1 */
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+       __le32  param2 /* Init param 2 */;
+};
+
+/* init array params */
+struct init_op_array_params {
+       __le16  size /* array size in dwords */;
+       __le16  offset /* array start offset in dwords */;
+};
+
+/* Write init operation arguments */
+union init_write_args {
+       __le32                          inline_val;
+       __le32                          zeros_count;
+       __le32                          array_offset;
+       struct init_op_array_params     runtime;
+};
+
+/* init operation: write */
+struct init_write_op {
+       __le32 data;
+#define INIT_WRITE_OP_OP_MASK        0xF
+#define INIT_WRITE_OP_OP_SHIFT       0
+#define INIT_WRITE_OP_SOURCE_MASK    0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT   4
+#define INIT_WRITE_OP_RESERVED_MASK  0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK  0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK   0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT  9
+       union init_write_args args /* Write init operation arguments */;
+};
+
+/* init operation: read */
+struct init_read_op {
+       __le32 op_data;
+#define INIT_READ_OP_OP_MASK         0xF
+#define INIT_READ_OP_OP_SHIFT        0
+#define INIT_READ_OP_POLL_COMP_MASK  0x7
+#define INIT_READ_OP_POLL_COMP_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK   0x1
+#define INIT_READ_OP_RESERVED_SHIFT  7
+#define INIT_READ_OP_POLL_MASK       0x1
+#define INIT_READ_OP_POLL_SHIFT      8
+#define INIT_READ_OP_ADDRESS_MASK    0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT   9
+       __le32 expected_val;
+};
+
+/* Init operations union */
+union init_op {
+       struct init_raw_op      raw /* raw init operation */;
+       struct init_write_op    write /* write init operation */;
+       struct init_read_op     read /* read init operation */;
+       struct init_if_mode_op  if_mode /* if_mode init operation */;
+       struct init_if_phase_op if_phase /* if_phase init operation */;
+       struct init_callback_op callback /* callback init operation */;
+       struct init_delay_op    delay /* delay init operation */;
+};
+
+/* Init command operation types */
+enum init_op_types {
+       INIT_OP_READ /* GRC read init command */,
+       INIT_OP_WRITE /* GRC write init command */,
+       INIT_OP_IF_MODE,
+       INIT_OP_IF_PHASE,
+       INIT_OP_DELAY /* delay init command */,
+       INIT_OP_CALLBACK /* callback init command */,
+       MAX_INIT_OP_TYPES
+};
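+
+/* Sketch (not driver code): every union init_op variant keeps the 4-bit
+ * op type in the low bits of its first dword, so a command stream can be
+ * dispatched by decoding the raw header first. This helper is a
+ * hypothetical illustration of that decode.
+ */
+static inline enum init_op_types ex_init_op_type(const union init_op *op)
+{
+	u32 data = le32_to_cpu(op->raw.op_data);
+
+	return (enum init_op_types)(data & INIT_RAW_OP_OP_MASK);
+}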
+
+/* init source types */
+enum init_source_types {
+       INIT_SRC_INLINE /* init value is included in the init command */,
+       INIT_SRC_ZEROS /* init value is all zeros */,
+       INIT_SRC_ARRAY /* init value is an array of values */,
+       INIT_SRC_RUNTIME /* init value is provided during runtime */,
+       MAX_INIT_SOURCE_TYPES
+};
+
+/* Internal RAM Offsets macro data */
+struct iro {
+       u32     base /* RAM field offset */;
+       u16     m1 /* multiplier 1 */;
+       u16     m2 /* multiplier 2 */;
+       u16     m3 /* multiplier 3 */;
+       u16     size /* RAM field size */;
+};
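+
+/* Sketch: the *_OFFSET macros further below all address RAM through this
+ * struct as a base plus per-index strides, i.e. for indices i1/i2/i3 the
+ * resolved offset would be computed as in this hypothetical helper:
+ */
+static inline u32 ex_iro_offset(const struct iro *iro, u16 i1, u16 i2, u16 i3)
+{
+	return iro->base + i1 * iro->m1 + i2 * iro->m2 + i3 * iro->m3;
+}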
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+       u8      active /* Indicates if this port is active */;
+       u8      num_active_phys_tcs;
+       u16     num_pbf_cmd_lines;
+       u16     num_btb_blocks;
+       __le16  reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+       u8      vport_id /* VPORT ID */;
+       u8      tc_id /* TC ID */;
+       u8      wrr_group /* WRR group */;
+       u8      reserved;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+       u32     vport_rl;
+       u16     vport_wfq;
+       u16     first_tx_pq_id[NUM_OF_TCS];
+};
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD \
+       0x00f000UL
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM \
+       0x010000UL
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM \
+       0x011000UL
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
+       0x012000UL
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM \
+       0x013000UL
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
+       0x014000UL
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
+       0x015000UL
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM \
+       0x016000UL
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM \
+       0x017000UL
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM \
+       0x018000UL
+
+/**
+ * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id                        - physical function ID
+ * @param num_pf_cids  - number of connections used by this PF
+ * @param num_vf_cids  - number of connections used by VFs of this PF
+ * @param num_tids             - number of tasks used by this PF
+ * @param num_pf_pqs   - number of PQs used by this PF
+ * @param num_vf_pqs   - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 qed_qm_pf_mem_size(u8      pf_id,
+                      u32      num_pf_cids,
+                      u32      num_vf_cids,
+                      u32      num_tids,
+                      u16      num_pf_pqs,
+                      u16      num_vf_pqs);
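+
+/* Hypothetical caller sketch: the return value is in 4KB units, so the
+ * number of bytes to reserve for the QM ILT would be scaled as below.
+ * All argument values are placeholders.
+ */
+static inline u32 ex_qm_ilt_bytes(u8 pf_id)
+{
+	u32 pages = qed_qm_pf_mem_size(pf_id, 64, 0, 0, 8, 0);
+
+	return pages * 4096;
+}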
+
+struct qed_qm_common_rt_init_params {
+       u8                              max_ports_per_engine;
+       u8                              max_phys_tcs_per_port;
+       bool                            pf_rl_en;
+       bool                            pf_wfq_en;
+       bool                            vport_rl_en;
+       bool                            vport_wfq_en;
+       struct init_qm_port_params      *port_params;
+};
+
+/**
+ * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param max_phys_tcs_per_port        - max number of physical TCs per port in HW
+ * @param pf_rl_en                             - enable per-PF rate limiters
+ * @param pf_wfq_en                            - enable per-PF WFQ
+ * @param vport_rl_en                  - enable per-VPORT rate limiters
+ * @param vport_wfq_en                 - enable per-VPORT WFQ
+ * @param port_params                  - array of size MAX_NUM_PORTS with
+ *                                       parameters for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_qm_common_rt_init(
+       struct qed_hwfn                         *p_hwfn,
+       struct qed_qm_common_rt_init_params     *p_params);
+
+struct qed_qm_pf_rt_init_params {
+       u8                              port_id;
+       u8                              pf_id;
+       u8                              max_phys_tcs_per_port;
+       bool                            is_first_pf;
+       u32                             num_pf_cids;
+       u32                             num_vf_cids;
+       u32                             num_tids;
+       u16                             start_pq;
+       u16                             num_pf_pqs;
+       u16                             num_vf_pqs;
+       u8                              start_vport;
+       u8                              num_vports;
+       u8                              pf_wfq;
+       u32                             pf_rl;
+       struct init_qm_pq_params        *pq_params;
+       struct init_qm_vport_params     *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn                  *p_hwfn,
+                     struct qed_ptt                    *p_ptt,
+                     struct qed_qm_pf_rt_init_params   *p_params);
+
+/**
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt        - ptt window used for writing the registers
+ * @param pf_id        - PF ID
+ * @param pf_rl        - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_pf_rl(struct qed_hwfn     *p_hwfn,
+                  struct qed_ptt       *p_ptt,
+                  u8                   pf_id,
+                  u32                  pf_rl);
+
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt                - ptt window used for writing the registers
+ * @param vport_id     - VPORT ID
+ * @param vport_rl     - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn  *p_hwfn,
+                     struct qed_ptt    *p_ptt,
+                     u8                vport_id,
+                     u32               vport_rl);
+
+/**
+ * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt                 - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq       - true for Tx PQs, false for Other PQs.
+ * @param start_pq       - first PQ ID to stop
+ * @param num_pqs        - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if timeout occurred while waiting
+ *                                     for QM command done.
+ */
+bool qed_send_qm_stop_cmd(struct qed_hwfn      *p_hwfn,
+                         struct qed_ptt        *p_ptt,
+                         bool                  is_release_cmd,
+                         bool                  is_tx_pq,
+                         u16                   start_pq,
+                         u16                   num_pqs);
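+
+/* Hypothetical usage sketch, assuming a stop is issued first
+ * (is_release_cmd = false) and the same PQ range is released afterwards
+ * (is_release_cmd = true); both calls return false on timeout.
+ */
+static inline bool ex_qm_stop_tx_pqs(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     u16 start_pq, u16 num_pqs)
+{
+	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+				  start_pq, num_pqs))
+		return false;
+	return qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+				    start_pq, num_pqs);
+}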
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET                        (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE                  (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id)               (IRO[1].base + \
+                                                        ((port_id) * \
+                                                         IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE                          (IRO[1].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id)       (IRO[2].base +  \
+                                                        ((vf_id) *     \
+                                                         IRO[2].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE                        (IRO[2].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET                    (IRO[3].base)
+#define USTORM_FLR_FINAL_ACK_SIZE                      (IRO[3].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id)                  (IRO[4].base +  \
+                                                        ((pf_id) *     \
+                                                         IRO[4].m1))
+#define USTORM_EQE_CONS_SIZE                           (IRO[4].size)
+/* Ustorm Completion ring consumer */
+#define USTORM_CQ_CONS_OFFSET(global_queue_id)         (IRO[5].base +  \
+                                                        ((global_queue_id) * \
+                                                         IRO[5].m1))
+#define USTORM_CQ_CONS_SIZE                            (IRO[5].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[6].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE                    (IRO[6].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[7].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE                    (IRO[7].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[8].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE                    (IRO[8].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[9].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE                    (IRO[9].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[10].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE                    (IRO[10].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET                  (IRO[11].base)
+#define USTORM_INTEG_TEST_DATA_SIZE                    (IRO[11].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id)   (IRO[12].base + \
+                                                        ((core_rx_queue_id) * \
+                                                         IRO[12].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE                       (IRO[12].size)
+/* Tstorm LiteL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \
+                                                            ((core_rx_q_id) * \
+                                                             IRO[13].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE            (IRO[13].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \
+                                                            ((core_rx_q_id) * \
+                                                             IRO[14].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE            (IRO[14].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \
+                                                            ((core_txst_id) * \
+                                                             IRO[15].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE            (IRO[15].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \
+                                                  ((stat_counter_id) * \
+                                                   IRO[16].m1))
+#define MSTORM_QUEUE_STAT_SIZE                         (IRO[16].size)
+/* Mstorm producers */
+#define MSTORM_PRODS_OFFSET(queue_id)                  (IRO[17].base + \
+                                                        ((queue_id) *  \
+                                                         IRO[17].m1))
+#define MSTORM_PRODS_SIZE                              (IRO[17].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET                   (IRO[18].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE                     (IRO[18].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id)      (IRO[19].base + \
+                                                       ((stat_counter_id) * \
+                                                        IRO[19].m1))
+#define USTORM_QUEUE_STAT_SIZE                         (IRO[19].size)
+/* Ustorm queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id)         (IRO[20].base + \
+                                                        ((queue_id) *  \
+                                                         IRO[20].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE                     (IRO[20].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id)      (IRO[21].base + \
+                                                        ((stat_counter_id) * \
+                                                         IRO[21].m1))
+#define PSTORM_QUEUE_STAT_SIZE                         (IRO[21].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id)             (IRO[22].base + \
+                                                        ((pf_id) *     \
+                                                         IRO[22].m1))
+#define TSTORM_ETH_PRS_INPUT_SIZE                      (IRO[22].size)
+/* Ystorm queue zone */
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id)         (IRO[23].base + \
+                                                        ((queue_id) *  \
+                                                         IRO[23].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE                     (IRO[23].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id)              (IRO[24].base + \
+                                                        ((rss_id) *    \
+                                                         IRO[24].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE                                (IRO[24].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id)              (IRO[25].base + \
+                                                        ((rss_id) *    \
+                                                         IRO[25].m1))
+#define USTORM_TOE_CQ_PROD_SIZE                                (IRO[25].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id)              (IRO[26].base + \
+                                                        ((pf_id) *     \
+                                                         IRO[26].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE                       (IRO[26].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id)    (IRO[27].base + \
+                                                        ((cmdq_queue_id) * \
+                                                         IRO[27].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE                     (IRO[27].size)
+/* Mstorm rq-cons of given queue-id */
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id)                (IRO[28].base + \
+                                                        ((rq_queue_id) * \
+                                                         IRO[28].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE                       (IRO[28].size)
+/* Pstorm RoCE statistics */
+#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id)       (IRO[29].base + \
+                                                        ((stat_counter_id) * \
+                                                         IRO[29].m1))
+#define PSTORM_ROCE_STAT_SIZE                          (IRO[29].size)
+/* Tstorm RoCE statistics */
+#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id)       (IRO[30].base + \
+                                                        ((stat_counter_id) * \
+                                                         IRO[30].m1))
+#define TSTORM_ROCE_STAT_SIZE                          (IRO[30].size)
+
+static const struct iro iro_arr[31] = {
+       { 0x10,   0x0,   0x0,   0x0,   0x8     },
+       { 0x4448, 0x60,  0x0,   0x0,   0x60    },
+       { 0x498,  0x8,   0x0,   0x0,   0x4     },
+       { 0x494,  0x0,   0x0,   0x0,   0x4     },
+       { 0x10,   0x8,   0x0,   0x0,   0x2     },
+       { 0x90,   0x8,   0x0,   0x0,   0x2     },
+       { 0x4540, 0x0,   0x0,   0x0,   0xf8    },
+       { 0x39e0, 0x0,   0x0,   0x0,   0xf8    },
+       { 0x2598, 0x0,   0x0,   0x0,   0xf8    },
+       { 0x4350, 0x0,   0x0,   0x0,   0xf8    },
+       { 0x52d0, 0x0,   0x0,   0x0,   0xf8    },
+       { 0x7a48, 0x0,   0x0,   0x0,   0xf8    },
+       { 0x100,  0x8,   0x0,   0x0,   0x8     },
+       { 0x5808, 0x10,  0x0,   0x0,   0x10    },
+       { 0xb100, 0x30,  0x0,   0x0,   0x30    },
+       { 0x95c0, 0x30,  0x0,   0x0,   0x30    },
+       { 0x54f8, 0x40,  0x0,   0x0,   0x40    },
+       { 0x200,  0x10,  0x0,   0x0,   0x8     },
+       { 0x9e70, 0x0,   0x0,   0x0,   0x4     },
+       { 0x7ca0, 0x40,  0x0,   0x0,   0x30    },
+       { 0xd00,  0x8,   0x0,   0x0,   0x8     },
+       { 0x2790, 0x80,  0x0,   0x0,   0x38    },
+       { 0xa520, 0xf0,  0x0,   0x0,   0xf0    },
+       { 0x80,   0x8,   0x0,   0x0,   0x8     },
+       { 0xac0,  0x8,   0x0,   0x0,   0x8     },
+       { 0x2580, 0x8,   0x0,   0x0,   0x8     },
+       { 0x2500, 0x8,   0x0,   0x0,   0x8     },
+       { 0x440,  0x8,   0x0,   0x0,   0x2     },
+       { 0x1800, 0x8,   0x0,   0x0,   0x2     },
+       { 0x27c8, 0x80,  0x0,   0x0,   0x10    },
+       { 0x4710, 0x10,  0x0,   0x0,   0x10    },
+};
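+
+/* Worked example against the table above, assuming IRO[] is populated
+ * from iro_arr: TSTORM_PORT_STAT_OFFSET(port_id) uses IRO[1] =
+ * { 0x4448, 0x60, ... }, so port 1 resolves to 0x4448 + 1 * 0x60 =
+ * 0x44a8, with each port's statistics block spanning
+ * TSTORM_PORT_STAT_SIZE = IRO[1].size = 0x60.
+ */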
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET                                0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET                                1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET                                2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET                                3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET                                4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET                                5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET                                6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET                                7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET                                8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET                                9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET                                10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET                                11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET                                12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET                                13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET                                14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET                                15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET                                  16
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET                              17
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET                              18
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET                               19
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET                               20
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET                            21
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET                           22
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET                             23
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                                 760
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE                                   736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET                                1496
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE                                  736
+#define CAU_REG_PI_MEMORY_RT_OFFSET                                     2232
+#define CAU_REG_PI_MEMORY_RT_SIZE                                       4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET                    6648
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET                      6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET                      6650
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET                         6651
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET                         6652
+#define PRS_REG_SEARCH_TCP_RT_OFFSET                                    6653
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET                                   6654
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET                                   6655
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET                           6656
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET                           6657
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET                               6658
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET                     6659
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET           6660
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET                      6661
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET                         6662
+#define SRC_REG_FIRSTFREE_RT_OFFSET                                     6663
+#define SRC_REG_FIRSTFREE_RT_SIZE                                       2
+#define SRC_REG_LASTFREE_RT_OFFSET                                      6665
+#define SRC_REG_LASTFREE_RT_SIZE                                        2
+#define SRC_REG_COUNTFREE_RT_OFFSET                                     6667
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET                              6668
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET                                6669
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET                                6670
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET                                  6671
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET                                  6672
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET                                 6673
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET                               6674
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET                                6675
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET                               6676
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET                                6677
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET                              6678
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET                               6679
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET                             6680
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET                              6681
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET                             6682
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET                              6683
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET                             6684
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET                              6685
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET                     6686
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET                   6687
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET                   6688
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET                               6689
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET                             6690
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET                             6691
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET                           6692
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET                         6693
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET                         6694
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET                                    6695
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET                                6696
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET                                    6697
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET                                    6698
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET                              6699
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET                              6700
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET                                 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE                                   22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET                                   28701
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET                           28702
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET                              28703
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET                              28704
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET                              28705
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET                                 28706
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET                                 28707
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET                                 28708
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET                     28709
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET                     28710
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET                                28711
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE                                  416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET                                29127
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE                                  512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET                                    29639
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET                                    29640
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET                                    29641
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET                               29642
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET                               29643
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET                               29644
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET                               29645
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET                               29646
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET                               29647
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET                               29648
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET                               29649
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET                               29650
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET                               29651
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET                              29652
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET                              29653
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET                              29654
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET                              29655
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET                              29656
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET                              29657
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET                              29658
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET                              29659
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET                              29660
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET                              29661
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET                              29662
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET                              29663
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET                              29664
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET                              29665
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET                              29666
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET                              29667
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET                              29668
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET                              29669
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET                              29670
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET                              29671
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET                              29672
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET                              29673
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET                              29674
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET                              29675
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET                              29676
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET                              29677
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET                              29678
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET                              29679
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET                              29680
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET                              29681
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET                              29682
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET                              29683
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET                              29684
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET                              29685
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET                              29686
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET                              29687
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET                              29688
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET                              29689
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET                              29690
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET                              29691
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET                              29692
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET                              29693
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET                              29694
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET                              29695
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET                              29696
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET                              29697
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET                              29698
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET                              29699
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET                              29700
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET                              29701
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET                              29702
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET                              29703
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET                              29704
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET                              29705
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET                                29706
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE                                  128
+#define QM_REG_VOQCRDLINE_RT_OFFSET                                     29834
+#define QM_REG_VOQCRDLINE_RT_SIZE                                       20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET                                 29854
+#define QM_REG_VOQINITCRDLINE_RT_SIZE                                   20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET                             29874
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET                             29875
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET                              29876
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET                            29877
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET                           29878
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET                                29879
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET                                29880
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET                                29881
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET                                29882
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET                                29883
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET                                29884
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET                                29885
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET                                29886
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET                                29887
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET                                29888
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET                               29889
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET                               29890
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET                               29891
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET                               29892
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET                               29893
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET                               29894
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET                            29895
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET                            29896
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET                            29897
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET                            29898
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET                               29899
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET                               29900
+#define QM_REG_PQTX2PF_0_RT_OFFSET                                      29901
+#define QM_REG_PQTX2PF_1_RT_OFFSET                                      29902
+#define QM_REG_PQTX2PF_2_RT_OFFSET                                      29903
+#define QM_REG_PQTX2PF_3_RT_OFFSET                                      29904
+#define QM_REG_PQTX2PF_4_RT_OFFSET                                      29905
+#define QM_REG_PQTX2PF_5_RT_OFFSET                                      29906
+#define QM_REG_PQTX2PF_6_RT_OFFSET                                      29907
+#define QM_REG_PQTX2PF_7_RT_OFFSET                                      29908
+#define QM_REG_PQTX2PF_8_RT_OFFSET                                      29909
+#define QM_REG_PQTX2PF_9_RT_OFFSET                                      29910
+#define QM_REG_PQTX2PF_10_RT_OFFSET                                     29911
+#define QM_REG_PQTX2PF_11_RT_OFFSET                                     29912
+#define QM_REG_PQTX2PF_12_RT_OFFSET                                     29913
+#define QM_REG_PQTX2PF_13_RT_OFFSET                                     29914
+#define QM_REG_PQTX2PF_14_RT_OFFSET                                     29915
+#define QM_REG_PQTX2PF_15_RT_OFFSET                                     29916
+#define QM_REG_PQTX2PF_16_RT_OFFSET                                     29917
+#define QM_REG_PQTX2PF_17_RT_OFFSET                                     29918
+#define QM_REG_PQTX2PF_18_RT_OFFSET                                     29919
+#define QM_REG_PQTX2PF_19_RT_OFFSET                                     29920
+#define QM_REG_PQTX2PF_20_RT_OFFSET                                     29921
+#define QM_REG_PQTX2PF_21_RT_OFFSET                                     29922
+#define QM_REG_PQTX2PF_22_RT_OFFSET                                     29923
+#define QM_REG_PQTX2PF_23_RT_OFFSET                                     29924
+#define QM_REG_PQTX2PF_24_RT_OFFSET                                     29925
+#define QM_REG_PQTX2PF_25_RT_OFFSET                                     29926
+#define QM_REG_PQTX2PF_26_RT_OFFSET                                     29927
+#define QM_REG_PQTX2PF_27_RT_OFFSET                                     29928
+#define QM_REG_PQTX2PF_28_RT_OFFSET                                     29929
+#define QM_REG_PQTX2PF_29_RT_OFFSET                                     29930
+#define QM_REG_PQTX2PF_30_RT_OFFSET                                     29931
+#define QM_REG_PQTX2PF_31_RT_OFFSET                                     29932
+#define QM_REG_PQTX2PF_32_RT_OFFSET                                     29933
+#define QM_REG_PQTX2PF_33_RT_OFFSET                                     29934
+#define QM_REG_PQTX2PF_34_RT_OFFSET                                     29935
+#define QM_REG_PQTX2PF_35_RT_OFFSET                                     29936
+#define QM_REG_PQTX2PF_36_RT_OFFSET                                     29937
+#define QM_REG_PQTX2PF_37_RT_OFFSET                                     29938
+#define QM_REG_PQTX2PF_38_RT_OFFSET                                     29939
+#define QM_REG_PQTX2PF_39_RT_OFFSET                                     29940
+#define QM_REG_PQTX2PF_40_RT_OFFSET                                     29941
+#define QM_REG_PQTX2PF_41_RT_OFFSET                                     29942
+#define QM_REG_PQTX2PF_42_RT_OFFSET                                     29943
+#define QM_REG_PQTX2PF_43_RT_OFFSET                                     29944
+#define QM_REG_PQTX2PF_44_RT_OFFSET                                     29945
+#define QM_REG_PQTX2PF_45_RT_OFFSET                                     29946
+#define QM_REG_PQTX2PF_46_RT_OFFSET                                     29947
+#define QM_REG_PQTX2PF_47_RT_OFFSET                                     29948
+#define QM_REG_PQTX2PF_48_RT_OFFSET                                     29949
+#define QM_REG_PQTX2PF_49_RT_OFFSET                                     29950
+#define QM_REG_PQTX2PF_50_RT_OFFSET                                     29951
+#define QM_REG_PQTX2PF_51_RT_OFFSET                                     29952
+#define QM_REG_PQTX2PF_52_RT_OFFSET                                     29953
+#define QM_REG_PQTX2PF_53_RT_OFFSET                                     29954
+#define QM_REG_PQTX2PF_54_RT_OFFSET                                     29955
+#define QM_REG_PQTX2PF_55_RT_OFFSET                                     29956
+#define QM_REG_PQTX2PF_56_RT_OFFSET                                     29957
+#define QM_REG_PQTX2PF_57_RT_OFFSET                                     29958
+#define QM_REG_PQTX2PF_58_RT_OFFSET                                     29959
+#define QM_REG_PQTX2PF_59_RT_OFFSET                                     29960
+#define QM_REG_PQTX2PF_60_RT_OFFSET                                     29961
+#define QM_REG_PQTX2PF_61_RT_OFFSET                                     29962
+#define QM_REG_PQTX2PF_62_RT_OFFSET                                     29963
+#define QM_REG_PQTX2PF_63_RT_OFFSET                                     29964
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET                                   29965
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET                                   29966
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET                                   29967
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET                                   29968
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET                                   29969
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET                                   29970
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET                                   29971
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET                                   29972
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET                                   29973
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET                                   29974
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET                                  29975
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET                                  29976
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET                                  29977
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET                                  29978
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET                                  29979
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET                                  29980
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET                                 29981
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET                                 29982
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET                            29983
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET                            29984
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET                              29985
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET                              29986
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET                              29987
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET                              29988
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET                              29989
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET                              29990
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET                              29991
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET                              29992
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET                                   29993
+#define QM_REG_RLGLBLINCVAL_RT_SIZE                                     256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET                               30249
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE                                 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET                                      30505
+#define QM_REG_RLGLBLCRD_RT_SIZE                                        256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET                                   30761
+#define QM_REG_RLPFPERIOD_RT_OFFSET                                     30762
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET                                30763
+#define QM_REG_RLPFINCVAL_RT_OFFSET                                     30764
+#define QM_REG_RLPFINCVAL_RT_SIZE                                       16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET                                 30780
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE                                   16
+#define QM_REG_RLPFCRD_RT_OFFSET                                        30796
+#define QM_REG_RLPFCRD_RT_SIZE                                          16
+#define QM_REG_RLPFENABLE_RT_OFFSET                                     30812
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET                                  30813
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET                                    30814
+#define QM_REG_WFQPFWEIGHT_RT_SIZE                                      16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET                                30830
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE                                  16
+#define QM_REG_WFQPFCRD_RT_OFFSET                                       30846
+#define QM_REG_WFQPFCRD_RT_SIZE                                         160
+#define QM_REG_WFQPFENABLE_RT_OFFSET                                    31006
+#define QM_REG_WFQVPENABLE_RT_OFFSET                                    31007
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET                                   31008
+#define QM_REG_BASEADDRTXPQ_RT_SIZE                                     512
+#define QM_REG_TXPQMAP_RT_OFFSET                                        31520
+#define QM_REG_TXPQMAP_RT_SIZE                                          512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET                                    32032
+#define QM_REG_WFQVPWEIGHT_RT_SIZE                                      512
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET                                32544
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE                                  512
+#define QM_REG_WFQVPCRD_RT_OFFSET                                       33056
+#define QM_REG_WFQVPCRD_RT_SIZE                                         512
+#define QM_REG_WFQVPMAP_RT_OFFSET                                       33568
+#define QM_REG_WFQVPMAP_RT_SIZE                                         512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET                                   34080
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE                                     160
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET                         34240
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET                         34241
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET                         34242
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET                         34243
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET                         34244
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET                          34245
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET                      34246
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET                               34247
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE                                 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET                          34251
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE                            4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET                            34255
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE                              4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET                               34259
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET                         34260
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE                           32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET                            34292
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE                              16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET                          34308
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE                            16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET                 34324
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE                   16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET                       34340
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE                         16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET                                  34356
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET                               34357
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET                               34358
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET                               34359
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET                           34360
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET                           34361
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET                           34362
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET                           34363
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET                        34364
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET                        34365
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET                        34366
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET                        34367
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET                            34368
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET                         34369
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET                          34370
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET                        34371
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET                           34372
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET                    34373
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET                        34374
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET                           34375
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET                    34376
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET                        34377
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET                           34378
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET                    34379
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET                        34380
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET                           34381
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET                    34382
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET                        34383
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET                           34384
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET                    34385
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET                        34386
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET                           34387
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET                    34388
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET                        34389
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET                           34390
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET                    34391
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET                        34392
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET                           34393
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET                    34394
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET                        34395
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET                           34396
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET                    34397
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET                        34398
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET                           34399
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET                    34400
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET                       34401
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET                          34402
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET                   34403
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET                       34404
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET                          34405
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET                   34406
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET                       34407
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET                          34408
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET                   34409
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET                       34410
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET                          34411
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET                   34412
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET                       34413
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET                          34414
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET                   34415
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET                       34416
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET                          34417
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET                   34418
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET                       34419
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET                          34420
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET                   34421
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET                       34422
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET                          34423
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET                   34424
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET                       34425
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET                          34426
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET                   34427
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET                       34428
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET                          34429
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET                   34430
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET                                    34431
+
+#define RUNTIME_ARRAY_SIZE 34432
+
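+/* Illustrative sketch, not part of the original header: every *_RT_OFFSET
+ * above indexes a flat u32 runtime-init array of RUNTIME_ARRAY_SIZE
+ * entries, and each *_RT_SIZE gives the number of consecutive entries a
+ * blocked register occupies.  A hypothetical bounds-checked writer:
+ *
+ *     static u32 rt_data[RUNTIME_ARRAY_SIZE];
+ *
+ *     static void rt_store(u32 rt_offset, u32 idx, u32 val)
+ *     {
+ *             if (rt_offset + idx < RUNTIME_ARRAY_SIZE)
+ *                     rt_data[rt_offset + idx] = val;
+ *     }
+ *
+ *     rt_store(QM_REG_RLPFINCVAL_RT_OFFSET, i, inc_val);  (i < RT_SIZE, 16)
+ */
+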
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* The eth storm context for the Pstorm */
+struct pstorm_eth_conn_st_ctx {
+       __le32 reserved[8];
+};
+
+/* The eth storm context for the Xstorm */
+struct xstorm_eth_conn_st_ctx {
+       __le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+       u8      reserved0 /* cdu_validation */;
+       u8      eth_state /* state */;
+       u8      flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK               0x1 /* bit4 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK               0x1 /* bit6 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK               0x1 /* bit7 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK               0x1 /* bit8 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK               0x1 /* bit9 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK               0x1 /* bit10 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK                   0x1 /* bit11 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK                   0x1 /* bit12 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK                   0x1 /* bit13 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1 /* bit14 */
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1 /* bit15 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK                     0x3 /* timer0cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK                     0x3 /* timer1cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3 /* timer2cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK                     0x3 /* cf4 */
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK                     0x3 /* cf5 */
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK                     0x3 /* cf6 */
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK                     0x3 /* cf7 */
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK                     0x3 /* cf8 */
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK                     0x3 /* cf9 */
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK                    0x3 /* cf10 */
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK                    0x3 /* cf11 */
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK                    0x3 /* cf12 */
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK                    0x3 /* cf13 */
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK                    0x3 /* cf14 */
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK                    0x3 /* cf15 */
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3 /* cf16 */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3 /* cf17 */
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK                   0x3 /* cf18 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK            0x3 /* cf19 */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK                0x3 /* cf20 */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK              0x3 /* cf21 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK               0x3 /* cf22 */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK                   0x1 /* cf0en */
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK                   0x1 /* cf1en */
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK                   0x1 /* cf4en */
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK                   0x1 /* cf5en */
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK                   0x1 /* cf6en */
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK                   0x1 /* cf7en */
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK                   0x1 /* cf8en */
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK                   0x1 /* cf9en */
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK                  0x1 /* cf10en */
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK                  0x1 /* cf11en */
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK                  0x1 /* cf12en */
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK                  0x1 /* cf13en */
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK                  0x1 /* cf14en */
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK                  0x1 /* cf15en */
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1 /* cf16en */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1 /* cf17en */
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK                0x1 /* cf18en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1 /* cf19en */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1 /* cf20en */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK              0x1 /* cf21en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1 /* cf22en */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK              0x1 /* rule0en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK              0x1 /* rule1en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK              0x1 /* rule2en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK              0x1 /* rule3en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1 /* rule4en */
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK            0x1 /* rule8en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK                 0x1 /* rule9en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK                0x1 /* rule10en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK                0x1 /* rule11en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK            0x1 /* rule12en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK            0x1 /* rule13en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK                0x1 /* rule14en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK                0x1 /* rule15en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK                0x1 /* rule16en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK                0x1 /* rule17en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK                0x1 /* rule18en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK                0x1 /* rule19en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK            0x1 /* rule20en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK            0x1 /* rule21en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK            0x1 /* rule22en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK            0x1 /* rule23en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK            0x1 /* rule24en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK            0x1 /* rule25en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1 /* bit16 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1 /* bit17 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1 /* bit18 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1 /* bit19 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1 /* bit20 */
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1 /* bit21 */
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK              0x3 /* cf23 */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+       u8      edpm_event_id /* byte2 */;
+       __le16  physical_q0 /* physical_q0 */;
+       __le16  word1 /* physical_q1 */;
+       __le16  edpm_num_bds /* physical_q2 */;
+       __le16  tx_bd_cons /* word3 */;
+       __le16  tx_bd_prod /* word4 */;
+       __le16  go_to_bd_cons /* word5 */;
+       __le16  conn_dpi /* conn_dpi */;
+       u8      byte3 /* byte3 */;
+       u8      byte4 /* byte4 */;
+       u8      byte5 /* byte5 */;
+       u8      byte6 /* byte6 */;
+       __le32  reg0 /* reg0 */;
+       __le32  reg1 /* reg1 */;
+       __le32  reg2 /* reg2 */;
+       __le32  reg3 /* reg3 */;
+       __le32  reg4 /* reg4 */;
+       __le32  reg5 /* cf_array0 */;
+       __le32  reg6 /* cf_array1 */;
+       __le16  word7 /* word7 */;
+       __le16  word8 /* word8 */;
+       __le16  word9 /* word9 */;
+       __le16  word10 /* word10 */;
+       __le32  reg7 /* reg7 */;
+       __le32  reg8 /* reg8 */;
+       __le32  reg9 /* reg9 */;
+       u8      byte7 /* byte7 */;
+       u8      byte8 /* byte8 */;
+       u8      byte9 /* byte9 */;
+       u8      byte10 /* byte10 */;
+       u8      byte11 /* byte11 */;
+       u8      byte12 /* byte12 */;
+       u8      byte13 /* byte13 */;
+       u8      byte14 /* byte14 */;
+       u8      byte15 /* byte15 */;
+       u8      byte16 /* byte16 */;
+       __le16  word11 /* word11 */;
+       __le32  reg10 /* reg10 */;
+       __le32  reg11 /* reg11 */;
+       __le32  reg12 /* reg12 */;
+       __le32  reg13 /* reg13 */;
+       __le32  reg14 /* reg14 */;
+       __le32  reg15 /* reg15 */;
+       __le32  reg16 /* reg16 */;
+       __le32  reg17 /* reg17 */;
+       __le32  reg18 /* reg18 */;
+       __le32  reg19 /* reg19 */;
+       __le16  word12 /* word12 */;
+       __le16  word13 /* word13 */;
+       __le16  word14 /* word14 */;
+       __le16  word15 /* word15 */;
+};
+
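+/* Illustrative sketch, not part of the original header: the flagsN bytes
+ * above are read and written through their MASK/SHIFT pairs.  Hypothetical
+ * helpers (plain C, no driver API assumed):
+ *
+ *     #define GET_FIELD(val, name) \
+ *             (((val) >> (name##_SHIFT)) & (name##_MASK))
+ *
+ *     #define SET_FIELD(val, name, fld)                                 \
+ *             ((val) = ((val) & ~((name##_MASK) << (name##_SHIFT))) |   \
+ *                      (((fld) & (name##_MASK)) << (name##_SHIFT)))
+ *
+ *     SET_FIELD(ctx->flags7, XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0, 1);
+ *     if (GET_FIELD(ctx->flags1, XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE))
+ *             ...;
+ */
+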
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+       __le32 reserved[8];
+};
+
+/* The eth storm context for the Ustorm */
+struct ustorm_eth_conn_st_ctx {
+       __le32 reserved[40];
+};
+
+/* eth connection context */
+struct eth_conn_context {
+       struct ystorm_eth_conn_st_ctx   ystorm_st_context;
+       struct regpair                  ystorm_st_padding[2] /* padding */;
+       struct pstorm_eth_conn_st_ctx   pstorm_st_context;
+       struct regpair                  pstorm_st_padding[2] /* padding */;
+       struct xstorm_eth_conn_st_ctx   xstorm_st_context;
+       struct xstorm_eth_conn_ag_ctx   xstorm_ag_context;
+       struct tstorm_eth_conn_st_ctx   tstorm_st_context;
+       struct regpair                  tstorm_st_padding[2] /* padding */;
+       struct mstorm_eth_conn_st_ctx   mstorm_st_context;
+       struct ustorm_eth_conn_st_ctx   ustorm_st_context;
+};
+
+enum eth_filter_action {
+       ETH_FILTER_ACTION_REMOVE,
+       ETH_FILTER_ACTION_ADD,
+       ETH_FILTER_ACTION_REPLACE,
+       MAX_ETH_FILTER_ACTION
+};
+
+struct eth_filter_cmd {
+       u8      type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+       u8      vport_id /* the vport id */;
+       u8      action /* filter command action: add/remove/replace */;
+       u8      reserved0;
+       __le32  vni;
+       __le16  mac_lsb;
+       __le16  mac_mid;
+       __le16  mac_msb;
+       __le16  vlan_id;
+};
+
+struct eth_filter_cmd_header {
+       u8      rx;
+       u8      tx;
+       u8      cmd_cnt;
+       u8      assert_on_error;
+       u8      reserved1[4];
+};
+
+enum eth_filter_type {
+       ETH_FILTER_TYPE_MAC,
+       ETH_FILTER_TYPE_VLAN,
+       ETH_FILTER_TYPE_PAIR,
+       ETH_FILTER_TYPE_INNER_MAC,
+       ETH_FILTER_TYPE_INNER_VLAN,
+       ETH_FILTER_TYPE_INNER_PAIR,
+       ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+       ETH_FILTER_TYPE_MAC_VNI_PAIR,
+       ETH_FILTER_TYPE_VNI,
+       MAX_ETH_FILTER_TYPE
+};
+
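+/* Illustrative sketch, not part of the original header: filling a single
+ * MAC-add filter command.  The 48-bit MAC is carried in three __le16
+ * words; the byte grouping shown (mac[0..1] -> msb, mac[4..5] -> lsb) is
+ * an assumption made for illustration only:
+ *
+ *     struct eth_filter_cmd cmd = { 0 };
+ *
+ *     cmd.type     = ETH_FILTER_TYPE_MAC;
+ *     cmd.vport_id = vport_id;
+ *     cmd.action   = ETH_FILTER_ACTION_ADD;
+ *     cmd.mac_msb  = cpu_to_le16((mac[0] << 8) | mac[1]);
+ *     cmd.mac_mid  = cpu_to_le16((mac[2] << 8) | mac[3]);
+ *     cmd.mac_lsb  = cpu_to_le16((mac[4] << 8) | mac[5]);
+ */
+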
+enum eth_ramrod_cmd_id {
+       ETH_RAMROD_UNUSED,
+       ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+       ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+       ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+       ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+       ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+       ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+       ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+       ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+       ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+       ETH_RAMROD_RESERVED,
+       ETH_RAMROD_RESERVED2,
+       ETH_RAMROD_RESERVED3,
+       ETH_RAMROD_RESERVED4,
+       ETH_RAMROD_RESERVED5,
+       ETH_RAMROD_RESERVED6,
+       ETH_RAMROD_RESERVED7,
+       ETH_RAMROD_RESERVED8,
+       MAX_ETH_RAMROD_CMD_ID
+};
+
+struct eth_vport_rss_config {
+       __le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK        0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT       0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK        0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT       1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT   2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT   3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT   4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT   5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK  0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK     0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT    7
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK     0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT    8
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK              0x7F
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT             9
+       u8      rss_id;
+       u8      rss_mode;
+       u8      update_rss_key;
+       u8      update_rss_ind_table;
+       u8      update_rss_capabilities;
+       u8      tbl_size;
+       __le32  reserved2[2];
+       __le16  indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+       __le32  rss_key[ETH_RSS_KEY_SIZE_REGS];
+       __le32  reserved3[2];
+};
+
+enum eth_vport_rss_mode {
+       ETH_VPORT_RSS_MODE_DISABLED,
+       ETH_VPORT_RSS_MODE_REGULAR,
+       MAX_ETH_VPORT_RSS_MODE
+};
+
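+/* Illustrative sketch, not part of the original header: enabling 4-tuple
+ * RSS for TCP over IPv4.  The capability word is composed from the
+ * MASK/SHIFT pairs above ("ramrod" is a hypothetical enclosing struct):
+ *
+ *     struct eth_vport_rss_config *rss = &ramrod->rss_config;
+ *     u16 caps = 0;
+ *
+ *     caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT;
+ *     caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT;
+ *     rss->capabilities            = cpu_to_le16(caps);
+ *     rss->rss_mode                = ETH_VPORT_RSS_MODE_REGULAR;
+ *     rss->update_rss_capabilities = 1;
+ */
+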
+struct eth_vport_rx_mode {
+       __le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT         0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT       1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK  0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT         3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT       4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT       5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK               0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT              6
+       __le16 reserved2[3];
+};
+
+struct eth_vport_tpa_param {
+       u64     reserved[2];
+};
+
+struct eth_vport_tx_mode {
+       __le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK    0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT   0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK    0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT   2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK         0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT        5
+       __le16 reserved2[3];
+};
+
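+/* Illustrative sketch, not part of the original header: an RX state that
+ * accepts unmatched unicast plus all multicast and broadcast, with no
+ * drop-all bits set:
+ *
+ *     u16 state = 0;
+ *
+ *     state |= 1 << ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT;
+ *     state |= 1 << ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT;
+ *     state |= 1 << ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT;
+ *     rx_mode->state = cpu_to_le16(state);
+ */
+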
+struct rx_queue_start_ramrod_data {
+       __le16          rx_queue_id;
+       __le16          num_of_pbl_pages;
+       __le16          bd_max_bytes;
+       __le16          sb_id;
+       u8              sb_index;
+       u8              vport_id;
+       u8              default_rss_queue_flg;
+       u8              complete_cqe_flg;
+       u8              complete_event_flg;
+       u8              stats_counter_id;
+       u8              pin_context;
+       u8              pxp_tph_valid_bd;
+       u8              pxp_tph_valid_pkt;
+       u8              pxp_st_hint;
+       __le16          pxp_st_index;
+       u8              reserved[4];
+       struct regpair  cqe_pbl_addr;
+       struct regpair  bd_base;
+       struct regpair  sge_base;
+};
+
+struct rx_queue_stop_ramrod_data {
+       __le16  rx_queue_id;
+       u8      complete_cqe_flg;
+       u8      complete_event_flg;
+       u8      vport_id;
+       u8      reserved[3];
+};
+
+struct rx_queue_update_ramrod_data {
+       __le16          rx_queue_id;
+       u8              complete_cqe_flg;
+       u8              complete_event_flg;
+       u8              init_sge_ring_flg;
+       u8              vport_id;
+       u8              pxp_tph_valid_sge;
+       u8              pxp_st_hint;
+       __le16          pxp_st_index;
+       u8              reserved[6];
+       struct regpair  sge_base;
+};
+
+struct tx_queue_start_ramrod_data {
+       __le16  sb_id;
+       u8      sb_index;
+       u8      vport_id;
+       u8      tc;
+       u8      stats_counter_id;
+       __le16  qm_pq_id;
+       u8      flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK  0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT     1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT     2
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK              0x1F
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT             3
+       u8      pin_context;
+       u8      pxp_tph_valid_bd;
+       u8      pxp_tph_valid_pkt;
+       __le16  pxp_st_index;
+       u8      pxp_st_hint;
+       u8      reserved1[3];
+       __le16  queue_zone_id;
+       __le16  test_dup_count;
+       __le16  pbl_size;
+       struct regpair  pbl_base_addr;
+};
+
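+/* Illustrative sketch, not part of the original header: composing the TX
+ * queue start flags byte with the hypothetical SET_FIELD helper sketched
+ * after xstorm_eth_conn_ag_ctx above:
+ *
+ *     p_ramrod->flags = 0;
+ *     SET_FIELD(p_ramrod->flags,
+ *               TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC, 1);
+ */
+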
+struct tx_queue_stop_ramrod_data {
+       __le16 reserved[4];
+};
+
+struct vport_filter_update_ramrod_data {
+       struct eth_filter_cmd_header    filter_cmd_hdr;
+       struct eth_filter_cmd           filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+struct vport_start_ramrod_data {
+       u8                              vport_id;
+       u8                              sw_fid;
+       __le16                          mtu;
+       u8                              drop_ttl0_en;
+       u8                              inner_vlan_removal_en;
+       struct eth_vport_rx_mode        rx_mode;
+       struct eth_vport_tx_mode        tx_mode;
+       struct eth_vport_tpa_param      tpa_param;
+       __le16                          sge_buff_size;
+       u8                              max_sges_num;
+       u8                              tx_switching_en;
+       u8                              anti_spoofing_en;
+       u8                              default_vlan_en;
+       u8                              handle_ptp_pkts;
+       u8                              silent_vlan_removal_en;
+       __le16                          default_vlan;
+       u8                              untagged;
+       u8                              reserved[7];
+};
+
+struct vport_stop_ramrod_data {
+       u8      vport_id;
+       u8      reserved[7];
+};
+
+struct vport_update_ramrod_data_cmn {
+       u8      vport_id;
+       u8      update_rx_active_flg;
+       u8      rx_active_flg;
+       u8      update_tx_active_flg;
+       u8      tx_active_flg;
+       u8      update_rx_mode_flg;
+       u8      update_tx_mode_flg;
+       u8      update_approx_mcast_flg;
+       u8      update_rss_flg;
+       u8      update_inner_vlan_removal_en_flg;
+       u8      inner_vlan_removal_en;
+       u8      update_tpa_param_flg;
+       u8      update_tpa_en_flg;
+       u8      update_sge_param_flg;
+       __le16  sge_buff_size;
+       u8      max_sges_num;
+       u8      update_tx_switching_en_flg;
+       u8      tx_switching_en;
+       u8      update_anti_spoofing_en_flg;
+       u8      anti_spoofing_en;
+       u8      update_handle_ptp_pkts;
+       u8      handle_ptp_pkts;
+       u8      update_default_vlan_en_flg;
+       u8      default_vlan_en;
+       u8      update_default_vlan_flg;
+       __le16  default_vlan;
+       u8      update_accept_any_vlan_flg;
+       u8      accept_any_vlan;
+       u8      silent_vlan_removal_en;
+       u8      reserved;
+};
+
+struct vport_update_ramrod_mcast {
+       __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+};
+
+struct vport_update_ramrod_data {
+       struct vport_update_ramrod_data_cmn     common;
+       struct eth_vport_rx_mode                rx_mode;
+       struct eth_vport_tx_mode                tx_mode;
+       struct eth_vport_tpa_param              tpa_param;
+       struct vport_update_ramrod_mcast        approx_mcast;
+       struct eth_vport_rss_config             rss_config;
+};
+
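+/* Illustrative sketch, not part of the original header: each value in the
+ * common vport-update section is paired with an update_*_flg, a pairing
+ * that suggests firmware applies a field only when its flag is set
+ * ("ramrod" is a hypothetical enclosing struct):
+ *
+ *     struct vport_update_ramrod_data_cmn *p_cmn = &ramrod->common;
+ *
+ *     p_cmn->update_inner_vlan_removal_en_flg = 1;
+ *     p_cmn->inner_vlan_removal_en            = 1;
+ *
+ * all other update_*_flg fields stay 0, so their values are left untouched.
+ */
+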
+struct mstorm_eth_conn_ag_ctx {
+       u8      byte0 /* cdu_validation */;
+       u8      byte1 /* state */;
+       u8      flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1   /* exist_in_qm0 */
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK          0x1   /* exist_in_qm1 */
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK           0x3   /* cf0 */
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK           0x3   /* cf1 */
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK           0x3   /* cf2 */
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+       u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK         0x1   /* cf0en */
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK         0x1   /* cf1en */
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK         0x1   /* cf2en */
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK       0x1   /* rule0en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK       0x1   /* rule1en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK       0x1   /* rule2en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK       0x1   /* rule3en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK       0x1   /* rule4en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+       __le16  word0 /* word0 */;
+       __le16  word1 /* word1 */;
+       __le32  reg0 /* reg0 */;
+       __le32  reg1 /* reg1 */;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+       u8      byte0 /* cdu_validation */;
+       u8      byte1 /* state */;
+       u8      flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK      0x1       /* exist_in_qm0 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT     0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK      0x1       /* exist_in_qm1 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT     1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK      0x1       /* bit2 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT     2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK      0x1       /* bit3 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT     3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK      0x1       /* bit4 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT     4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK      0x1       /* bit5 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT     5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK       0x3       /* timer0cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT      6
+       u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK       0x3       /* timer1cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK       0x3       /* timer2cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT      2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK       0x3       /* timer_stop_all */
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT      4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK       0x3       /* cf4 */
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT      6
+       u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK       0x3       /* cf5 */
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK       0x3       /* cf6 */
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT      2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK       0x3       /* cf7 */
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT      4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK       0x3       /* cf8 */
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT      6
+       u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK       0x3       /* cf9 */
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK      0x3       /* cf10 */
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT     2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK     0x1       /* cf0en */
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT    4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK     0x1       /* cf1en */
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT    5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK     0x1       /* cf2en */
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT    6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK     0x1       /* cf3en */
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT    7
+       u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK     0x1       /* cf4en */
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT    0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK     0x1       /* cf5en */
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT    1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK     0x1       /* cf6en */
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT    2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK     0x1       /* cf7en */
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT    3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK     0x1       /* cf8en */
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT    4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK     0x1       /* cf9en */
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT    5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK    0x1       /* cf10en */
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT   6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK   0x1       /* rule0en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT  7
+       u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK   0x1       /* rule1en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT  0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK   0x1       /* rule2en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT  1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK   0x1       /* rule3en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT  2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK   0x1       /* rule4en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT  3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK   0x1       /* rule5en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT  4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK  0x1       /* rule6en */
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK   0x1       /* rule7en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT  6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK   0x1       /* rule8en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT  7
+       __le32  reg0 /* reg0 */;
+       __le32  reg1 /* reg1 */;
+       __le32  reg2 /* reg2 */;
+       __le32  reg3 /* reg3 */;
+       __le32  reg4 /* reg4 */;
+       __le32  reg5 /* reg5 */;
+       __le32  reg6 /* reg6 */;
+       __le32  reg7 /* reg7 */;
+       __le32  reg8 /* reg8 */;
+       u8      byte2 /* byte2 */;
+       u8      byte3 /* byte3 */;
+       __le16  rx_bd_cons /* word0 */;
+       u8      byte4 /* byte4 */;
+       u8      byte5 /* byte5 */;
+       __le16  rx_bd_prod /* word1 */;
+       __le16  word2 /* conn_dpi */;
+       __le16  word3 /* word3 */;
+       __le32  reg9 /* reg9 */;
+       __le32  reg10 /* reg10 */;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+       u8      byte0 /* cdu_validation */;
+       u8      byte1 /* state */;
+       u8      flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK                  0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK                  0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                 1
+#define USTORM_ETH_CONN_AG_CTX_CF0_MASK                   0x3   /* timer0cf */
+#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT                  2
+#define USTORM_ETH_CONN_AG_CTX_CF1_MASK                   0x3   /* timer1cf */
+#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT                  4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK                   0x3   /* timer2cf */
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                  6
+       u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK                   0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                  0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK             0x3   /* cf4 */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT            2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK             0x3   /* cf5 */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT            4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK     0x3   /* cf6 */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT    6
+       u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK                 0x1   /* cf0en */
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT                0
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK                 0x1   /* cf1en */
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT                1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                 0x1   /* cf2en */
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                 0x1   /* cf3en */
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK          0x1   /* cf4en */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT         4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK          0x1   /* cf5en */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT         5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK  0x1   /* cf6en */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK               0x1   /* rule0en */
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT              7
+       u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK               0x1   /* rule1en */
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT              0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK               0x1   /* rule2en */
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT              1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK               0x1   /* rule3en */
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT              2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK               0x1   /* rule4en */
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT              3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK               0x1   /* rule5en */
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT              4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK               0x1   /* rule6en */
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT              5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK               0x1   /* rule7en */
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT              6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK               0x1   /* rule8en */
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT              7
+       u8      byte2 /* byte2 */;
+       u8      byte3 /* byte3 */;
+       __le16  word0 /* conn_dpi */;
+       __le16  tx_bd_cons /* word1 */;
+       __le32  reg0 /* reg0 */;
+       __le32  reg1 /* reg1 */;
+       __le32  reg2 /* reg2 */;
+       __le32  reg3 /* reg3 */;
+       __le16  tx_drv_bd_cons /* word2 */;
+       __le16  rx_drv_cqe_cons /* word3 */;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+       u8      reserved0 /* cdu_validation */;
+       u8      eth_state /* state */;
+       u8      flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK                   0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK            0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK                0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK              0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK               0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK              0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+       u8      edpm_event_id /* byte2 */;
+       __le16  physical_q0 /* physical_q0 */;
+       __le16  word1 /* physical_q1 */;
+       __le16  edpm_num_bds /* physical_q2 */;
+       __le16  tx_bd_cons /* word3 */;
+       __le16  tx_bd_prod /* word4 */;
+       __le16  go_to_bd_cons /* word5 */;
+       __le16  conn_dpi /* conn_dpi */;
+};
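
Every field in these aggregative-context structures follows the same
_MASK/_SHIFT convention, where the mask gives the field width *before*
shifting (elsewhere in this header, e.g. the DCBX and NVM config words,
masks are in-place instead). A minimal, hedged sketch of generic accessors;
the qed driver defines similar GET_FIELD/SET_FIELD helpers elsewhere, and
the QED_-prefixed names and the ctx pointer here are illustrative only:

    #define QED_GET_FIELD(value, name) \
            (((value) >> name##_SHIFT) & name##_MASK)
    #define QED_SET_FIELD(value, name, field)                       \
            ((value) = ((value) & ~(name##_MASK << name##_SHIFT)) | \
                       (((field) & name##_MASK) << name##_SHIFT))

    /* Example: read the 2-bit DQ_CF field out of flags6 */
    u8 dq_cf = QED_GET_FIELD(ctx->flags6, XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF);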
+
+#define VF_MAX_STATIC 192       /* In case of K2 */
+
+#define MCP_GLOB_PATH_MAX       2
+#define MCP_PORT_MAX            2       /* Global */
+#define MCP_GLOB_PORT_MAX       4       /* Global */
+#define MCP_GLOB_FUNC_MAX       16      /* Global */
+
+typedef u32 offsize_t;                  /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT    0
+#define OFFSIZE_OFFSET_MASK     0x0000ffff
+/* Size of a specific element (not of the whole array, if any) */
+#define OFFSIZE_SIZE_SHIFT      16
+#define OFFSIZE_SIZE_MASK       0xffff0000
+
+/* SECTION_OFFSET calculates the offset in bytes out of an offsize */
+#define SECTION_OFFSET(_offsize)        ((((_offsize &             \
+                                           OFFSIZE_OFFSET_MASK) >> \
+                                          OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* QED_SECTION_SIZE calculates the size in bytes out of an offsize */
+#define QED_SECTION_SIZE(_offsize)              (((_offsize &           \
+                                                  OFFSIZE_SIZE_MASK) >> \
+                                                 OFFSIZE_SIZE_SHIFT) << 2)
+
+/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
+ * within section.
+ */
+#define SECTION_ADDR(_offsize, idx)     (MCP_REG_SCRATCH +         \
+                                        SECTION_OFFSET(_offsize) + \
+                                        (QED_SECTION_SIZE(_offsize) * idx))
+
+/* SECTION_OFFSIZE_ADDR returns the GRC addr of the offsize word itself.
+ * Use offsetof(), since OFFSETUP collides with the firmware definition.
+ */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base +              \
+                                                  offsetof(struct           \
+                                                           mcp_public_data, \
+                                                           sections[_section]))
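
A hedged usage sketch tying these macros together: read a section's offsize
word from the public base, then compute the GRC address of one array element.
qed_rd() and its p_hwfn/p_ptt arguments stand in for however the driver reads
GRC; treat the call, pub_base, and port_id as illustrative assumptions:

    u32 offsize = qed_rd(p_hwfn, p_ptt,
                         SECTION_OFFSIZE_ADDR(pub_base, PUBLIC_PORT));
    u32 port_addr = SECTION_ADDR(offsize, port_id); /* addr of port[port_id] */
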
+/* PHY configuration */
+struct pmm_phy_cfg {
+       u32     speed;
+#define PMM_SPEED_AUTONEG   0
+
+       u32     pause;  /* bitmask */
+#define PMM_PAUSE_NONE          0x0
+#define PMM_PAUSE_AUTONEG       0x1
+#define PMM_PAUSE_RX            0x2
+#define PMM_PAUSE_TX            0x4
+
+       u32     adv_speed;  /* Default should be the speed_cap_mask */
+       u32     loopback_mode;
+#define PMM_LOOPBACK_NONE               0
+#define PMM_LOOPBACK_INT_PHY    1
+#define PMM_LOOPBACK_EXT_PHY    2
+#define PMM_LOOPBACK_EXT                3
+#define PMM_LOOPBACK_MAC                4
+
+       /* features */
+       u32 feature_config_flags;
+};
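
A hedged sketch of composing a PHY config request from these values;
adv_speed and feature_config_flags are left zero purely for illustration:

    struct pmm_phy_cfg cfg = {
            .speed = PMM_SPEED_AUTONEG,
            .pause = PMM_PAUSE_AUTONEG | PMM_PAUSE_RX | PMM_PAUSE_TX,
            .loopback_mode = PMM_LOOPBACK_NONE,
    };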
+
+struct port_mf_cfg {
+       u32     dynamic_cfg; /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK              0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT             0
+#define PORT_MF_CFG_OV_TAG_DEFAULT         PORT_MF_CFG_OV_TAG_MASK
+
+       u32     reserved[1];
+};
+
+/* DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ */
+struct pmm_stats {
+       u64     r64;    /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
+       u64     r127;   /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
+       u64     r255;
+       u64     r511;
+       u64     r1023;
+       u64     r1518;
+       u64     r1522;
+       u64     r2047;
+       u64     r4095;
+       u64     r9216;
+       u64     r16383;
+       u64     rfcs;   /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
+       u64     rxcf;   /* 0x10 (Offset 0x60 ) RX control frame counter*/
+       u64     rxpf;   /* 0x11 (Offset 0x68 ) RX pause frame counter*/
+       u64     rxpp;   /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
+       u64     raln;   /* 0x16 (Offset 0x78 ) RX alignment error counter*/
+       u64     rfcr;   /* 0x19 (Offset 0x80 ) RX false carrier counter */
+       u64     rovr;   /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
+       u64     rjbr;   /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+       u64     rund;   /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+       u64     rfrg;   /* 0x35 (Offset 0xa0 ) RX fragment counter */
+       u64     t64;    /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+       u64     t127;
+       u64     t255;
+       u64     t511;
+       u64     t1023;
+       u64     t1518;
+       u64     t2047;
+       u64     t4095;
+       u64     t9216;
+       u64     t16383;
+       u64     txpf;   /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+       u64     txpp;   /* 0x51 (Offset 0x100) TX PFC frame counter */
+       u64     tlpiec;
+       u64     tncl;
+       u64     rbyte;  /* 0x3d (Offset 0x118) RX byte counter */
+       u64     rxuca;  /* 0x0c (Offset 0x120) RX UC frame counter */
+       u64     rxmca;  /* 0x0d (Offset 0x128) RX MC frame counter */
+       u64     rxbca;  /* 0x0e (Offset 0x130) RX BC frame counter */
+       u64     rxpok;
+       u64     tbyte;  /* 0x6f (Offset 0x140) TX byte counter */
+       u64     txuca;  /* 0x4d (Offset 0x148) TX UC frame counter */
+       u64     txmca;  /* 0x4e (Offset 0x150) TX MC frame counter */
+       u64     txbca;  /* 0x4f (Offset 0x158) TX BC frame counter */
+       u64     txcf;   /* 0x54 (Offset 0x160) TX control frame counter */
+};
+
+struct brb_stats {
+       u64     brb_truncate[8];
+       u64     brb_discard[8];
+};
+
+struct port_stats {
+       struct brb_stats        brb;
+       struct pmm_stats        pmm;
+};
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+struct couple_mode_teaming {
+       u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM               BIT(0)
+
+#define PORT_CMT_PORT_ROLE             BIT(1)
+#define PORT_CMT_PORT_INACTIVE      (0 << 1)
+#define PORT_CMT_PORT_ACTIVE           BIT(1)
+
+#define PORT_CMT_TEAM_MASK             BIT(2)
+#define PORT_CMT_TEAM0              (0 << 2)
+#define PORT_CMT_TEAM1                 BIT(2)
+};
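
Decoding one port's CMT byte is plain bit-testing against the defines above;
a short sketch (cmt_info and port_id are assumed inputs):

    u8 cmt = cmt_info->port_cmt[port_id];
    bool in_team = cmt & PORT_CMT_IN_TEAM;
    bool active  = (cmt & PORT_CMT_PORT_ROLE) == PORT_CMT_PORT_ACTIVE;
    u8 team      = (cmt & PORT_CMT_TEAM_MASK) ? CMT_TEAM1 : CMT_TEAM0;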
+
+/**************************************
+*     LLDP and DCBX HSI structures
+**************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL           32
+#define MAX_SYSTEM_LLDP_TLV_DATA    32
+
+enum lldp_agent_e {
+       LLDP_NEAREST_BRIDGE = 0,
+       LLDP_NEAREST_NON_TPMR_BRIDGE,
+       LLDP_NEAREST_CUSTOMER_BRIDGE,
+       LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+       u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK        0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT       0
+#define LLDP_CONFIG_HOLD_MASK               0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT              8
+#define LLDP_CONFIG_MAX_CREDIT_MASK         0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT        12
+#define LLDP_CONFIG_ENABLE_RX_MASK          0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT         30
+#define LLDP_CONFIG_ENABLE_TX_MASK          0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT         31
+       u32     local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+       u32     local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+       u32     prefix_seq_num;
+       u32     status; /* TBD */
+
+       /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+       u32     peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+
+       /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+       u32     peer_port_id[LLDP_PORT_ID_STAT_LEN];
+       u32     suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+       u32 flags;
+#define DCBX_ETS_ENABLED_MASK                   0x00000001
+#define DCBX_ETS_ENABLED_SHIFT                  0
+#define DCBX_ETS_WILLING_MASK                   0x00000002
+#define DCBX_ETS_WILLING_SHIFT                  1
+#define DCBX_ETS_ERROR_MASK                     0x00000004
+#define DCBX_ETS_ERROR_SHIFT                    2
+#define DCBX_ETS_CBS_MASK                       0x00000008
+#define DCBX_ETS_CBS_SHIFT                      3
+#define DCBX_ETS_MAX_TCS_MASK                   0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT                  4
+       u32     pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC                       4
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET         (DCBX_ISCSI_OOO_TC + 1)
+       u32     tc_bw_tbl[2];
+       u32     tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT                     0
+#define DCBX_ETS_TSA_CBS                        1
+#define DCBX_ETS_TSA_ETS                        2
+};
+
+struct dcbx_app_priority_entry {
+       u32 entry;
+#define DCBX_APP_PRI_MAP_MASK       0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT      0
+#define DCBX_APP_PRI_0              0x01
+#define DCBX_APP_PRI_1              0x02
+#define DCBX_APP_PRI_2              0x04
+#define DCBX_APP_PRI_3              0x08
+#define DCBX_APP_PRI_4              0x10
+#define DCBX_APP_PRI_5              0x20
+#define DCBX_APP_PRI_6              0x40
+#define DCBX_APP_PRI_7              0x80
+#define DCBX_APP_SF_MASK            0x00000300
+#define DCBX_APP_SF_SHIFT           8
+#define DCBX_APP_SF_ETHTYPE         0
+#define DCBX_APP_SF_PORT            1
+#define DCBX_APP_PROTOCOL_ID_MASK   0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT  16
+};
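
A hedged sketch of unpacking one app-table entry (ignoring the byte-order
conversion the driver performs on these big-endian firmware structures;
entry is an assumed pointer):

    u32 e = entry->entry;
    u16 proto_id = (e & DCBX_APP_PROTOCOL_ID_MASK) >> DCBX_APP_PROTOCOL_ID_SHIFT;
    u32 sf       = (e & DCBX_APP_SF_MASK) >> DCBX_APP_SF_SHIFT; /* ethtype/port */
    bool has_pri3 = !!((e & DCBX_APP_PRI_MAP_MASK) & DCBX_APP_PRI_3);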
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+       u32 flags;
+#define DCBX_APP_ENABLED_MASK           0x00000001
+#define DCBX_APP_ENABLED_SHIFT          0
+#define DCBX_APP_WILLING_MASK           0x00000002
+#define DCBX_APP_WILLING_SHIFT          1
+#define DCBX_APP_ERROR_MASK             0x00000004
+#define DCBX_APP_ERROR_SHIFT            2
+/* Not in use
+ * #define DCBX_APP_DEFAULT_PRI_MASK       0x00000f00
+ * #define DCBX_APP_DEFAULT_PRI_SHIFT      8
+ */
+#define DCBX_APP_MAX_TCS_MASK           0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT          12
+#define DCBX_APP_NUM_ENTRIES_MASK       0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT      16
+       struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+       /* PG feature */
+       struct dcbx_ets_feature ets;
+
+       /* PFC feature */
+       u32                     pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK             0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT            0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0            0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1            0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2            0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3            0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4            0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5            0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6            0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7            0x80
+
+#define DCBX_PFC_FLAGS_MASK                     0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT                    8
+#define DCBX_PFC_CAPS_MASK                      0x00000f00
+#define DCBX_PFC_CAPS_SHIFT                     8
+#define DCBX_PFC_MBC_MASK                       0x00004000
+#define DCBX_PFC_MBC_SHIFT                      14
+#define DCBX_PFC_WILLING_MASK                   0x00008000
+#define DCBX_PFC_WILLING_SHIFT                  15
+#define DCBX_PFC_ENABLED_MASK                   0x00010000
+#define DCBX_PFC_ENABLED_SHIFT                  16
+#define DCBX_PFC_ERROR_MASK                     0x00020000
+#define DCBX_PFC_ERROR_SHIFT                    17
+
+       /* APP feature */
+       struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+       u32 config;
+#define DCBX_CONFIG_VERSION_MASK            0x00000003
+#define DCBX_CONFIG_VERSION_SHIFT           0
+#define DCBX_CONFIG_VERSION_DISABLED        0
+#define DCBX_CONFIG_VERSION_IEEE            1
+#define DCBX_CONFIG_VERSION_CEE             2
+
+       u32                     flags;
+       struct dcbx_features    features;
+};
+
+struct dcbx_mib {
+       u32     prefix_seq_num;
+       u32     flags;
+       struct dcbx_features    features;
+       u32                     suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+       u16     valid;
+       u16     length;
+       u32     data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      G L O B A L   */
+/*                                    */
+/**************************************/
+struct public_global {
+       u32                             max_path;
+#define MAX_PATH_BIG_BEAR       2
+#define MAX_PATH_K2             1
+       u32                             max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+       u32                             debug_mb_offset;
+       u32                             phymod_dbg_mb_offset;
+       struct couple_mode_teaming      cmt;
+       s32                             internal_temperature;
+       u32                             mfw_ver;
+       u32                             running_bundle_id;
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      P A T H       */
+/*                                    */
+/**************************************/
+
+/****************************************************************************
+* Shared Memory 2 Region                                                   *
+****************************************************************************/
+/* The fw_flr_ack is actually built in the following way:
+ * 8 bit:   PF ack
+ * 128 bit: VF ack
+ * 8 bit:   ios_dis_ack
+ * To maintain endianness in the mailbox HSI we keep using u32. The FW must
+ * have the VF right after the PF, since that is how it accesses arrays (it
+ * always expects the VF to reside after the PF, which makes its calculation
+ * much easier).
+ * To satisfy both constraints while keeping the struct small, the code
+ * abuses the structure defined here to achieve the actual partition above.
+ */
+struct fw_flr_mb {
+       u32     aggint;
+       u32     opgen_addr;
+       u32     accum_ack;  /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE       0
+#define ACCUM_ACK_PF_SHIFT      0
+
+#define ACCUM_ACK_VF_BASE       8
+#define ACCUM_ACK_VF_SHIFT      3
+
+#define ACCUM_ACK_IOV_DIS_BASE  256
+#define ACCUM_ACK_IOV_DIS_SHIFT 8
+};
+
+struct public_path {
+       struct fw_flr_mb        flr_mb;
+       u32                     mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+       u32                     process_kill;
+#define PROCESS_KILL_COUNTER_MASK               0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT              0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK          0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT         16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
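
A hedged sketch of unpacking process_kill; the extracted AEU value is in the
same packed form that the GLOBAL_AEU_BIT() macro produces:

    u32 pk = path->process_kill;
    u16 count   = (pk & PROCESS_KILL_COUNTER_MASK) >>
                  PROCESS_KILL_COUNTER_SHIFT;
    u16 aeu_bit = (pk & PROCESS_KILL_GLOB_AEU_BIT_MASK) >>
                  PROCESS_KILL_GLOB_AEU_BIT_SHIFT;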
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      P O R T       */
+/*                                    */
+/**************************************/
+
+/****************************************************************************
+* Driver <-> FW Mailbox                                                    *
+****************************************************************************/
+
+struct public_port {
+       u32 validity_map;   /* 0x0 (4*2 = 0x8) */
+
+       /* validity bits */
+#define MCP_VALIDITY_PCI_CFG                    0x00100000
+#define MCP_VALIDITY_MB                         0x00200000
+#define MCP_VALIDITY_DEV_INFO                   0x00400000
+#define MCP_VALIDITY_RESERVED                   0x00000007
+
+       /* One licensing bit should be set */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK     0x00000038
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT    0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT  0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT       0x00000020
+
+       /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN         0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK            0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI            0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE            0x000001c0
+
+       u32 link_status;
+#define LINK_STATUS_LINK_UP                                     0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK                       0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD           BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD            (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G                        (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G                        (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G                        (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G                        (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G                       (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G                        (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED                      0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE                     0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED                     0x00000080
+
+#define LINK_STATUS_PFC_ENABLED                                 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE        0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE        0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE            0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE            0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE            0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE            0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE           0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE            0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK      0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE      (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE       BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE       (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE                     (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT                                0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED                     0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED                     0x00400000
+
+       u32                     link_status1;
+       u32                     ext_phy_fw_version;
+       u32                     drv_phy_cfg_addr;
+
+       u32                     port_stx;
+
+       u32                     stat_nig_timer;
+
+       struct port_mf_cfg      port_mf_config;
+       struct port_stats       stats;
+
+       u32                     media_type;
+#define MEDIA_UNSPECIFIED       0x0
+#define MEDIA_SFPP_10G_FIBER    0x1
+#define MEDIA_XFP_FIBER         0x2
+#define MEDIA_DA_TWINAX         0x3
+#define MEDIA_BASE_T            0x4
+#define MEDIA_SFP_1G_FIBER      0x5
+#define MEDIA_KR                0xf0
+#define MEDIA_NOT_PRESENT       0xff
+
+       u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET             0
+#define LFA_LINK_FLAP_REASON_MASK               0x000000ff
+#define LFA_NO_REASON                                   (0 << 0)
+#define LFA_LINK_DOWN                                  BIT(0)
+#define LFA_FORCE_INIT                                  BIT(1)
+#define LFA_LOOPBACK_MISMATCH                           BIT(2)
+#define LFA_SPEED_MISMATCH                              BIT(3)
+#define LFA_FLOW_CTRL_MISMATCH                          BIT(4)
+#define LFA_ADV_SPEED_MISMATCH                          BIT(5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET        8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK          0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET                  16
+#define LINK_FLAP_COUNT_MASK                    0x00ff0000
+
+       u32                                     link_change_count;
+
+       /* LLDP params */
+       struct lldp_config_params_s     lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+       struct lldp_status_params_s     lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+       struct lldp_system_tlvs_buffer_s        system_lldp_tlvs_buf;
+
+       /* DCBX related MIB */
+       struct dcbx_local_params                local_admin_dcbx_mib;
+       struct dcbx_mib                         remote_dcbx_mib;
+       struct dcbx_mib                         operational_dcbx_mib;
+};
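
Decoding link_status is mask-and-compare against the encodings above; a
hedged sketch for the speed/duplex field, with only two encodings shown
(port is an assumed pointer to this struct):

    u32 ls = port->link_status;
    bool link_up = ls & LINK_STATUS_LINK_UP;
    u32 sd = ls & LINK_STATUS_SPEED_AND_DUPLEX_MASK;
    u32 speed_mbps = 0;

    if (sd == LINK_STATUS_SPEED_AND_DUPLEX_10G)
            speed_mbps = 10000;
    else if (sd == LINK_STATUS_SPEED_AND_DUPLEX_40G)
            speed_mbps = 40000;
    /* ...remaining encodings handled the same way */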
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      F U N C       */
+/*                                    */
+/**************************************/
+
+struct public_func {
+       u32     iscsi_boot_signature;
+       u32     iscsi_boot_block_offset;
+
+       u32     reserved[8];
+
+       u32     config;
+
+       /* E/R/I/D */
+       /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE                   0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING          0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT    0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK               0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT              4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET           0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI              0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE               0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE               0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX                0x00000030
+
+       /* MINBW, MAXBW */
+       /* value range 0..100, in increments of 1% */
+#define FUNC_MF_CFG_MIN_BW_MASK                 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT                8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT              0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK                 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT                16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT              0x00640000
+
+       u32     status;
+#define FUNC_STATUS_VLINK_DOWN                  0x00000001
+
+       u32     mac_upper;  /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK               0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT              0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT            FUNC_MF_CFG_UPPERMAC_MASK
+       u32     mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT            0xffffffff
+
+       u32     fcoe_wwn_port_name_upper;
+       u32     fcoe_wwn_port_name_lower;
+
+       u32     fcoe_wwn_node_name_upper;
+       u32     fcoe_wwn_node_name_lower;
+
+       u32     ovlan_stag; /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK              0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT             0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT           FUNC_MF_CFG_OV_STAG_MASK
+
+       u32     pf_allocation;  /* VFs per PF */
+
+       u32     preserve_data;  /* Will be used by CCM */
+
+       u32     driver_last_activity_ts;
+
+       u32     drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+
+       u32     drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK        0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT       0
+
+#define DRV_ID_MCP_HSI_VER_MASK         0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT        16
+#define DRV_ID_MCP_HSI_VER_CURRENT     BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK            0xff000000
+#define DRV_ID_DRV_TYPE_SHIFT           24
+#define DRV_ID_DRV_TYPE_UNKNOWN         (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX          BIT(DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS         (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG            (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT         (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS         (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE          (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD         (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX             (8 << DRV_ID_DRV_TYPE_SHIFT)
+};
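
A hedged sketch of pulling the bandwidth limits and the MAC out of this
block. The byte order within mac_upper/mac_lower is an assumption (the
usual network-order packing); verify against the driver before relying
on it:

    u32 cfg = func->config;
    u8 min_bw = (cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                FUNC_MF_CFG_MIN_BW_SHIFT;       /* percent, 0..100 */
    u8 max_bw = (cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                FUNC_MF_CFG_MAX_BW_SHIFT;       /* percent, 0..100 */

    u8 mac[6] = {
            (u8)(func->mac_upper >> 8), (u8)func->mac_upper,
            (u8)(func->mac_lower >> 24), (u8)(func->mac_lower >> 16),
            (u8)(func->mac_lower >> 8), (u8)func->mac_lower,
    };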
+
+/**************************************/
+/*                                    */
+/*     P U B L I C       M B          */
+/*                                    */
+/**************************************/
+/* This is the only section that the driver can write to.
+ * Each driver request to set feature parameters is done using a
+ * different command, which is linked to a specific data structure
+ * from the union below.
+ * For huge structures, the common blank structure should be used.
+ */
+
+struct mcp_mac {
+       u32     mac_upper;  /* Upper 16 bits are always zeroes */
+       u32     mac_lower;
+};
+
+struct mcp_val64 {
+       u32     lo;
+       u32     hi;
+};
+
+struct mcp_file_att {
+       u32     nvm_start_addr;
+       u32     len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+       u32     version;
+       u8      name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+union drv_union_data {
+       u32                     ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+       struct mcp_mac          wol_mac;
+
+       struct pmm_phy_cfg      drv_phy_cfg;
+
+       struct mcp_val64        val64; /* For PHY / AVS commands */
+
+       u8                      raw_data[MCP_DRV_NVM_BUF_LEN];
+
+       struct mcp_file_att     file_att;
+
+       u32                     ack_vf_disabled[VF_MAX_STATIC / 32];
+
+       struct drv_version_stc  drv_version;
+};
+
+struct public_drv_mb {
+       u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK                       0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ                   0x10000000
+#define DRV_MSG_CODE_LOAD_DONE                  0x11000000
+#define DRV_MSG_CODE_UNLOAD_REQ                 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE                0x21000000
+#define DRV_MSG_CODE_INIT_PHY                   0x22000000
+       /* Params - FORCE - Reinitialize the link regardless of LFA */
+       /*        - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET                 0x23000000
+
+#define DRV_MSG_CODE_SET_LLDP                   0x24000000
+#define DRV_MSG_CODE_SET_DCBX                   0x25000000
+
+#define DRV_MSG_CODE_NIG_DRAIN                  0x30000000
+
+#define DRV_MSG_CODE_INITIATE_FLR               0x02000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX                0xc0010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN         0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA          0x00020000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT           0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM             0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM            0x00060000
+#define DRV_MSG_CODE_NVM_DEL_FILE               0x00080000
+#define DRV_MSG_CODE_MCP_RESET                  0x00090000
+#define DRV_MSG_CODE_SET_SECURE_MODE            0x000a0000
+#define DRV_MSG_CODE_PHY_RAW_READ               0x000b0000
+#define DRV_MSG_CODE_PHY_RAW_WRITE              0x000c0000
+#define DRV_MSG_CODE_PHY_CORE_READ              0x000d0000
+#define DRV_MSG_CODE_PHY_CORE_WRITE             0x000e0000
+#define DRV_MSG_CODE_SET_VERSION                0x000f0000
+
+#define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
+
+       u32 drv_mb_param;
+
+       /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN         0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP             0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED        0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED         0x00000003
+
+       /* UNLOAD_DONE params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER        0x00000001
+
+       /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE             0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE         0x00000002
+
+       /* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK             0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT            0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK            0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT           1
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK           0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT          3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK   0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT  0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW     0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE   0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT           0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK            0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT              24
+#define DRV_MB_PARAM_NVM_LEN_MASK               0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT             0
+#define DRV_MB_PARAM_PHY_ADDR_MASK              0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT             16
+#define DRV_MB_PARAM_PHY_LANE_MASK              0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT      29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK       0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT             30
+#define DRV_MB_PARAM_PHY_PORT_MASK              0xc0000000
+
+/* Configure VF MSI-X params */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT    0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK     0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT   8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK    0x0000FF00
+
+       u32 fw_mb_header;
+#define FW_MSG_CODE_MASK                        0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE             0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT               0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION           0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA        0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI        0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG       0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE               0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE           0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT             0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION         0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE             0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE               0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS   0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE             0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE               0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT  0x24010000
+#define FW_MSG_CODE_SET_DCBX_DONE               0x25000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE              0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE        0xb0010000
+#define FW_MSG_CODE_FLR_ACK                     0x02000000
+#define FW_MSG_CODE_FLR_NACK                    0x02100000
+
+#define FW_MSG_CODE_NVM_OK                      0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE            0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED       0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND       0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND          0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC     0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR     0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE     0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND          0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED        0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED        0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET              0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE           0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY          0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE            0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK      0x00400000
+#define FW_MSG_CODE_MCP_RESET_REJECT            0x00600000
+#define FW_MSG_CODE_PHY_OK                      0x00110000
+#define FW_MSG_CODE_PHY_ERROR                   0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR       0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK          0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR         0x00150000
+
+#define FW_MSG_SEQ_NUMBER_MASK                  0x0000ffff
+
+       u32     fw_mb_param;
+
+       u32     drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK                      0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK              0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE                  0x00008000
+       u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK                      0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE                  0x00008000
+#define MCP_EVENT_MASK                          0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ        0x00010000
+
+       union drv_union_data union_data;
+};
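
A hedged sketch of the request/response handshake implied by these header
fields; mcp_seq is a hypothetical driver-side counter, not part of this
header:

    u16 seq = ++mcp_seq & DRV_MSG_SEQ_NUMBER_MASK;  /* hypothetical counter */
    mb->drv_mb_header = DRV_MSG_CODE_LOAD_REQ | seq;

    /* FW echoes the sequence; the high 16 bits carry the response code */
    bool done = (mb->fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) == seq;
    u32 resp  = mb->fw_mb_header & FW_MSG_CODE_MASK;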
+
+/* MFW - DRV MB */
+/**********************************************************************
+* Description
+*   Incremental Aggregative
+*   8-bit MFW counter per message
+*   8-bit ack-counter per message
+* Capabilities
+*   Provides up to 256 aggregative messages per type
+*   Provides 4 message types per dword
+*   Each message type maps to a byte offset
+*   Backward compatibility by using sizeof for the counters.
+*   No lock required for 32-bit messages
+* Limitations:
+*   In case of messages greater than 32 bits, a dedicated mechanism
+*   (e.g. a lock) is required to prevent data corruption.
+**********************************************************************/
+enum MFW_DRV_MSG_TYPE {
+       MFW_DRV_MSG_LINK_CHANGE,
+       MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+       MFW_DRV_MSG_VF_DISABLED,
+       MFW_DRV_MSG_LLDP_DATA_UPDATED,
+       MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+       MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+       MFW_DRV_MSG_ERROR_RECOVERY,
+       MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs)    (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id)       (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id)      ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id)        (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+struct public_mfw_mb {
+       u32     sup_msgs;
+       u32     msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+       u32     ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
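
Per the description above, a message is pending when its 8-bit MFW counter
differs from the driver's 8-bit ack counter; a hedged sketch of the check
and the ack (mfw_mb is an assumed pointer to this struct):

    u8 id = MFW_DRV_MSG_LINK_CHANGE;
    u32 dw = MFW_DRV_MSG_DWORD(id);
    u32 msk = MFW_DRV_MSG_MASK(id);

    if ((mfw_mb->msg[dw] & msk) != (mfw_mb->ack[dw] & msk)) {
            /* ...handle the event, then ack by copying the counter byte */
            mfw_mb->ack[dw] = (mfw_mb->ack[dw] & ~msk) |
                              (mfw_mb->msg[dw] & msk);
    }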
+
+/**************************************/
+/*                                    */
+/*     P U B L I C       D A T A      */
+/*                                    */
+/**************************************/
+enum public_sections {
+       PUBLIC_DRV_MB,          /* Points to the first drv_mb of path0 */
+       PUBLIC_MFW_MB,          /* Points to the first mfw_mb of path0 */
+       PUBLIC_GLOBAL,
+       PUBLIC_PATH,
+       PUBLIC_PORT,
+       PUBLIC_FUNC,
+       PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+       u32     ver;
+       u8      name[32];
+};
+
+struct mcp_public_data {
+       /* The sections field is an array */
+       u32                     num_sections;
+       offsize_t               sections[PUBLIC_MAX_SECTIONS];
+       struct public_drv_mb    drv_mb[MCP_GLOB_FUNC_MAX];
+       struct public_mfw_mb    mfw_mb[MCP_GLOB_FUNC_MAX];
+       struct public_global    global;
+       struct public_path      path[MCP_GLOB_PATH_MAX];
+       struct public_port      port[MCP_GLOB_PORT_MAX];
+       struct public_func      func[MCP_GLOB_FUNC_MAX];
+       struct drv_ver_info_stc drv_info;
+};
+
+struct nvm_cfg_mac_address {
+       u32     mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK                             0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET                           0
+
+       u32     mac_addr_lo;
+};
+
+/******************************************
+* nvm_cfg1 structs
+******************************************/
+
+struct nvm_cfg1_glob {
+       u32 generic_cont0;                                      /* 0x0 */
+#define NVM_CFG1_GLOB_BOARD_SWAP_MASK                           0x0000000F
+#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET                         0
+#define NVM_CFG1_GLOB_BOARD_SWAP_NONE                           0x0
+#define NVM_CFG1_GLOB_BOARD_SWAP_PATH                           0x1
+#define NVM_CFG1_GLOB_BOARD_SWAP_PORT                           0x2
+#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH                           0x3
+#define NVM_CFG1_GLOB_MF_MODE_MASK                              0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET                            4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED                        0x0
+#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF                         0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4                             0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0                           0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5                           0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0                           0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD                                0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP                               0x7
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK              0x00001000
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET            12
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED          0x0
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED           0x1
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK                       0x001FE000
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET                     13
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK                      0x1FE00000
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET                    21
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK                         0x20000000
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET                       29
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED                     0x0
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED                      0x1
+#define NVM_CFG1_GLOB_ENABLE_ATC_MASK                           0x40000000
+#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET                         30
+#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED                       0x0
+#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED                        0x1
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK                       0x80000000
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET                     31
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED                   0x0
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED                    0x1
+
+       u32     engineering_change[3];                          /* 0x4 */
+
+       u32     manufacturing_id;                               /* 0x10 */
+
+       u32     serial_number[4];                               /* 0x14 */
+
+       u32     pcie_cfg;                                       /* 0x24 */
+#define NVM_CFG1_GLOB_PCI_GEN_MASK                              0x00000003
+#define NVM_CFG1_GLOB_PCI_GEN_OFFSET                            0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1                          0x0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2                          0x1
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3                          0x2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK                   0x00000004
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET                 2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED               0x0
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED                0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK                         0x00000018
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET                       3
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED               0x0
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED                 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED                  0x2
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED              0x3
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK               0x00000020
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET             5
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED           0x0
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED            0x1
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK                 0x000003C0
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET               6
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK                     0x00001C00
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET                   10
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW                       0x0
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB                      0x1
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB                    0x2
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB                    0x3
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK                     0x001FE000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET                   13
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK                     0x1FE00000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET                   21
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK                      0x60000000
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET                    29
+
+       u32 mgmt_traffic;                                       /* 0x28 */
+#define NVM_CFG1_GLOB_RESERVED60_MASK                           0x00000001
+#define NVM_CFG1_GLOB_RESERVED60_OFFSET                         0
+#define NVM_CFG1_GLOB_RESERVED60_100KHZ                         0x0
+#define NVM_CFG1_GLOB_RESERVED60_400KHZ                         0x1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK                     0x000001FE
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET                   1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK                     0x0001FE00
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET                   9
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK                        0x01FE0000
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET                      17
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK                        0x06000000
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET                      25
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED                    0x0
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII                        0x1
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII                       0x2
+
+       u32 core_cfg;                                           /* 0x2C */
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK                    0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET                  0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G                0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G                0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G               0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F              0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E              0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G                0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G                0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G                0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G                0xD
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK             0x00000100
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET           8
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED         0x0
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED          0x1
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK            0x00000200
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET          9
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED        0x0
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED         0x1
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK                      0x0003FC00
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET                    10
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK                     0x03FC0000
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET                   18
+#define NVM_CFG1_GLOB_AVS_MODE_MASK                             0x1C000000
+#define NVM_CFG1_GLOB_AVS_MODE_OFFSET                           26
+#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP                       0x0
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP                        0x1
+#define NVM_CFG1_GLOB_AVS_MODE_DISABLED                         0x3
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK                 0x60000000
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET               29
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED             0x0
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED              0x1
+
+       u32 e_lane_cfg1;                                        /* 0x30 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK                        0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET                      0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK                        0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET                      4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK                        0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET                      8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK                        0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET                      12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK                        0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET                      16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK                        0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET                      20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK                        0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET                      24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK                        0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET                      28
+
+       u32 e_lane_cfg2;                                        /* 0x34 */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK                    0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET                  0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK                    0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET                  1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK                    0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET                  2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK                    0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET                  3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK                    0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET                  4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK                    0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET                  5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK                    0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET                  6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK                    0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET                  7
+#define NVM_CFG1_GLOB_SMBUS_MODE_MASK                           0x00000F00
+#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET                         8
+#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED                       0x0
+#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ                         0x1
+#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ                         0x2
+#define NVM_CFG1_GLOB_NCSI_MASK                                 0x0000F000
+#define NVM_CFG1_GLOB_NCSI_OFFSET                               12
+#define NVM_CFG1_GLOB_NCSI_DISABLED                             0x0
+#define NVM_CFG1_GLOB_NCSI_ENABLED                              0x1
+
+       u32 f_lane_cfg1;                                        /* 0x38 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK                        0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET                      0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK                        0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET                      4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK                        0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET                      8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK                        0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET                      12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK                        0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET                      16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK                        0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET                      20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK                        0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET                      24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK                        0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET                      28
+
+       u32 f_lane_cfg2;                                        /* 0x3C */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK                    0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET                  0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK                    0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET                  1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK                    0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET                  2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK                    0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET                  3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK                    0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET                  4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK                    0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET                  5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK                    0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET                  6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK                    0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET                  7
+
+       u32 eagle_preemphasis;                                  /* 0x40 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET                       0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET                       8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET                       16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET                       24
+
+       u32 eagle_driver_current;                               /* 0x44 */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK                            0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET                          0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK                            0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET                          8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK                            0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET                          16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK                            0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET                          24
+
+       u32 falcon_preemphasis;                                 /* 0x48 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET                       0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET                       8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET                       16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET                       24
+
+       u32 falcon_driver_current;                              /* 0x4C */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK                            0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET                          0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK                            0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET                          8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK                            0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET                          16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK                            0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET                          24
+
+       u32     pci_id;                                         /* 0x50 */
+#define NVM_CFG1_GLOB_VENDOR_ID_MASK                            0x0000FFFF
+#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET                          0
+
+       u32     pci_subsys_id;                                  /* 0x54 */
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK                  0x0000FFFF
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET                0
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK                  0xFFFF0000
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET                16
+
+       u32     bar;                                            /* 0x58 */
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK                   0x0000000F
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET                 0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED               0x0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K                     0x1
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K                     0x2
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K                     0x3
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K                    0x4
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K                    0x5
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K                    0x6
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K                   0x7
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K                   0x8
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K                   0x9
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M                     0xA
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M                     0xB
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M                     0xC
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M                     0xD
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M                    0xE
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M                    0xF
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK                     0x000000F0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET                   4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED                 0x0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K                       0x1
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K                       0x2
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K                      0x3
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K                      0x4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K                      0x5
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K                     0x6
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K                     0x7
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K                     0x8
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M                       0x9
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M                       0xA
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M                       0xB
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M                       0xC
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M                      0xD
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M                      0xE
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M                      0xF
+#define NVM_CFG1_GLOB_BAR2_SIZE_MASK                            0x00000F00
+#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET                          8
+#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED                        0x0
+#define NVM_CFG1_GLOB_BAR2_SIZE_64K                             0x1
+#define NVM_CFG1_GLOB_BAR2_SIZE_128K                            0x2
+#define NVM_CFG1_GLOB_BAR2_SIZE_256K                            0x3
+#define NVM_CFG1_GLOB_BAR2_SIZE_512K                            0x4
+#define NVM_CFG1_GLOB_BAR2_SIZE_1M                              0x5
+#define NVM_CFG1_GLOB_BAR2_SIZE_2M                              0x6
+#define NVM_CFG1_GLOB_BAR2_SIZE_4M                              0x7
+#define NVM_CFG1_GLOB_BAR2_SIZE_8M                              0x8
+#define NVM_CFG1_GLOB_BAR2_SIZE_16M                             0x9
+#define NVM_CFG1_GLOB_BAR2_SIZE_32M                             0xA
+#define NVM_CFG1_GLOB_BAR2_SIZE_64M                             0xB
+#define NVM_CFG1_GLOB_BAR2_SIZE_128M                            0xC
+#define NVM_CFG1_GLOB_BAR2_SIZE_256M                            0xD
+#define NVM_CFG1_GLOB_BAR2_SIZE_512M                            0xE
+#define NVM_CFG1_GLOB_BAR2_SIZE_1G                              0xF
+
+       u32 eagle_txfir_main;                                   /* 0x5C */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET                   24
+
+       u32 eagle_txfir_post;                                   /* 0x60 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET                   24
+
+       u32 falcon_txfir_main;                                  /* 0x64 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET                   24
+
+       u32 falcon_txfir_post;                                  /* 0x68 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET                   24
+
+       u32 manufacture_ver;                                    /* 0x6C */
+#define NVM_CFG1_GLOB_MANUF0_VER_MASK                           0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET                         0
+#define NVM_CFG1_GLOB_MANUF1_VER_MASK                           0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET                         6
+#define NVM_CFG1_GLOB_MANUF2_VER_MASK                           0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET                         12
+#define NVM_CFG1_GLOB_MANUF3_VER_MASK                           0x00FC0000
+#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET                         18
+#define NVM_CFG1_GLOB_MANUF4_VER_MASK                           0x3F000000
+#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET                         24
+
+       u32 manufacture_time;                                   /* 0x70 */
+#define NVM_CFG1_GLOB_MANUF0_TIME_MASK                          0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET                        0
+#define NVM_CFG1_GLOB_MANUF1_TIME_MASK                          0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET                        6
+#define NVM_CFG1_GLOB_MANUF2_TIME_MASK                          0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET                        12
+
+       u32 led_global_settings;                                /* 0x74 */
+#define NVM_CFG1_GLOB_LED_SWAP_0_MASK                           0x0000000F
+#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET                         0
+#define NVM_CFG1_GLOB_LED_SWAP_1_MASK                           0x000000F0
+#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET                         4
+#define NVM_CFG1_GLOB_LED_SWAP_2_MASK                           0x00000F00
+#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET                         8
+#define NVM_CFG1_GLOB_LED_SWAP_3_MASK                           0x0000F000
+#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET                         12
+
+       u32     generic_cont1;                                  /* 0x78 */
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK                         0x000003FF
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET                       0
+
+       u32     mbi_version;                                    /* 0x7C */
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK                        0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET                      0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK                        0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET                      8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK                        0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET                      16
+
+       u32     mbi_date;                                       /* 0x80 */
+
+       u32     misc_sig;                                       /* 0x84 */
+
+       /* Defines the GPIO mapping used to switch the i2c mux */
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK                   0x000000FF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET                 0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK                   0x0000FF00
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET                 8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA                      0x0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0                   0x1
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1                   0x2
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2                   0x3
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3                   0x4
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4                   0x5
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5                   0x6
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6                   0x7
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7                   0x8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8                   0x9
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9                   0xA
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10                  0xB
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11                  0xC
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12                  0xD
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13                  0xE
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14                  0xF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15                  0x10
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16                  0x11
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17                  0x12
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18                  0x13
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19                  0x14
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20                  0x15
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21                  0x16
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22                  0x17
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23                  0x18
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24                  0x19
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25                  0x1A
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26                  0x1B
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27                  0x1C
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28                  0x1D
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29                  0x1E
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30                  0x1F
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31                  0x20
+
+       u32 reserved[46];                                       /* 0x88 */
+};
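+
+/* Illustrative sketch: every field in these configuration words is read by
+ * masking and shifting with its _MASK/_OFFSET pair, e.g.:
+ *
+ *     u32 mode = (glob->core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+ *                NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET;
+ */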
+
+struct nvm_cfg1_path {
+       u32 reserved[30];                                       /* 0x0 */
+};
+
+struct nvm_cfg1_port {
+       u32 power_dissipated;                                   /* 0x0 */
+#define NVM_CFG1_PORT_POWER_DIS_D0_MASK                         0x000000FF
+#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET                       0
+#define NVM_CFG1_PORT_POWER_DIS_D1_MASK                         0x0000FF00
+#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET                       8
+#define NVM_CFG1_PORT_POWER_DIS_D2_MASK                         0x00FF0000
+#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET                       16
+#define NVM_CFG1_PORT_POWER_DIS_D3_MASK                         0xFF000000
+#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET                       24
+
+       u32 power_consumed;                                     /* 0x4 */
+#define NVM_CFG1_PORT_POWER_CONS_D0_MASK                        0x000000FF
+#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET                      0
+#define NVM_CFG1_PORT_POWER_CONS_D1_MASK                        0x0000FF00
+#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET                      8
+#define NVM_CFG1_PORT_POWER_CONS_D2_MASK                        0x00FF0000
+#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET                      16
+#define NVM_CFG1_PORT_POWER_CONS_D3_MASK                        0xFF000000
+#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET                      24
+
+       u32 generic_cont0;                                      /* 0x8 */
+#define NVM_CFG1_PORT_LED_MODE_MASK                             0x000000FF
+#define NVM_CFG1_PORT_LED_MODE_OFFSET                           0
+#define NVM_CFG1_PORT_LED_MODE_MAC1                             0x0
+#define NVM_CFG1_PORT_LED_MODE_PHY1                             0x1
+#define NVM_CFG1_PORT_LED_MODE_PHY2                             0x2
+#define NVM_CFG1_PORT_LED_MODE_PHY3                             0x3
+#define NVM_CFG1_PORT_LED_MODE_MAC2                             0x4
+#define NVM_CFG1_PORT_LED_MODE_PHY4                             0x5
+#define NVM_CFG1_PORT_LED_MODE_PHY5                             0x6
+#define NVM_CFG1_PORT_LED_MODE_PHY6                             0x7
+#define NVM_CFG1_PORT_LED_MODE_MAC3                             0x8
+#define NVM_CFG1_PORT_LED_MODE_PHY7                             0x9
+#define NVM_CFG1_PORT_LED_MODE_PHY8                             0xA
+#define NVM_CFG1_PORT_LED_MODE_PHY9                             0xB
+#define NVM_CFG1_PORT_LED_MODE_MAC4                             0xC
+#define NVM_CFG1_PORT_LED_MODE_PHY10                            0xD
+#define NVM_CFG1_PORT_LED_MODE_PHY11                            0xE
+#define NVM_CFG1_PORT_LED_MODE_PHY12                            0xF
+#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK                        0x0000FF00
+#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET                      8
+#define NVM_CFG1_PORT_DCBX_MODE_MASK                            0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET                          16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED                        0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE                            0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE                             0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC                         0x3
+
+       u32     pcie_cfg;                                       /* 0xC */
+#define NVM_CFG1_PORT_RESERVED15_MASK                           0x00000007
+#define NVM_CFG1_PORT_RESERVED15_OFFSET                         0
+
+       u32     features;                                       /* 0x10 */
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK           0x00000001
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET         0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED       0x0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED        0x1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK                     0x00000002
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET                   1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED                 0x0
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED                  0x1
+
+       u32 speed_cap_mask;                                     /* 0x14 */
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK            0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET          0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G              0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G             0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G             0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G             0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G             0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G            0x40
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK            0xFFFF0000
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET          16
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G              0x1
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G             0x2
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G             0x8
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G             0x10
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G             0x20
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G            0x40
+
+       u32 link_settings;                                      /* 0x18 */
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK                       0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET                     0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK                     0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET                   4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG                  0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX                       0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX                       0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK                       0x00000780
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET                     7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK                     0x00003800
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET                   11
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG                  0x1
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX                       0x2
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX                       0x4
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK      0x00004000
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET    14
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED  0x0
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED   0x1
+
+       u32 phy_cfg;                                            /* 0x1C */
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK                  0x0000FFFF
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET                0
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG                 0x1
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER             0x2
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER                 0x4
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN       0x8
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN        0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK                 0x00FF0000
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET               16
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS               0x0
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR                   0x2
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2                  0x3
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4                  0x4
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI                  0x8
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI                  0x9
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X                0xB
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII                0xC
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI                0xD
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI                 0xE
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI                0xF
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI                 0x10
+#define NVM_CFG1_PORT_AN_MODE_MASK                              0xFF000000
+#define NVM_CFG1_PORT_AN_MODE_OFFSET                            24
+#define NVM_CFG1_PORT_AN_MODE_NONE                              0x0
+#define NVM_CFG1_PORT_AN_MODE_CL73                              0x1
+#define NVM_CFG1_PORT_AN_MODE_CL37                              0x2
+#define NVM_CFG1_PORT_AN_MODE_CL73_BAM                          0x3
+#define NVM_CFG1_PORT_AN_MODE_CL37_BAM                          0x4
+#define NVM_CFG1_PORT_AN_MODE_HPAM                              0x5
+#define NVM_CFG1_PORT_AN_MODE_SGMII                             0x6
+
+       u32 mgmt_traffic;                                       /* 0x20 */
+#define NVM_CFG1_PORT_RESERVED61_MASK                           0x0000000F
+#define NVM_CFG1_PORT_RESERVED61_OFFSET                         0
+#define NVM_CFG1_PORT_RESERVED61_DISABLED                       0x0
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII                 0x1
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS                0x2
+
+       u32 ext_phy;                                            /* 0x24 */
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK                    0x000000FF
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET                  0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE                    0x0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844                0x1
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK                 0x0000FF00
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET               8
+
+       u32 mba_cfg1;                                           /* 0x28 */
+#define NVM_CFG1_PORT_MBA_MASK                                  0x00000001
+#define NVM_CFG1_PORT_MBA_OFFSET                                0
+#define NVM_CFG1_PORT_MBA_DISABLED                              0x0
+#define NVM_CFG1_PORT_MBA_ENABLED                               0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK                        0x00000006
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET                      1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO                        0x0
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS                         0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H                      0x2
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H                      0x3
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK                       0x00000078
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET                     3
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK                    0x00000080
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET                  7
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S                  0x0
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B                  0x1
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK                0x00000100
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET              8
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED            0x0
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED             0x1
+#define NVM_CFG1_PORT_RESERVED5_MASK                            0x0001FE00
+#define NVM_CFG1_PORT_RESERVED5_OFFSET                          9
+#define NVM_CFG1_PORT_RESERVED5_DISABLED                        0x0
+#define NVM_CFG1_PORT_RESERVED5_2K                              0x1
+#define NVM_CFG1_PORT_RESERVED5_4K                              0x2
+#define NVM_CFG1_PORT_RESERVED5_8K                              0x3
+#define NVM_CFG1_PORT_RESERVED5_16K                             0x4
+#define NVM_CFG1_PORT_RESERVED5_32K                             0x5
+#define NVM_CFG1_PORT_RESERVED5_64K                             0x6
+#define NVM_CFG1_PORT_RESERVED5_128K                            0x7
+#define NVM_CFG1_PORT_RESERVED5_256K                            0x8
+#define NVM_CFG1_PORT_RESERVED5_512K                            0x9
+#define NVM_CFG1_PORT_RESERVED5_1M                              0xA
+#define NVM_CFG1_PORT_RESERVED5_2M                              0xB
+#define NVM_CFG1_PORT_RESERVED5_4M                              0xC
+#define NVM_CFG1_PORT_RESERVED5_8M                              0xD
+#define NVM_CFG1_PORT_RESERVED5_16M                             0xE
+#define NVM_CFG1_PORT_RESERVED5_32M                             0xF
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK                       0x001E0000
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET                     17
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK                 0x00E00000
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET               21
+
+       u32     mba_cfg2;                                       /* 0x2C */
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK                       0x0000FFFF
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET                     0
+#define NVM_CFG1_PORT_MBA_VLAN_MASK                             0x00010000
+#define NVM_CFG1_PORT_MBA_VLAN_OFFSET                           16
+
+       u32     vf_cfg;                                         /* 0x30 */
+#define NVM_CFG1_PORT_RESERVED8_MASK                            0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED8_OFFSET                          0
+#define NVM_CFG1_PORT_RESERVED6_MASK                            0x000F0000
+#define NVM_CFG1_PORT_RESERVED6_OFFSET                          16
+#define NVM_CFG1_PORT_RESERVED6_DISABLED                        0x0
+#define NVM_CFG1_PORT_RESERVED6_4K                              0x1
+#define NVM_CFG1_PORT_RESERVED6_8K                              0x2
+#define NVM_CFG1_PORT_RESERVED6_16K                             0x3
+#define NVM_CFG1_PORT_RESERVED6_32K                             0x4
+#define NVM_CFG1_PORT_RESERVED6_64K                             0x5
+#define NVM_CFG1_PORT_RESERVED6_128K                            0x6
+#define NVM_CFG1_PORT_RESERVED6_256K                            0x7
+#define NVM_CFG1_PORT_RESERVED6_512K                            0x8
+#define NVM_CFG1_PORT_RESERVED6_1M                              0x9
+#define NVM_CFG1_PORT_RESERVED6_2M                              0xA
+#define NVM_CFG1_PORT_RESERVED6_4M                              0xB
+#define NVM_CFG1_PORT_RESERVED6_8M                              0xC
+#define NVM_CFG1_PORT_RESERVED6_16M                             0xD
+#define NVM_CFG1_PORT_RESERVED6_32M                             0xE
+#define NVM_CFG1_PORT_RESERVED6_64M                             0xF
+
+       struct nvm_cfg_mac_address      lldp_mac_address;       /* 0x34 */
+
+       u32                             led_port_settings;      /* 0x3C */
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK                   0x000000FF
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET                 0
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK                   0x0000FF00
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET                 8
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK                   0x00FF0000
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET                 16
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G                      0x1
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G                     0x2
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G                     0x8
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G                     0x10
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G                     0x20
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G                    0x40
+
+       u32 transceiver_00;                                     /* 0x40 */
+
+       /* GPIO mapping of the transceiver "module absent" signal */
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK                     0x000000FF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET                   0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA                       0x0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0                    0x1
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1                    0x2
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2                    0x3
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3                    0x4
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4                    0x5
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5                    0x6
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6                    0x7
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7                    0x8
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8                    0x9
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9                    0xA
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10                   0xB
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11                   0xC
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12                   0xD
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13                   0xE
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14                   0xF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15                   0x10
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16                   0x11
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17                   0x12
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18                   0x13
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19                   0x14
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20                   0x15
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21                   0x16
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22                   0x17
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23                   0x18
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24                   0x19
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25                   0x1A
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26                   0x1B
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27                   0x1C
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28                   0x1D
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29                   0x1E
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30                   0x1F
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31                   0x20
+       /* GPIO mux settings used to switch the i2c mux to this port */
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK                  0x00000F00
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET                8
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK                  0x0000F000
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET                12
+
+       u32 reserved[133];                                      /* 0x44 */
+};
+
+struct nvm_cfg1_func {
+       struct nvm_cfg_mac_address      mac_address;            /* 0x0 */
+
+       u32                             rsrv1;                  /* 0x8 */
+#define NVM_CFG1_FUNC_RESERVED1_MASK                            0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED1_OFFSET                          0
+#define NVM_CFG1_FUNC_RESERVED2_MASK                            0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED2_OFFSET                          16
+
+       u32                             rsrv2;                  /* 0xC */
+#define NVM_CFG1_FUNC_RESERVED3_MASK                            0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED3_OFFSET                          0
+#define NVM_CFG1_FUNC_RESERVED4_MASK                            0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED4_OFFSET                          16
+
+       u32                             device_id;              /* 0x10 */
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK                  0x0000FFFF
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET                0
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK                     0xFFFF0000
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET                   16
+
+       u32                             cmn_cfg;                /* 0x14 */
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK                    0x00000007
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET                  0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE                     0x0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL                     0x1
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP                   0x2
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT              0x3
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT               0x4
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE                    0x7
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK                     0x0007FFF8
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET                   3
+#define NVM_CFG1_FUNC_PERSONALITY_MASK                          0x00780000
+#define NVM_CFG1_FUNC_PERSONALITY_OFFSET                        19
+#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET                      0x0
+#define NVM_CFG1_FUNC_PERSONALITY_ISCSI                         0x1
+#define NVM_CFG1_FUNC_PERSONALITY_FCOE                          0x2
+#define NVM_CFG1_FUNC_PERSONALITY_ROCE                          0x3
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK                     0x7F800000
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET                   23
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK                   0x80000000
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET                 31
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED               0x0
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED                0x1
+
+       u32 pci_cfg;                                            /* 0x18 */
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK                 0x0000007F
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET               0
+#define NVM_CFG1_FUNC_RESERVESD12_MASK                          0x00003F80
+#define NVM_CFG1_FUNC_RESERVESD12_OFFSET                        7
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK                            0x0003C000
+#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET                          14
+#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED                        0x0
+#define NVM_CFG1_FUNC_BAR1_SIZE_64K                             0x1
+#define NVM_CFG1_FUNC_BAR1_SIZE_128K                            0x2
+#define NVM_CFG1_FUNC_BAR1_SIZE_256K                            0x3
+#define NVM_CFG1_FUNC_BAR1_SIZE_512K                            0x4
+#define NVM_CFG1_FUNC_BAR1_SIZE_1M                              0x5
+#define NVM_CFG1_FUNC_BAR1_SIZE_2M                              0x6
+#define NVM_CFG1_FUNC_BAR1_SIZE_4M                              0x7
+#define NVM_CFG1_FUNC_BAR1_SIZE_8M                              0x8
+#define NVM_CFG1_FUNC_BAR1_SIZE_16M                             0x9
+#define NVM_CFG1_FUNC_BAR1_SIZE_32M                             0xA
+#define NVM_CFG1_FUNC_BAR1_SIZE_64M                             0xB
+#define NVM_CFG1_FUNC_BAR1_SIZE_128M                            0xC
+#define NVM_CFG1_FUNC_BAR1_SIZE_256M                            0xD
+#define NVM_CFG1_FUNC_BAR1_SIZE_512M                            0xE
+#define NVM_CFG1_FUNC_BAR1_SIZE_1G                              0xF
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK                        0x03FC0000
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET                      18
+
+       struct nvm_cfg_mac_address      fcoe_node_wwn_mac_addr; /* 0x1C */
+
+       struct nvm_cfg_mac_address      fcoe_port_wwn_mac_addr; /* 0x24 */
+
+       u32                             reserved[9];            /* 0x2C */
+};
+
+struct nvm_cfg1 {
+       struct nvm_cfg1_glob    glob;                           /* 0x0 */
+
+       struct nvm_cfg1_path    path[MCP_GLOB_PATH_MAX];        /* 0x140 */
+
+       struct nvm_cfg1_port    port[MCP_GLOB_PORT_MAX];        /* 0x230 */
+
+       struct nvm_cfg1_func    func[MCP_GLOB_FUNC_MAX];        /* 0xB90 */
+};
+
+/******************************************
+* nvm_cfg structs
+******************************************/
+
+enum nvm_cfg_sections {
+       NVM_CFG_SECTION_NVM_CFG1,
+       NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+       u32             num_sections;
+       u32             sections_offset[NVM_CFG_SECTION_MAX];
+       struct nvm_cfg1 cfg1;
+};
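+
+/* Illustrative sketch: a consumer locates a section through sections_offset[],
+ * indexed by enum nvm_cfg_sections, e.g.:
+ *
+ *     u32 cfg1_offset = cfg->sections_offset[NVM_CFG_SECTION_NVM_CFG1];
+ */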
+
+#define PORT_0          0
+#define PORT_1          1
+#define PORT_2          2
+#define PORT_3          3
+
+extern struct spad_layout g_spad;
+
+#define MCP_SPAD_SIZE                       0x00028000  /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size)                             \
+       (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
+             (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
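+
+/* Worked example: offsets and sizes are stored at dword granularity, so
+ * TO_OFFSIZE(0x100, 0x40) records an offset of 0x40 dwords and a size of
+ * 0x10 dwords in a single u32 (the shift constants are defined elsewhere).
+ */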
+
+enum spad_sections {
+       SPAD_SECTION_TRACE,
+       SPAD_SECTION_NVM_CFG,
+       SPAD_SECTION_PUBLIC,
+       SPAD_SECTION_PRIVATE,
+       SPAD_SECTION_MAX
+};
+
+struct spad_layout {
+       struct nvm_cfg          nvm_cfg;
+       struct mcp_public_data  public_data;
+};
+
+#define CRC_MAGIC_VALUE                     0xDEBB20E3
+#define CRC32_POLYNOMIAL                    0xEDB88320
+#define NVM_CRC_SIZE                            (sizeof(u32))
+
+enum nvm_sw_arbitrator {
+       NVM_SW_ARB_HOST,
+       NVM_SW_ARB_MCP,
+       NVM_SW_ARB_UART,
+       NVM_SW_ARB_RESERVED
+};
+
+/****************************************************************************
+* Boot Strap Region                                                        *
+****************************************************************************/
+struct legacy_bootstrap_region {
+       u32     magic_value;
+#define NVM_MAGIC_VALUE          0x669955aa
+       u32     sram_start_addr;
+       u32     code_len;               /* boot code length (in dwords) */
+       u32     code_start_addr;
+       u32     crc;                    /* 32-bit CRC */
+};
+
+/****************************************************************************
+* Directories Region                                                       *
+****************************************************************************/
+struct nvm_code_entry {
+       u32     image_type;             /* Image type */
+       u32     nvm_start_addr;         /* NVM address of the image */
+       u32     len;                    /* Image length, including CRC */
+       u32     sram_start_addr;
+       u32     sram_run_addr;          /* Relevant only for MIM images */
+};
+
+enum nvm_image_type {
+       NVM_TYPE_TIM1           = 0x01,
+       NVM_TYPE_TIM2           = 0x02,
+       NVM_TYPE_MIM1           = 0x03,
+       NVM_TYPE_MIM2           = 0x04,
+       NVM_TYPE_MBA            = 0x05,
+       NVM_TYPE_MODULES_PN     = 0x06,
+       NVM_TYPE_VPD            = 0x07,
+       NVM_TYPE_MFW_TRACE1     = 0x08,
+       NVM_TYPE_MFW_TRACE2     = 0x09,
+       NVM_TYPE_NVM_CFG1       = 0x0a,
+       NVM_TYPE_L2B            = 0x0b,
+       NVM_TYPE_DIR1           = 0x0c,
+       NVM_TYPE_EAGLE_FW1      = 0x0d,
+       NVM_TYPE_FALCON_FW1     = 0x0e,
+       NVM_TYPE_PCIE_FW1       = 0x0f,
+       NVM_TYPE_HW_SET         = 0x10,
+       NVM_TYPE_LIM            = 0x11,
+       NVM_TYPE_AVS_FW1        = 0x12,
+       NVM_TYPE_DIR2           = 0x13,
+       NVM_TYPE_CCM            = 0x14,
+       NVM_TYPE_EAGLE_FW2      = 0x15,
+       NVM_TYPE_FALCON_FW2     = 0x16,
+       NVM_TYPE_PCIE_FW2       = 0x17,
+       NVM_TYPE_AVS_FW2        = 0x18,
+
+       NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 200
+
+struct nvm_dir {
+       s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK   0x00000001
+#define NVM_DIR_SEQ_MASK        0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+
+#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
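+/* Bit 0 of seq selects which MFW bundle the directory points at next; a seq
+ * whose sequence bits are all set is invalid, presumably because erased
+ * flash reads back as all ones.
+ */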
+
+       u32                     num_images;
+       u32                     rsrv;
+       struct nvm_code_entry   code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) +             \
+                                  (_num_images -                        \
+                                   1) * sizeof(struct nvm_code_entry) + \
+                                  NVM_CRC_SIZE)
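+
+/* Note: sizeof(struct nvm_dir) already covers one code[] entry, so only
+ * (_num_images - 1) further entries are added, plus the trailing CRC word.
+ */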
+
+struct nvm_vpd_image {
+       u32     format_revision;
+#define VPD_IMAGE_VERSION        1
+
+       /* The length of this array depends on the number of VPD fields */
+       u8      vpd_data[1];
+};
+
+/****************************************************************************
+* NVRAM FULL MAP                                                           *
+****************************************************************************/
+#define DIR_ID_1    (0)
+#define DIR_ID_2    (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1    (0)
+#define MFW_BUNDLE_2    (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE    (FLASH_PAGE_SIZE)           /* 4KB */
+#define ASIC_MIM_MAX_SIZE   (300 * FLASH_PAGE_SIZE)     /* 1.2MB */
+#define FPGA_MIM_MAX_SIZE   (25 * FLASH_PAGE_SIZE)      /* 100KB */
+
+#define LIM_MAX_SIZE        ((2 *                                    \
+                             FLASH_PAGE_SIZE) -                      \
+                            sizeof(struct legacy_bootstrap_region) - \
+                            NVM_RSV_SIZE)
+#define LIM_OFFSET          (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE            (44)
+#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
+                              FPGA_MIM_MAX_SIZE)
+#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
+                                 ((idx ==                           \
+                                   NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
+#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
+                                     MIM_MAX_SIZE(is_asic) * 2)
+
+union nvm_dir_union {
+       struct nvm_dir  dir;
+       u8              page[FLASH_PAGE_SIZE];
+};
+
+/*                        Address
+ *  +-------------------+ 0x000000
+ *  |    Bootstrap:     |
+ *  | magic_number      |
+ *  | sram_start_addr   |
+ *  | code_len          |
+ *  | code_start_addr   |
+ *  | crc               |
+ *  +-------------------+ 0x000014
+ *  | rsrv              |
+ *  +-------------------+ 0x000040
+ *  | LIM               |
+ *  +-------------------+ 0x002000
+ *  | Dir1              |
+ *  +-------------------+ 0x003000
+ *  | Dir2              |
+ *  +-------------------+ 0x004000
+ *  | MIM1              |
+ *  +-------------------+ 0x130000
+ *  | MIM2              |
+ *  +-------------------+ 0x25C000
+ *  | Rest Images:      |
+ *  | TIM1/2            |
+ *  | MFW_TRACE1/2      |
+ *  | Eagle/Falcon FW   |
+ *  | PCIE/AVS FW       |
+ *  | MBA/CCM/L2B       |
+ *  | VPD               |
+ *  | optic_modules     |
+ *  |  ...              |
+ *  +-------------------+ 0x400000
+ */
+struct nvm_image {
+/*********** !!!  FIXED SECTIONS  !!! DO NOT MODIFY !!! **********************/
+       /* NVM Offset  (size) */
+       struct legacy_bootstrap_region  bootstrap;
+       u8                              rsrv[NVM_RSV_SIZE];
+       u8                              lim_image[LIM_MAX_SIZE];
+       union nvm_dir_union             dir[MAX_MFW_BUNDLES];
+
+       /* MIM1_IMAGE                              0x004000 (0x12c000) */
+       /* MIM2_IMAGE                              0x130000 (0x12c000) */
+/*********** !!!  FIXED SECTIONS  !!! DO NOT MODIFY !!! **********************/
+};                              /* 0x134 */
+
+#define NVM_OFFSET(f)  ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
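+
+/* NVM_OFFSET() is the classic offsetof() idiom: it yields the byte offset of
+ * field f within struct nvm_image, computed from a NULL base pointer.
+ */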
+
+struct hw_set_info {
+       u32     reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+       u32     bank_num;
+       u32     pf_num;
+       u32     operation;
+#define READ_OP     1
+#define WRITE_OP    2
+#define RMW_SET_OP  3
+#define RMW_CLR_OP  4
+
+       u32     reg_addr;
+       u32     reg_data;
+
+       u32     reset_type;
+#define POR_RESET_TYPE BIT(0)
+#define HARD_RESET_TYPE        BIT(1)
+#define CORE_RESET_TYPE        BIT(2)
+#define MCP_RESET_TYPE BIT(3)
+#define PERSET_ASSERT  BIT(4)
+#define PERSET_DEASSERT        BIT(5)
+};
+
+struct hw_set_image {
+       u32                     format_version;
+#define HW_SET_IMAGE_VERSION        1
+       u32                     no_hw_sets;
+
+       /* The length of this array depends on no_hw_sets */
+       struct hw_set_info      hw_sets[1];
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
new file mode 100644 (file)
index 0000000..ffa9927
--- /dev/null
@@ -0,0 +1,776 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_reg_addr.h"
+
+#define QED_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define QED_BAR_INVALID_OFFSET          (cpu_to_le32(-1))
+
+struct qed_ptt {
+       struct list_head        list_entry;
+       unsigned int            idx;
+       struct pxp_ptt_entry    pxp;
+};
+
+struct qed_ptt_pool {
+       struct list_head        free_list;
+       spinlock_t              lock; /* ptt synchronized access */
+       struct qed_ptt          ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
+                                             GFP_ATOMIC);
+       int i;
+
+       if (!p_pool)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&p_pool->free_list);
+       for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+               p_pool->ptts[i].idx = i;
+               p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
+               p_pool->ptts[i].pxp.pretend.control = 0;
+               if (i >= RESERVED_PTT_MAX)
+                       list_add(&p_pool->ptts[i].list_entry,
+                                &p_pool->free_list);
+       }
+
+       p_hwfn->p_ptt_pool = p_pool;
+       spin_lock_init(&p_pool->lock);
+
+       return 0;
+}
+
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ptt *p_ptt;
+       int i;
+
+       for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+               p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+               p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
+       }
+}
+
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->p_ptt_pool);
+       p_hwfn->p_ptt_pool = NULL;
+}
+
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ptt *p_ptt;
+       unsigned int i;
+
+       /* Take the free PTT from the list */
+       for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+               spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+
+               if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+                       p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
+                                                struct qed_ptt, list_entry);
+                       list_del(&p_ptt->list_entry);
+
+                       spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+
+                       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                                  "allocated ptt %d\n", p_ptt->idx);
+                       return p_ptt;
+               }
+
+               spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+               usleep_range(1000, 2000);
+       }
+
+       DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
+       return NULL;
+}
+
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt)
+{
+       spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+       list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+       spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+}
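The pool hands each PF a fixed set of BAR windows; callers borrow one, use it for register access, and return it. A minimal sketch of the acquire/use/release pattern, assuming a valid p_hwfn (example_read is a hypothetical helper, not part of the driver):

	static int example_read(struct qed_hwfn *p_hwfn, u32 reg, u32 *val)
	{
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

		if (!p_ptt)
			return -EBUSY;	/* pool still empty after ~1-2s of retries */

		*val = qed_rd(p_hwfn, p_ptt, reg);
		qed_ptt_release(p_hwfn, p_ptt);
		return 0;
	}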
+
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt)
+{
+       /* The HW works in dwords; translate the offset to bytes */
+       return le32_to_cpu(p_ptt->pxp.offset) << 2;
+}
+
+static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
+{
+       return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+              p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
+{
+       return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+              p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u32 new_hw_addr)
+{
+       u32 prev_hw_addr;
+
+       prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+       if (new_hw_addr == prev_hw_addr)
+               return;
+
+       /* Update the PTT entry in the admin window */
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "Updating PTT entry %d to offset 0x%x\n",
+                  p_ptt->idx, new_hw_addr);
+
+       /* The HW works in dwords while the address is in bytes */
+       p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
+
+       REG_WR(p_hwfn,
+              qed_ptt_config_addr(p_ptt) +
+              offsetof(struct pxp_ptt_entry, offset),
+              le32_to_cpu(p_ptt->pxp.offset));
+}
+
+static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      u32 hw_addr)
+{
+       u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+       u32 offset;
+
+       offset = hw_addr - win_hw_addr;
+
+       /* Verify the address is within the window */
+       if (hw_addr < win_hw_addr ||
+           offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+               qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+               offset = 0;
+       }
+
+       return qed_ptt_get_bar_addr(p_ptt) + offset;
+}
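qed_set_ptt() acts as a window cache: a hw_addr that already falls inside the PTT's current window is served from the existing mapping, while anything outside re-points the window and restarts the offset at 0. A worked example, assuming a window size of 0x1000 bytes purely for illustration:

	/* window currently mapped at hw 0x20000 (illustrative 0x1000 size):
	 *   hw_addr 0x20ff4 -> offset 0xff4, inside  -> bar_base + 0xff4
	 *   hw_addr 0x21008 -> outside -> window re-pointed to 0x21008,
	 *                                 access served at bar_base + 0
	 */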
+
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+                                    enum reserved_ptts ptt_idx)
+{
+       if (ptt_idx >= RESERVED_PTT_MAX) {
+               DP_NOTICE(p_hwfn,
+                         "Requested PTT %d is out of range\n", ptt_idx);
+               return NULL;
+       }
+
+       return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+void qed_wr(struct qed_hwfn *p_hwfn,
+           struct qed_ptt *p_ptt,
+           u32 hw_addr, u32 val)
+{
+       u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+       REG_WR(p_hwfn, bar_addr, val);
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+                  bar_addr, hw_addr, val);
+}
+
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+          struct qed_ptt *p_ptt,
+          u32 hw_addr)
+{
+       u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+       u32 val = REG_RD(p_hwfn, bar_addr);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+                  bar_addr, hw_addr, val);
+
+       return val;
+}
+
+static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         void *addr,
+                         u32 hw_addr,
+                         size_t n,
+                         bool to_device)
+{
+       u32 dw_count, *host_addr, hw_offset;
+       size_t quota, done = 0;
+       u32 __iomem *reg_addr;
+
+       while (done < n) {
+               quota = min_t(size_t, n - done,
+                             PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+               qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+               hw_offset = qed_ptt_get_bar_addr(p_ptt);
+
+               dw_count = quota / 4;
+               host_addr = (u32 *)((u8 *)addr + done);
+               reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
+               if (to_device)
+                       while (dw_count--)
+                               DIRECT_REG_WR(reg_addr++, *host_addr++);
+               else
+                       while (dw_count--)
+                               *host_addr++ = DIRECT_REG_RD(reg_addr++);
+
+               done += quota;
+       }
+}
+
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    void *dest, u32 hw_addr, size_t n)
+{
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+                  hw_addr, dest, hw_addr, (unsigned long)n);
+
+       qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  u32 hw_addr, void *src, size_t n)
+{
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+                  hw_addr, hw_addr, src, (unsigned long)n);
+
+       qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
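A minimal read-modify-write sketch on top of the two helpers; scratch and QED_EXAMPLE_HW_ADDR are hypothetical, and both copies are chunked through the PTT window by qed_memcpy_hw():

	u32 scratch[4];

	qed_memcpy_from(p_hwfn, p_ptt, scratch, QED_EXAMPLE_HW_ADDR,
			sizeof(scratch));
	scratch[0] |= BIT(0);
	qed_memcpy_to(p_hwfn, p_ptt, QED_EXAMPLE_HW_ADDR, scratch,
		      sizeof(scratch));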
+
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u16 fid)
+{
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+       /* Every pretend undoes any previous pretend, including any
+        * previous port pretend.
+        */
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+       if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+               fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+       p_ptt->pxp.pretend.control = cpu_to_le16(control);
+       p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+
+       REG_WR(p_hwfn,
+              qed_ptt_config_addr(p_ptt) +
+              offsetof(struct pxp_ptt_entry, pretend),
+              *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u8 port_id)
+{
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+       p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+       REG_WR(p_hwfn,
+              qed_ptt_config_addr(p_ptt) +
+              offsetof(struct pxp_ptt_entry, pretend),
+              *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt)
+{
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+       p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+       REG_WR(p_hwfn,
+              qed_ptt_config_addr(p_ptt) +
+              offsetof(struct pxp_ptt_entry, pretend),
+              *(u32 *)&p_ptt->pxp.pretend);
+}
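A hedged sketch of the pretend lifecycle: every register access through p_ptt between the two calls is attributed to the pretended port (SOME_PORT_REG and val are placeholders):

	qed_port_pretend(p_hwfn, p_ptt, 1);		/* speak as port 1 */
	qed_wr(p_hwfn, p_ptt, SOME_PORT_REG, val);	/* lands on port 1 */
	qed_port_unpretend(p_hwfn, p_ptt);		/* back to our own port */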
+
+/* DMAE */
+static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
+                           const u8 is_src_type_grc,
+                           const u8 is_dst_type_grc,
+                           struct qed_dmae_params *p_params)
+{
+       u32 opcode = 0;
+       u16 opcodeB = 0;
+
+       /* Whether the source is the PCIe or the GRC.
+        * 0- The source is the PCIe
+        * 1- The source is the GRC.
+        */
+       opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+                                  : DMAE_CMD_SRC_MASK_PCIE) <<
+                  DMAE_CMD_SRC_SHIFT;
+       opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+                  DMAE_CMD_SRC_PF_ID_SHIFT);
+
+       /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+       opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+                                  : DMAE_CMD_DST_MASK_PCIE) <<
+                  DMAE_CMD_DST_SHIFT;
+       opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+                  DMAE_CMD_DST_PF_ID_SHIFT);
+
+       /* Whether to write a completion word to the completion destination:
+        * 0-Do not write a completion word
+        * 1-Write the completion word
+        */
+       opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
+       opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+                  DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+       if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
+               opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+
+       opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+
+       opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
+
+       /* reset source address in next go */
+       opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+                  DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+       /* reset dest address in next go */
+       opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
+                  DMAE_CMD_DST_ADDR_RESET_SHIFT);
+
+       opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
+                   DMAE_CMD_SRC_VF_ID_SHIFT);
+
+       opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
+                   DMAE_CMD_DST_VF_ID_SHIFT);
+
+       p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
+       p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+}
+
+u32 qed_dmae_idx_to_go_cmd(u8 idx)
+{
+       /* All the DMAE 'go' registers form an array in internal memory */
+       return DMAE_REG_GO_C0 + (idx << 2);
+}
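Worked example: the 'go' registers are consecutive 32-bit words, so channel 3 kicks the fourth register of the array:

	u32 go_addr = qed_dmae_idx_to_go_cmd(3);	/* DMAE_REG_GO_C0 + 12 */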
+
+static int
+qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt)
+{
+       struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+       u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+       int qed_status = 0;
+
+       /* verify the source/destination addresses are non-zero */
+       if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
+            ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+               DP_NOTICE(p_hwfn,
+                         "source or destination address 0 idx_cmd=%d\n"
+                         "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+                          idx_cmd,
+                          le32_to_cpu(command->opcode),
+                          le16_to_cpu(command->opcode_b),
+                          le16_to_cpu(command->length),
+                          le32_to_cpu(command->src_addr_hi),
+                          le32_to_cpu(command->src_addr_lo),
+                          le32_to_cpu(command->dst_addr_hi),
+                          le32_to_cpu(command->dst_addr_lo));
+
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_HW,
+                  "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+                  idx_cmd,
+                  le32_to_cpu(command->opcode),
+                  le16_to_cpu(command->opcode_b),
+                  le16_to_cpu(command->length),
+                  le32_to_cpu(command->src_addr_hi),
+                  le32_to_cpu(command->src_addr_lo),
+                  le32_to_cpu(command->dst_addr_hi),
+                  le32_to_cpu(command->dst_addr_lo));
+
+       /* Copy the command to the DMAE; this must be done before every
+        * call since the source/dest addresses are not reset.
+        * The first 9 DWs are the command registers, the 10th DW is the
+        * GO register, and the rest are result registers
+        * (which are read-only to the client).
+        */
+       for (i = 0; i < DMAE_CMD_SIZE; i++) {
+               u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+                          *(((u32 *)command) + i) : 0;
+
+               qed_wr(p_hwfn, p_ptt,
+                      DMAE_REG_CMD_MEM +
+                      (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+                      (i * sizeof(u32)), data);
+       }
+
+       qed_wr(p_hwfn, p_ptt,
+              qed_dmae_idx_to_go_cmd(idx_cmd),
+              DMAE_GO_VALUE);
+
+       return qed_status;
+}
+
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
+{
+       dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+       struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+       u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+       u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+       *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                    sizeof(u32),
+                                    p_addr,
+                                    GFP_KERNEL);
+       if (!*p_comp) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+               goto err;
+       }
+
+       p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+       *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   sizeof(struct dmae_cmd),
+                                   p_addr, GFP_KERNEL);
+       if (!*p_cmd) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+               goto err;
+       }
+
+       p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+       *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                    sizeof(u32) * DMAE_MAX_RW_SIZE,
+                                    p_addr, GFP_KERNEL);
+       if (!*p_buff) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+               goto err;
+       }
+
+       p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+       return 0;
+err:
+       qed_dmae_info_free(p_hwfn);
+       return -ENOMEM;
+}
+
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
+{
+       dma_addr_t p_phys;
+
+       /* Make sure no DMAE operation is still in flight */
+       mutex_lock(&p_hwfn->dmae_info.mutex);
+
+       if (p_hwfn->dmae_info.p_completion_word) {
+               p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 sizeof(u32),
+                                 p_hwfn->dmae_info.p_completion_word,
+                                 p_phys);
+               p_hwfn->dmae_info.p_completion_word = NULL;
+       }
+
+       if (p_hwfn->dmae_info.p_dmae_cmd) {
+               p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 sizeof(struct dmae_cmd),
+                                 p_hwfn->dmae_info.p_dmae_cmd,
+                                 p_phys);
+               p_hwfn->dmae_info.p_dmae_cmd = NULL;
+       }
+
+       if (p_hwfn->dmae_info.p_intermediate_buffer) {
+               p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 sizeof(u32) * DMAE_MAX_RW_SIZE,
+                                 p_hwfn->dmae_info.p_intermediate_buffer,
+                                 p_phys);
+               p_hwfn->dmae_info.p_intermediate_buffer = NULL;
+       }
+
+       mutex_unlock(&p_hwfn->dmae_info.mutex);
+}
+
+static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
+{
+       u32 wait_cnt = 0;
+       u32 wait_cnt_limit = 10000;
+
+       int qed_status = 0;
+
+       barrier();
+       while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+               udelay(DMAE_MIN_WAIT_TIME);
+               if (++wait_cnt > wait_cnt_limit) {
+                       DP_NOTICE(p_hwfn->cdev,
+                                 "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
+                                 *p_hwfn->dmae_info.p_completion_word,
+                                DMAE_COMPLETION_VAL);
+                       qed_status = -EBUSY;
+                       break;
+               }
+
+               /* to sync the completion_word since we are not
+                * using the volatile keyword for p_completion_word
+                */
+               barrier();
+       }
+
+       if (qed_status == 0)
+               *p_hwfn->dmae_info.p_completion_word = 0;
+
+       return qed_status;
+}
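Illustrative bound: with DMAE_MIN_WAIT_TIME (0x2) microseconds per poll and a 10000-iteration limit, the wait above gives up after roughly 20 ms.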
+
+static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
+                                         struct qed_ptt *p_ptt,
+                                         u64 src_addr,
+                                         u64 dst_addr,
+                                         u8 src_type,
+                                         u8 dst_type,
+                                         u32 length)
+{
+       dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+       struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+       int qed_status = 0;
+
+       switch (src_type) {
+       case QED_DMAE_ADDRESS_GRC:
+       case QED_DMAE_ADDRESS_HOST_PHYS:
+               cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
+               cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
+               break;
+       /* for virtual source addresses we use the intermediate buffer. */
+       case QED_DMAE_ADDRESS_HOST_VIRT:
+               cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
+               cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
+               memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+                      (void *)(uintptr_t)src_addr,
+                      length * sizeof(u32));
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (dst_type) {
+       case QED_DMAE_ADDRESS_GRC:
+       case QED_DMAE_ADDRESS_HOST_PHYS:
+               cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
+               cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
+               break;
+       /* for virtual destination addresses we use the intermediate buffer. */
+       case QED_DMAE_ADDRESS_HOST_VIRT:
+               cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
+               cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       cmd->length = cpu_to_le16((u16)length);
+
+       qed_dmae_post_command(p_hwfn, p_ptt);
+
+       qed_status = qed_dmae_operation_wait(p_hwfn);
+
+       if (qed_status) {
+               DP_NOTICE(p_hwfn,
+                         "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
+                         src_addr,
+                         dst_addr,
+                         length);
+               return qed_status;
+       }
+
+       if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
+               memcpy((void *)(uintptr_t)(dst_addr),
+                      &p_hwfn->dmae_info.p_intermediate_buffer[0],
+                      length * sizeof(u32));
+
+       return 0;
+}
+
+static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   u64 src_addr, u64 dst_addr,
+                                   u8 src_type, u8 dst_type,
+                                   u32 size_in_dwords,
+                                   struct qed_dmae_params *p_params)
+{
+       dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+       u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+       struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+       u64 src_addr_split = 0, dst_addr_split = 0;
+       u16 length_limit = DMAE_MAX_RW_SIZE;
+       int qed_status = 0;
+       u32 offset = 0;
+
+       qed_dmae_opcode(p_hwfn,
+                       (src_type == QED_DMAE_ADDRESS_GRC),
+                       (dst_type == QED_DMAE_ADDRESS_GRC),
+                       p_params);
+
+       cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
+       cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
+       cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);
+
+       /* Split the transfer into chunks of at most length_limit dwords */
+       cnt_split = size_in_dwords / length_limit;
+       length_mod = size_in_dwords % length_limit;
+
+       src_addr_split = src_addr;
+       dst_addr_split = dst_addr;
+
+       for (i = 0; i <= cnt_split; i++) {
+               offset = length_limit * i;
+
+               if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
+                       if (src_type == QED_DMAE_ADDRESS_GRC)
+                               src_addr_split = src_addr + offset;
+                       else
+                               src_addr_split = src_addr + (offset * 4);
+               }
+
+               if (dst_type == QED_DMAE_ADDRESS_GRC)
+                       dst_addr_split = dst_addr + offset;
+               else
+                       dst_addr_split = dst_addr + (offset * 4);
+
+               length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+               /* might be zero on last iteration */
+               if (!length_cur)
+                       continue;
+
+               qed_status = qed_dmae_execute_sub_operation(p_hwfn,
+                                                           p_ptt,
+                                                           src_addr_split,
+                                                           dst_addr_split,
+                                                           src_type,
+                                                           dst_type,
+                                                           length_cur);
+               if (qed_status) {
+                       DP_NOTICE(p_hwfn,
+                                 "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+                                 qed_status,
+                                 src_addr,
+                                 dst_addr,
+                                 length_cur);
+                       break;
+               }
+       }
+
+       return qed_status;
+}
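A worked example of the split arithmetic, assuming for illustration that DMAE_MAX_RW_SIZE is 0x2000 dwords: size_in_dwords = 0x4100 gives cnt_split = 2 and length_mod = 0x100, so the loop issues chunks of 0x2000, 0x2000 and 0x100 dwords. GRC offsets advance in dwords while host offsets advance in bytes (offset * 4), and an exact multiple would make the final iteration see length_cur == 0 and be skipped.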
+
+int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u64 source_addr,
+                     u32 grc_addr,
+                     u32 size_in_dwords,
+                     u32 flags)
+{
+       u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+       struct qed_dmae_params params;
+       int rc;
+
+       memset(&params, 0, sizeof(struct qed_dmae_params));
+       params.flags = flags;
+
+       mutex_lock(&p_hwfn->dmae_info.mutex);
+
+       rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+                                     grc_addr_in_dw,
+                                     QED_DMAE_ADDRESS_HOST_VIRT,
+                                     QED_DMAE_ADDRESS_GRC,
+                                     size_in_dwords, &params);
+
+       mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+       return rc;
+}
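A minimal usage sketch, DMAing a host-virtual buffer into GRC; buf and num_dwords are hypothetical. Because the source type is HOST_VIRT, the data is staged through the DMA-coherent intermediate buffer:

	int rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
				   grc_addr, num_dwords, 0 /* no flags */);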
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+                 enum protocol_type proto,
+                 union qed_qm_pq_params *p_params)
+{
+       u16 pq_id = 0;
+
+       if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
+           !p_params) {
+               DP_NOTICE(p_hwfn,
+                         "Protocol %d received NULL PQ params\n",
+                         proto);
+               return 0;
+       }
+
+       switch (proto) {
+       case PROTOCOLID_CORE:
+               if (p_params->core.tc == LB_TC)
+                       pq_id = p_hwfn->qm_info.pure_lb_pq;
+               else
+                       pq_id = p_hwfn->qm_info.offload_pq;
+               break;
+       case PROTOCOLID_ETH:
+               pq_id = p_params->eth.tc;
+               break;
+       default:
+               pq_id = 0;
+       }
+
+       pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
+
+       return pq_id;
+}
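A minimal lookup sketch, assuming an ETH queue on traffic class 0:

	union qed_qm_pq_params pq_params = { .eth = { .tc = 0 } };
	u16 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);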
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
new file mode 100644 (file)
index 0000000..e56d433
--- /dev/null
@@ -0,0 +1,263 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HW_H
+#define _QED_HW_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+
+/* Forward declaration */
+struct qed_ptt;
+
+enum reserved_ptts {
+       RESERVED_PTT_EDIAG,
+       RESERVED_PTT_USER_SPACE,
+       RESERVED_PTT_MAIN,
+       RESERVED_PTT_DPC,
+       RESERVED_PTT_MAX
+};
+
+enum _dmae_cmd_dst_mask {
+       DMAE_CMD_DST_MASK_NONE  = 0,
+       DMAE_CMD_DST_MASK_PCIE  = 1,
+       DMAE_CMD_DST_MASK_GRC   = 2
+};
+
+enum _dmae_cmd_src_mask {
+       DMAE_CMD_SRC_MASK_PCIE  = 0,
+       DMAE_CMD_SRC_MASK_GRC   = 1
+};
+
+enum _dmae_cmd_crc_mask {
+       DMAE_CMD_COMP_CRC_EN_MASK_NONE  = 0,
+       DMAE_CMD_COMP_CRC_EN_MASK_SET   = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE   0x1
+
+#define DMAE_COMPLETION_VAL     0xD1AE
+#define DMAE_CMD_ENDIANITY      0x2
+
+#define DMAE_CMD_SIZE   14
+#define DMAE_CMD_SIZE_TO_FILL   (DMAE_CMD_SIZE - 5)
+#define DMAE_MIN_WAIT_TIME      0x2
+#define DMAE_MAX_CLIENTS        32
+
+/**
+ * @brief qed_gtt_init - Initialize GTT windows
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return int - success (0), negative on error.
+ */
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_free - Free the PTT pool
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_get_bar_addr - Get PTT's external BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param new_hw_addr
+ * @param p_ptt
+ */
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u32 new_hw_addr);
+
+/**
+ * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct qed_ptt *
+ */
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+                                    enum reserved_ptts ptt_idx);
+
+/**
+ * @brief qed_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param val
+ */
+void qed_wr(struct qed_hwfn *p_hwfn,
+           struct qed_ptt *p_ptt,
+           u32 hw_addr,
+           u32 val);
+
+/**
+ * @brief qed_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ */
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+          struct qed_ptt *p_ptt,
+          u32 hw_addr);
+
+/**
+ * @brief qed_memcpy_from - copy n bytes from BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    void *dest,
+                    u32 hw_addr,
+                    size_t n);
+
+/**
+ * @brief qed_memcpy_to - copy n bytes to BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  u32 hw_addr,
+                  void *src,
+                  size_t n);
+/**
+ * @brief qed_fid_pretend - pretend to another function when
+ *        accessing the ptt window. There is no way to unpretend
+ *        a function. The only way to cancel a pretend is to
+ *        pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of the pxp_pretend structure. May contain
+ *            either a PF or a VF; the port/path fields are don't-care.
+ */
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u16 fid);
+
+/**
+ * @brief qed_port_pretend - pretend to another port when
+ *        accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u8 port_id);
+
+/**
+ * @brief qed_port_unpretend - cancel any previously set port
+ *        pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_dmae_idx_to_go_cmd - map an idx to its DMAE 'go' register
+ * address; declared here since other files require it.
+ * @param idx
+ */
+u32 qed_dmae_idx_to_go_cmd(u8 idx);
+
+/**
+ * @brief qed_dmae_info_alloc - Init the dmae_info structure
+ * which is part of p_hwfn.
+ * @param p_hwfn
+ */
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_dmae_info_free - Free the dmae_info structure
+ * which is part of p_hwfn
+ *
+ * @param p_hwfn
+ */
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
+
+union qed_qm_pq_params {
+       struct {
+               u8 tc;
+       }       core;
+
+       struct {
+               u8      is_vf;
+               u8      vf_id;
+               u8      tc;
+       }       eth;
+};
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+                 enum protocol_type proto,
+                 union qed_qm_pq_params *params);
+
+int qed_init_fw_data(struct qed_dev *cdev,
+                    const u8 *fw_data);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
new file mode 100644 (file)
index 0000000..0b21a55
--- /dev/null
@@ -0,0 +1,798 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+enum cminterface {
+       MCM_SEC,
+       MCM_PRI,
+       UCM_SEC,
+       UCM_PRI,
+       TCM_SEC,
+       TCM_PRI,
+       YCM_SEC,
+       YCM_PRI,
+       XCM_SEC,
+       XCM_PRI,
+       NUM_OF_CM_INTERFACES
+};
+
+/* general constants */
+#define QM_PQ_ELEMENT_SIZE                      4 /* in bytes */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+                                                       QM_PQ_ELEMENT_SIZE, \
+                                                       0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size)       (pq_size ? DIV_ROUND_UP(pq_size, \
+                                                               0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID                        0xffff
+/* feature enable */
+#define QM_BYPASS_EN                            1
+#define QM_BYTE_CRD_EN                          1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF                     4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND             6250000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT          0
+#define QM_WFQ_VP_PQ_PF_SHIFT           5
+#define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL                      4375000
+#define QM_WFQ_INIT_CRD(inc_val)        (2 * (inc_val))
+/* RL constants */
+#define QM_RL_UPPER_BOUND                       6250000
+#define QM_RL_PERIOD                            5               /* in us */
+#define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
+#define QM_RL_INC_VAL(rate)            max_t(u32,      \
+                                             (((rate ? rate : 1000000) \
+                                               * QM_RL_PERIOD) / 8), 1)
+#define QM_RL_MAX_INC_VAL                       4375000
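Worked example for QM_RL_INC_VAL(), assuming rate is given in Mb/s: a 10 Gb/s limit yields (10000 * 5) / 8 = 6250 credit bytes per 5 us period, and rate == 0 falls back to the 1000000 default, i.e. 625000; both are well under QM_RL_MAX_INC_VAL.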
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF           1
+#define QM_OPPOR_FW_STOP_DEF            0
+#define QM_OPPOR_PQ_EMPTY_DEF           1
+#define EAGLE_WORKAROUND_TC                     7
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES                          150
+#define PBF_CMDQ_EAGLE_WORKAROUND_LINES         8
+#define PBF_CMDQ_LINES_RT_OFFSET(voq)           (               \
+               PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+               (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET -      \
+                PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)       (            \
+               PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+               (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET -      \
+                PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines)          ((((pbf_cmd_lines) - \
+                                                  4) *              \
+                                                 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS            38
+#define BTB_HEADROOM_BLOCKS                     BTB_JUMBO_PKT_BLOCKS
+#define BTB_EAGLE_WORKAROUND_BLOCKS     4
+#define BTB_PURE_LB_FACTOR                      10
+#define BTB_PURE_LB_RATIO                       7
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH                   32
+#define QM_STOP_CMD_ADDR                                0x2
+#define QM_STOP_CMD_STRUCT_SIZE                 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
+#define QM_STOP_CMD_PAUSE_MASK_MASK             -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET             1
+#define QM_STOP_CMD_GROUP_ID_SHIFT              16
+#define QM_STOP_CMD_GROUP_ID_MASK               15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET              1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT               24
+#define QM_STOP_CMD_PQ_TYPE_MASK                1
+#define QM_STOP_CMD_MAX_POLL_COUNT              100
+#define QM_STOP_CMD_POLL_PERIOD_US              500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd)                        cmd ## \
+       _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field,                                \
+                        value)        SET_FIELD(var[cmd ## _ ## field ## \
+                                                    _OFFSET],            \
+                                                cmd ## _ ## field,       \
+                                                value)
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port)        ((port) *       \
+                                                (max_phy_tcs_pr_port) \
+                                                + (tc))
+#define LB_VOQ(port)                           ( \
+               MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phy_tcs_pr_port)     \
+       ((tc) <         \
+        LB_TC ? PHYS_VOQ(port,         \
+                         tc,                    \
+                         max_phy_tcs_pr_port) \
+               : LB_VOQ(port))
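A worked example of the VOQ mapping macros: with max_phy_tcs_pr_port == 4, port 1 / TC 2 lands on PHYS_VOQ(1, 2, 4) == 1 * 4 + 2 == 6, while the pure-LB TC of the same port maps past the physical range to LB_VOQ(1) == MAX_PHYS_VOQS + 1.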
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
+                            bool pf_rl_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+       if (pf_rl_en) {
+               /* enable RLs for all VOQs */
+               STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+                            (1 << MAX_NUM_VOQS) - 1);
+               /* write RL period */
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLPFPERIOD_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               /* set credit threshold for QM bypass flow */
+               if (QM_BYPASS_EN)
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+                                    QM_RL_UPPER_BOUND);
+       }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
+                             bool pf_wfq_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+       /* set credit threshold for QM bypass flow */
+       if (pf_wfq_en && QM_BYPASS_EN)
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+                            QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
+                               bool vport_rl_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+                    vport_rl_en ? 1 : 0);
+       if (vport_rl_en) {
+               /* write RL period (use timer 0 only) */
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               /* set credit threshold for QM bypass flow */
+               if (QM_BYPASS_EN)
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+                                    QM_RL_UPPER_BOUND);
+       }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
+                                bool vport_wfq_en)
+{
+       STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+                    vport_wfq_en ? 1 : 0);
+       /* set credit threshold for QM bypass flow */
+       if (vport_wfq_en && QM_BYPASS_EN)
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+                            QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
+                                      u8 voq,
+                                      u16 cmdq_lines)
+{
+       u32 qm_line_crd;
+
+       /* In A0, limit the PBF queue size so that it holds at most 511
+        * commands of the minimum size of 4 (the FCoE minimum size).
+        */
+       bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+
+       if (is_bb_a0)
+               cmdq_lines = min_t(u32, cmdq_lines, 1022);
+       qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+       OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+                        (u32)cmdq_lines);
+       STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+       STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+                    qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void qed_cmdq_lines_rt_init(
+       struct qed_hwfn *p_hwfn,
+       u8 max_ports_per_engine,
+       u8 max_phys_tcs_per_port,
+       struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+       u8 tc, voq, port_id;
+
+       /* clear PBF lines for all VOQs */
+       for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+               STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               if (port_params[port_id].active) {
+                       u16 phys_lines, phys_lines_per_tc;
+                       u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+                       /* find #lines to divide between the active
+                        * physical TCs.
+                        */
+                       phys_lines = port_params[port_id].num_pbf_cmd_lines -
+                                    PBF_CMDQ_PURE_LB_LINES;
+                       /* find #lines per active physical TC */
+                       phys_lines_per_tc = phys_lines / phys_tcs;
+                       /* init registers per active TC */
+                       for (tc = 0; tc < phys_tcs; tc++) {
+                               voq = PHYS_VOQ(port_id, tc,
+                                              max_phys_tcs_per_port);
+                               qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
+                                                          phys_lines_per_tc);
+                       }
+                       /* init registers for pure LB TC */
+                       qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+                                                  PBF_CMDQ_PURE_LB_LINES);
+               }
+       }
+}
+
+static void qed_btb_blocks_rt_init(
+       struct qed_hwfn *p_hwfn,
+       u8 max_ports_per_engine,
+       u8 max_phys_tcs_per_port,
+       struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+       u32 usable_blocks, pure_lb_blocks, phys_blocks;
+       u8 tc, voq, port_id;
+
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               u32 temp;
+               u8 phys_tcs;
+
+               if (!port_params[port_id].active)
+                       continue;
+
+               phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+               /* subtract headroom blocks */
+               usable_blocks = port_params[port_id].num_btb_blocks -
+                               BTB_HEADROOM_BLOCKS;
+
+               /* find blocks per physical TC. Use a factor to avoid
+                * floating-point arithmetic.
+                */
+               pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+                                (phys_tcs * BTB_PURE_LB_FACTOR +
+                                 BTB_PURE_LB_RATIO);
+               pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
+                                      pure_lb_blocks / BTB_PURE_LB_FACTOR);
+               phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+
+               /* init physical TCs */
+               for (tc = 0; tc < phys_tcs; tc++) {
+                       voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+                       STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+                                    phys_blocks);
+               }
+
+               /* init pure LB TC */
+               temp = LB_VOQ(port_id);
+               STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+                            pure_lb_blocks);
+       }
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void qed_tx_pq_map_rt_init(
+       struct qed_hwfn *p_hwfn,
+       struct qed_ptt *p_ptt,
+       struct qed_qm_pf_rt_init_params *p_params,
+       u32 base_mem_addr_4kb)
+{
+       struct init_qm_vport_params *vport_params = p_params->vport_params;
+       u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+       u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+       u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
+                           QM_PF_QUEUE_GROUP_SIZE;
+       bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+       u16 i, pq_id, pq_group;
+
+       /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+       u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+       u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+       u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+       u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+       u32 mem_addr_4kb = base_mem_addr_4kb;
+
+       /* set mapping from PQ group to PF */
+       for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+               STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+                            (u32)(p_params->pf_id));
+       /* set PQ sizes */
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+                    QM_PQ_SIZE_256B(p_params->num_pf_cids));
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+                    QM_PQ_SIZE_256B(p_params->num_vf_cids));
+
+       /* go over all Tx PQs */
+       for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
+               u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+                            p_params->max_phys_tcs_per_port);
+               bool is_vf_pq = (i >= p_params->num_pf_pqs);
+               struct qm_rf_pq_map tx_pq_map;
+
+               /* update first Tx PQ of VPORT/TC */
+               u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
+                                   p_params->start_vport;
+               u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
+               u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+
+               if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+                       /* create new VP PQ */
+                       pq_ids[p_params->pq_params[i].tc_id] = pq_id;
+                       first_tx_pq_id = pq_id;
+                       /* map VP PQ to VOQ and PF */
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_WFQVPMAP_RT_OFFSET +
+                                    first_tx_pq_id,
+                                    (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+                                    (p_params->pf_id <<
+                                     QM_WFQ_VP_PQ_PF_SHIFT));
+               }
+               /* fill PQ map entry */
+               memset(&tx_pq_map, 0, sizeof(tx_pq_map));
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+                         is_vf_pq ? 1 : 0);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+                         is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+                         p_params->pq_params[i].wrr_group);
+               /* write PQ map entry to CAM */
+               STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+                            *((u32 *)&tx_pq_map));
+               /* set base address */
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+                            mem_addr_4kb);
+               /* check if VF PQ */
+               if (is_vf_pq) {
+                       /* if PQ is associated with a VF, add indication
+                        * to PQ VF mask
+                        */
+                       tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+                               (1 << (pq_id % tx_pq_vf_mask_width));
+                       mem_addr_4kb += vport_pq_mem_4kb;
+               } else {
+                       mem_addr_4kb += pq_mem_4kb;
+               }
+       }
+
+       /* store Tx PQ VF mask to size select register */
+       for (i = 0; i < num_tx_pq_vf_masks; i++) {
+               if (tx_pq_vf_mask[i]) {
+                       if (is_bb_a0) {
+                               u32 curr_mask = 0, addr;
+
+                               addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
+                               if (!p_params->is_first_pf)
+                                       curr_mask = qed_rd(p_hwfn, p_ptt,
+                                                          addr);
+
+                               addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+
+                               STORE_RT_REG(p_hwfn, addr,
+                                            curr_mask | tx_pq_vf_mask[i]);
+                       } else {
+                               u32 addr;
+
+                               addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+                               STORE_RT_REG(p_hwfn, addr,
+                                            tx_pq_vf_mask[i]);
+                       }
+               }
+       }
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+                                    u8 port_id,
+                                    u8 pf_id,
+                                    u32 num_pf_cids,
+                                    u32 num_tids,
+                                    u32 base_mem_addr_4kb)
+{
+       u16 i, pq_id;
+
+       /* a single other PQ group is used in each PF,
+        * where PQ group i is used in PF i.
+        */
+       u16 pq_group = pf_id;
+       u32 pq_size = num_pf_cids + num_tids;
+       u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+       u32 mem_addr_4kb = base_mem_addr_4kb;
+
+       /* map PQ group to PF */
+       STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+                    (u32)(pf_id));
+       /* set PQ sizes */
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+                    QM_PQ_SIZE_256B(pq_size));
+       /* set base address */
+       for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+            i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+                            mem_addr_4kb);
+               mem_addr_4kb += pq_mem_4kb;
+       }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
+                             struct qed_qm_pf_rt_init_params *p_params)
+{
+       u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+       u32 crd_reg_offset;
+       u32 inc_val;
+       u16 i;
+
+       if (p_params->pf_id < MAX_NUM_PFS_BB)
+               crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
+       else
+               crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
+                                (p_params->pf_id % MAX_NUM_PFS_BB);
+
+       inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
+       if (inc_val > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+               return -1;
+       }
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+                    inc_val);
+       STORE_RT_REG(p_hwfn,
+                    QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+                    QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+
+       for (i = 0; i < num_tx_pqs; i++) {
+               u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+                            p_params->max_phys_tcs_per_port);
+
+               OVERWRITE_RT_REG(p_hwfn,
+                                crd_reg_offset + voq * MAX_NUM_PFS_BB,
+                                QM_WFQ_INIT_CRD(inc_val) |
+                                QM_WFQ_CRD_REG_SIGN_BIT);
+       }
+
+       return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
+                            u8 pf_id,
+                            u32 pf_rl)
+{
+       u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+       if (inc_val > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+               return -1;
+       }
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+                    QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+                    QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+       return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
+                             u8 start_vport,
+                             u8 num_vports,
+                             struct init_qm_vport_params *vport_params)
+{
+       u8 tc, i, vport_id;
+       u32 inc_val;
+
+       /* go over all PF VPORTs */
+       for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+               u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
+               u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
+
+               if (!vport_params[i].vport_wfq)
+                       continue;
+
+               inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+               if (inc_val > QM_WFQ_MAX_INC_VAL) {
+                       DP_NOTICE(p_hwfn,
+                                 "Invalid VPORT WFQ weight configuration");
+                       return -1;
+               }
+
+               /* each VPORT can have several VPORT PQ IDs for
+                * different TCs
+                */
+               for (tc = 0; tc < NUM_OF_TCS; tc++) {
+                       u16 vport_pq_id = pq_ids[tc];
+
+                       if (vport_pq_id != QM_INVALID_PQ_ID) {
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_WFQVPWEIGHT_RT_OFFSET +
+                                            vport_pq_id, inc_val);
+                               STORE_RT_REG(p_hwfn, temp + vport_pq_id,
+                                            QM_WFQ_UPPER_BOUND |
+                                            QM_WFQ_CRD_REG_SIGN_BIT);
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_WFQVPCRD_RT_OFFSET +
+                                            vport_pq_id,
+                                            QM_WFQ_INIT_CRD(inc_val) |
+                                            QM_WFQ_CRD_REG_SIGN_BIT);
+                       }
+               }
+       }
+
+       return 0;
+}
+
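+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */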
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+                               u8 start_vport,
+                               u8 num_vports,
+                               struct init_qm_vport_params *vport_params)
+{
+       u8 i, vport_id;
+
+       /* go over all PF VPORTs */
+       for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+               u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+
+               if (inc_val > QM_RL_MAX_INC_VAL) {
+                       DP_NOTICE(p_hwfn,
+                                 "Invalid VPORT rate-limit configuration\n");
+                       return -1;
+               }
+
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+                            QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+                            QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+                            inc_val);
+       }
+
+       return 0;
+}
+
+static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt)
+{
+       u32 reg_val, i;
+
+       for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+            i++) {
+               udelay(QM_STOP_CMD_POLL_PERIOD_US);
+               reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+       }
+
+       /* check if we timed out while waiting for SDM command ready */
+       if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "Timeout when waiting for QM SDM command ready signal\n");
+               return false;
+       }
+
+       return true;
+}
+
+static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           u32 cmd_addr,
+                           u32 cmd_data_lsb,
+                           u32 cmd_data_msb)
+{
+       if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+               return false;
+
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+       qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+       return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+u32 qed_qm_pf_mem_size(u8 pf_id,
+                      u32 num_pf_cids,
+                      u32 num_vf_cids,
+                      u32 num_tids,
+                      u16 num_pf_pqs,
+                      u16 num_vf_pqs)
+{
+       return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+              QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+              QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
+
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+                         struct qed_qm_common_rt_init_params *p_params)
+{
+       /* init AFullOprtnstcCrdMask */
+       u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
+                   QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+                  (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+                  (p_params->pf_wfq_en <<
+                   QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+                  (p_params->vport_wfq_en <<
+                   QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+                  (p_params->pf_rl_en <<
+                   QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+                  (p_params->vport_rl_en <<
+                   QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+                  (QM_OPPOR_FW_STOP_DEF <<
+                   QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+                  (QM_OPPOR_PQ_EMPTY_DEF <<
+                   QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+
+       STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+       qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+       qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+       qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+       qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+       qed_cmdq_lines_rt_init(p_hwfn,
+                              p_params->max_ports_per_engine,
+                              p_params->max_phys_tcs_per_port,
+                              p_params->port_params);
+       qed_btb_blocks_rt_init(p_hwfn,
+                              p_params->max_ports_per_engine,
+                              p_params->max_phys_tcs_per_port,
+                              p_params->port_params);
+       return 0;
+}
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     struct qed_qm_pf_rt_init_params *p_params)
+{
+       struct init_qm_vport_params *vport_params = p_params->vport_params;
+       u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
+                                              p_params->num_tids) *
+                                QM_OTHER_PQS_PER_PF;
+       u8 tc, i;
+
+       /* clear first Tx PQ ID array for each VPORT */
+       for (i = 0; i < p_params->num_vports; i++)
+               for (tc = 0; tc < NUM_OF_TCS; tc++)
+                       vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+       /* map Other PQs (if any) */
+       qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
+                                p_params->num_pf_cids, p_params->num_tids, 0);
+
+       /* map Tx PQs */
+       qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+
+       if (p_params->pf_wfq)
+               if (qed_pf_wfq_rt_init(p_hwfn, p_params))
+                       return -1;
+
+       if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
+               return -1;
+
+       if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
+                              p_params->num_vports, vport_params))
+               return -1;
+
+       if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
+                                p_params->num_vports, vport_params))
+               return -1;
+
+       return 0;
+}
+
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  u8 pf_id,
+                  u32 pf_rl)
+{
+       u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+       if (inc_val > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+               return -1;
+       }
+
+       qed_wr(p_hwfn, p_ptt,
+              QM_REG_RLPFCRD + pf_id * 4,
+              QM_RL_CRD_REG_SIGN_BIT);
+       qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+       return 0;
+}
+
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u8 vport_id,
+                     u32 vport_rl)
+{
+       u32 inc_val = QM_RL_INC_VAL(vport_rl);
+
+       if (inc_val > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
+               return -1;
+       }
+
+       qed_wr(p_hwfn, p_ptt,
+              QM_REG_RLGLBLCRD + vport_id * 4,
+              QM_RL_CRD_REG_SIGN_BIT);
+       qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
+       return 0;
+}
+
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         bool is_release_cmd,
+                         bool is_tx_pq,
+                         u16 start_pq,
+                         u16 num_pqs)
+{
+       u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+       u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+
+       /* set command's PQ type */
+       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
+       for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+               /* set PQ bit in mask (stop command only) */
+               if (!is_release_cmd)
+                       pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+               /* if last PQ or end of PQ mask, write command */
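+               /* e.g., assuming QM_STOP_PQ_MASK_WIDTH == 32, stopping
+                * PQs 0..63 issues two commands: group 0, then group 1
+                */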
+               if ((pq_id == last_pq) ||
+                   (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+                    (QM_STOP_PQ_MASK_WIDTH - 1))) {
+                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+                                        PAUSE_MASK, pq_mask);
+                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+                                        GROUP_ID,
+                                        pq_id / QM_STOP_PQ_MASK_WIDTH);
+                       if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
+                                            cmd_arr[0], cmd_arr[1]))
+                               return false;
+                       pq_mask = 0;
+               }
+       }
+
+       return true;
+}
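+
+/* Usage sketch (illustrative only; the callers live outside this patch):
+ * pause a Tx PQ range, then release it once traffic has drained:
+ *
+ *     if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 64))
+ *             return -EBUSY;
+ *     ...
+ *     qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 64);
+ */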
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
new file mode 100644 (file)
index 0000000..796f139
--- /dev/null
@@ -0,0 +1,531 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+#define QED_INIT_MAX_POLL_COUNT 100
+#define QED_INIT_POLL_PERIOD_US 500
+
+static u32 pxp_global_win[] = {
+       0,
+       0,
+       0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+       0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+       0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+       0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+       0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+       0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+       0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+       0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+       0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+       0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+};
+
+void qed_init_iro_array(struct qed_dev *cdev)
+{
+       cdev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
+{
+       int i;
+
+       for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+               p_hwfn->rt_data[i].b_valid = false;
+}
+
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+                          u32 rt_offset,
+                          u32 val)
+{
+       p_hwfn->rt_data[rt_offset].init_val = val;
+       p_hwfn->rt_data[rt_offset].b_valid = true;
+}
+
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+                          u32 rt_offset,
+                          u32 *val,
+                          size_t size)
+{
+       size_t i;
+
+       for (i = 0; i < size / sizeof(u32); i++) {
+               p_hwfn->rt_data[rt_offset + i].init_val = val[i];
+               p_hwfn->rt_data[rt_offset + i].b_valid = true;
+       }
+}
+
+static void qed_init_rt(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt,
+                       u32 addr,
+                       u32 rt_offset,
+                       u32 size)
+{
+       struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
+       u32 i;
+
+       for (i = 0; i < size; i++) {
+               if (!rt_data[i].b_valid)
+                       continue;
+               qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+       }
+}
+
+int qed_init_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_rt_data *rt_data;
+
+       rt_data = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(*rt_data), GFP_ATOMIC);
+       if (!rt_data)
+               return -ENOMEM;
+
+       p_hwfn->rt_data = rt_data;
+
+       return 0;
+}
+
+void qed_init_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->rt_data);
+       p_hwfn->rt_data = NULL;
+}
+
+static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt,
+                              u32 addr,
+                              u32 dmae_data_offset,
+                              u32 size,
+                              const u32 *buf,
+                              bool b_must_dmae,
+                              bool b_can_dmae)
+{
+       int rc = 0;
+
+       /* Perform DMAE only for lengthy enough sections or for wide-bus */
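+       /* e.g., a 4-dword array is written dword-by-dword via qed_wr(),
+        * while a 64-dword array goes through DMAE (when b_can_dmae)
+        */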
+       if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+               const u32 *data = buf + dmae_data_offset;
+               u32 i;
+
+               for (i = 0; i < size; i++)
+                       qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+       } else {
+               rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+                                      (uintptr_t)(buf + dmae_data_offset),
+                                      addr, size, 0);
+       }
+
+       return rc;
+}
+
+static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             u32 addr,
+                             u32 fill,
+                             u32 fill_count)
+{
+       static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+       memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+       /* invoke the DMAE virtual/physical buffer API with
+        * 1. DMAE init channel
+        * 2. addr
+        * 3. zero_buffer (replicated as the source)
+        * 4. fill_count
+        */
+
+       return qed_dmae_host2grc(p_hwfn, p_ptt,
+                                (uintptr_t)(&zero_buffer[0]),
+                                addr, fill_count,
+                                QED_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void qed_init_fill(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         u32 addr,
+                         u32 fill,
+                         u32 fill_count)
+{
+       u32 i;
+
+       for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+               qed_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             struct init_write_op *cmd,
+                             bool b_must_dmae,
+                             bool b_can_dmae)
+{
+       u32 data = le32_to_cpu(cmd->data);
+       u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+       u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+       u32 offset, output_len, input_len, max_size;
+       struct qed_dev *cdev = p_hwfn->cdev;
+       union init_array_hdr *hdr;
+       const u32 *array_data;
+       int rc = 0;
+       u32 size;
+
+       array_data = cdev->fw_data->arr_data;
+
+       hdr = (union init_array_hdr *)(array_data +
+                                      dmae_array_offset);
+       data = le32_to_cpu(hdr->raw.data);
+       switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+       case INIT_ARR_ZIPPED:
+               offset = dmae_array_offset + 1;
+               input_len = GET_FIELD(data,
+                                     INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+               max_size = MAX_ZIPPED_SIZE * 4;
+               memset(p_hwfn->unzip_buf, 0, max_size);
+
+               output_len = qed_unzip_data(p_hwfn, input_len,
+                                           (u8 *)&array_data[offset],
+                                           max_size, (u8 *)p_hwfn->unzip_buf);
+               if (output_len) {
+                       rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+                                                output_len,
+                                                p_hwfn->unzip_buf,
+                                                b_must_dmae, b_can_dmae);
+               } else {
+                       DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
+                       rc = -EINVAL;
+               }
+               break;
+       case INIT_ARR_PATTERN:
+       {
+               u32 repeats = GET_FIELD(data,
+                                       INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+               u32 i;
+
+               size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+               for (i = 0; i < repeats; i++, addr += size << 2) {
+                       rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+                                                dmae_array_offset + 1,
+                                                size, array_data,
+                                                b_must_dmae, b_can_dmae);
+                       if (rc)
+                               break;
+               }
+               break;
+       }
+       case INIT_ARR_STANDARD:
+               size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+               rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+                                        dmae_array_offset + 1,
+                                        size, array_data,
+                                        b_must_dmae, b_can_dmae);
+               break;
+       }
+
+       return rc;
+}
+
+/* init_ops write command */
+static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt,
+                          struct init_write_op *cmd,
+                          bool b_can_dmae)
+{
+       u32 data = le32_to_cpu(cmd->data);
+       u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+       bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+       union init_write_args *arg = &cmd->args;
+       int rc = 0;
+
+       /* Sanitize */
+       if (b_must_dmae && !b_can_dmae) {
+               DP_NOTICE(p_hwfn,
+                         "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
+                         addr);
+               return -EINVAL;
+       }
+
+       switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+       case INIT_SRC_INLINE:
+               qed_wr(p_hwfn, p_ptt, addr,
+                      le32_to_cpu(arg->inline_val));
+               break;
+       case INIT_SRC_ZEROS:
+               if (b_must_dmae ||
+                   (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
+                       rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
+                                               le32_to_cpu(arg->zeros_count));
+               else
+                       qed_init_fill(p_hwfn, p_ptt, addr, 0,
+                                     le32_to_cpu(arg->zeros_count));
+               break;
+       case INIT_SRC_ARRAY:
+               rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+                                       b_must_dmae, b_can_dmae);
+               break;
+       case INIT_SRC_RUNTIME:
+               qed_init_rt(p_hwfn, p_ptt, addr,
+                           le16_to_cpu(arg->runtime.offset),
+                           le16_to_cpu(arg->runtime.size));
+               break;
+       }
+
+       return rc;
+}
+
+static inline bool comp_eq(u32 val, u32 expected_val)
+{
+       return val == expected_val;
+}
+
+static inline bool comp_and(u32 val, u32 expected_val)
+{
+       return (val & expected_val) == expected_val;
+}
+
+static inline bool comp_or(u32 val, u32 expected_val)
+{
+       return (val | expected_val) > 0;
+}
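+
+/* Poll comparators: comp_eq - exact match; comp_and - all expected bits
+ * set; comp_or - true when either value has any bit set.
+ */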
+
+/* init_ops read/poll commands */
+static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           struct init_read_op *cmd)
+{
+       u32 data = le32_to_cpu(cmd->op_data);
+       u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+
+       bool (*comp_check)(u32 val, u32 expected_val);
+       u32 delay = QED_INIT_POLL_PERIOD_US, val;
+
+       val = qed_rd(p_hwfn, p_ptt, addr);
+
+       data = le32_to_cpu(cmd->op_data);
+       if (GET_FIELD(data, INIT_READ_OP_POLL)) {
+               int i;
+
+               switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
+               case INIT_COMPARISON_EQ:
+                       comp_check = comp_eq;
+                       break;
+               case INIT_COMPARISON_OR:
+                       comp_check = comp_or;
+                       break;
+               case INIT_COMPARISON_AND:
+                       comp_check = comp_and;
+                       break;
+               default:
+                       comp_check = NULL;
+                       DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+                              data);
+                       return;
+               }
+
+               for (i = 0;
+                    i < QED_INIT_MAX_POLL_COUNT &&
+                    !comp_check(val, le32_to_cpu(cmd->expected_val));
+                    i++) {
+                       udelay(delay);
+                       val = qed_rd(p_hwfn, p_ptt, addr);
+               }
+
+               if (i == QED_INIT_MAX_POLL_COUNT)
+                       DP_ERR(p_hwfn,
+                              "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
+                              addr, le32_to_cpu(cmd->expected_val),
+                              val, data);
+       }
+}
+
+/* init_ops callbacks entry point */
+static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           struct init_callback_op *p_cmd)
+{
+       DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+}
+
+static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
+                                 u16 *offset,
+                                 int modes)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       const u8 *modes_tree_buf;
+       u8 arg1, arg2, tree_val;
+
+       modes_tree_buf = cdev->fw_data->modes_tree_buf;
+       tree_val = modes_tree_buf[(*offset)++];
+       switch (tree_val) {
+       case INIT_MODE_OP_NOT:
+               return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+       case INIT_MODE_OP_OR:
+               arg1    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               arg2    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               return arg1 | arg2;
+       case INIT_MODE_OP_AND:
+               arg1    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               arg2    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               return arg1 & arg2;
+       default:
+               tree_val -= MAX_INIT_MODE_OPS;
+               return (modes & (1 << tree_val)) ? 1 : 0;
+       }
+}
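+
+/* The modes tree is a prefix-notation boolean expression over mode bits,
+ * e.g. (illustrative; MODE_X/MODE_Y stand for mode-bit indices and are
+ * not symbols from this patch):
+ *
+ *     { INIT_MODE_OP_AND,
+ *       MAX_INIT_MODE_OPS + MODE_X,
+ *       INIT_MODE_OP_NOT, MAX_INIT_MODE_OPS + MODE_Y }
+ *
+ * matches when mode bit X is set and mode bit Y is clear.
+ */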
+
+static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
+                            struct init_if_mode_op *p_cmd,
+                            int modes)
+{
+       u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
+
+       if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
+               return 0;
+       else
+               return GET_FIELD(le32_to_cpu(p_cmd->op_data),
+                                INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
+static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
+                             struct init_if_phase_op *p_cmd,
+                             u32 phase,
+                             u32 phase_id)
+{
+       u32 data = le32_to_cpu(p_cmd->phase_data);
+       u32 op_data = le32_to_cpu(p_cmd->op_data);
+
+       if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+             (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+              GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+               return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
+       else
+               return 0;
+}
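+
+/* Both conditionals return the number of init commands to skip when the
+ * condition does not hold (0 when it does); qed_init_run() adds the
+ * result to cmd_num.
+ */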
+
+int qed_init_run(struct qed_hwfn *p_hwfn,
+                struct qed_ptt *p_ptt,
+                int phase,
+                int phase_id,
+                int modes)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u32 cmd_num, num_init_ops;
+       union init_op *init_ops;
+       bool b_dmae = false;
+       int rc = 0;
+
+       num_init_ops = cdev->fw_data->init_ops_size;
+       init_ops = cdev->fw_data->init_ops;
+
+       p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
+       if (!p_hwfn->unzip_buf) {
+               DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+               return -ENOMEM;
+       }
+
+       for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+               union init_op *cmd = &init_ops[cmd_num];
+               u32 data = le32_to_cpu(cmd->raw.op_data);
+
+               switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+               case INIT_OP_WRITE:
+                       rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+                                            b_dmae);
+                       break;
+               case INIT_OP_READ:
+                       qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+                       break;
+               case INIT_OP_IF_MODE:
+                       cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
+                                                    modes);
+                       break;
+               case INIT_OP_IF_PHASE:
+                       cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+                                                     phase, phase_id);
+                       b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+                       break;
+               case INIT_OP_DELAY:
+                       /* qed_init_run is always invoked from
+                        * a sleepable context
+                        */
+                       udelay(le32_to_cpu(cmd->delay.delay));
+                       break;
+
+               case INIT_OP_CALLBACK:
+                       qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+                       break;
+               }
+
+               if (rc)
+                       break;
+       }
+
+       kfree(p_hwfn->unzip_buf);
+       return rc;
+}
+
+void qed_gtt_init(struct qed_hwfn *p_hwfn)
+{
+       u32 gtt_base;
+       u32 i;
+
+       /* Set the global windows */
+       gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+       for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
+               if (pxp_global_win[i])
+                       REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+                              pxp_global_win[i]);
+}
+
+int qed_init_fw_data(struct qed_dev *cdev,
+                    const u8 *data)
+{
+       struct qed_fw_data *fw = cdev->fw_data;
+       struct bin_buffer_hdr *buf_hdr;
+       u32 offset, len;
+
+       if (!data) {
+               DP_NOTICE(cdev, "Invalid fw data\n");
+               return -EINVAL;
+       }
+
+       buf_hdr = (struct bin_buffer_hdr *)data;
+
+       offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+       fw->init_ops = (union init_op *)(data + offset);
+
+       offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+       fw->arr_data = (u32 *)(data + offset);
+
+       offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+       fw->modes_tree_buf = (u8 *)(data + offset);
+       len = buf_hdr[BIN_BUF_INIT_CMD].length;
+       fw->init_ops_size = len / sizeof(struct init_raw_op);
+
+       return 0;
+}
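+
+/* Layout sketch of the firmware data blob parsed above: a table of
+ * struct bin_buffer_hdr entries (indexed by the BIN_BUF_* constants),
+ * each giving the offset/length of one section:
+ *
+ *     [hdr table][init cmds][init values][modes tree][...]
+ */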
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
new file mode 100644 (file)
index 0000000..1e83204
--- /dev/null
@@ -0,0 +1,110 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INIT_OPS_H
+#define _QED_INIT_OPS_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/**
+ * @brief qed_init_iro_array - init iro_arr.
+ *
+ *
+ * @param cdev
+ */
+void qed_init_iro_array(struct qed_dev *cdev);
+
+/**
+ * @brief qed_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return int
+ */
+int qed_init_run(struct qed_hwfn *p_hwfn,
+                struct qed_ptt *p_ptt,
+                int phase,
+                int phase_id,
+                int modes);
+
+/**
+ * @brief qed_init_alloc - Allocate the RT array and store 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_init_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_free - Free the RT array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+                          u32 rt_offset,
+                          u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val)        \
+       qed_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+       qed_init_store_rt_reg(hwfn, offset, val)
+
+/**
+ * @brief qed_init_store_rt_agg - Store an array of values in consecutive
+ *        RT entries.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ * @param size
+ */
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+                          u32 rt_offset,
+                          u32 *val,
+                          size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+       qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
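+
+/* Usage sketch: storing a DMA address as consecutive RT entries (two
+ * entries when dma_addr_t is 64-bit), as done for the CAU status-block
+ * address in qed_int.c:
+ *
+ *     STORE_RT_REG_AGG(p_hwfn,
+ *                      CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + igu_sb_id * 2,
+ *                      sb_phys);
+ */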
+
+/**
+ * @brief
+ *      Initialize GTT global windows and set admin window
+ *      related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
new file mode 100644 (file)
index 0000000..2e399b6
--- /dev/null
@@ -0,0 +1,1134 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+struct qed_pi_info {
+       qed_int_comp_cb_t       comp_cb;
+       void                    *cookie;
+};
+
+struct qed_sb_sp_info {
+       struct qed_sb_info      sb_info;
+
+       /* per protocol index data */
+       struct qed_pi_info      pi_info_arr[PIS_PER_SB];
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE      (0x3ff)
+struct qed_sb_attn_info {
+       /* Virtual & Physical address of the SB */
+       struct atten_status_block       *sb_attn;
+       dma_addr_t                    sb_phys;
+
+       /* Last seen running index */
+       u16                          index;
+
+       /* Previously asserted attentions, which have not yet been deasserted */
+       u16                          known_attn;
+
+       /* Cleanup address for the link's general hw attention */
+       u32                          mfw_attn_addr;
+};
+
+static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
+                                     struct qed_sb_attn_info   *p_sb_desc)
+{
+       u16     rc = 0;
+       u16     index;
+
+       /* Make certain the HW write took effect */
+       mmiowb();
+
+       index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
+       if (p_sb_desc->index != index) {
+               p_sb_desc->index        = index;
+               rc                    = QED_SB_ATT_IDX;
+       }
+
+       /* Make certain we got a consistent view with HW */
+       mmiowb();
+
+       return rc;
+}
+
+/**
+ *  @brief qed_int_assertion - handles asserted attention bits
+ *
+ *  @param p_hwfn
+ *  @param asserted_bits newly asserted bits
+ *  @return int
+ */
+static int qed_int_assertion(struct qed_hwfn *p_hwfn,
+                            u16 asserted_bits)
+{
+       struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+       u32 igu_mask;
+
+       /* Mask the source of the attention in the IGU */
+       igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                         IGU_REG_ATTENTION_ENABLE);
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+                  igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+       igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+       qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                  "inner known ATTN state: 0x%04x --> 0x%04x\n",
+                  sb_attn_sw->known_attn,
+                  sb_attn_sw->known_attn | asserted_bits);
+       sb_attn_sw->known_attn |= asserted_bits;
+
+       /* Handle MCP events */
+       if (asserted_bits & 0x100) {
+               qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+               /* Clean the MCP attention */
+               qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+                      sb_attn_sw->mfw_attn_addr, 0);
+       }
+
+       DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+                     GTT_BAR0_MAP_REG_IGU_CMD +
+                     ((IGU_CMD_ATTN_BIT_SET_UPPER -
+                       IGU_CMD_INT_ACK_BASE) << 3),
+                     (u32)asserted_bits);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
+                  asserted_bits);
+
+       return 0;
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return int
+ *
+ */
+static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
+                              u16 deasserted_bits)
+{
+       struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+       u32 aeu_mask;
+
+       if (deasserted_bits != 0x100)
+               DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");
+
+       /* Clear IGU indication for the deasserted bits */
+       DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+                     GTT_BAR0_MAP_REG_IGU_CMD +
+                     ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+                       IGU_CMD_INT_ACK_BASE) << 3),
+                     ~((u32)deasserted_bits));
+
+       /* Unmask deasserted attentions in IGU */
+       aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                         IGU_REG_ATTENTION_ENABLE);
+       aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+       qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+       /* Clear deassertion from inner state */
+       sb_attn_sw->known_attn &= ~deasserted_bits;
+
+       return 0;
+}
+
+static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+{
+       struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+       struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+       u32 attn_bits = 0, attn_acks = 0;
+       u16 asserted_bits, deasserted_bits;
+       __le16 index;
+       int rc = 0;
+
+       /* Read current attention bits/acks - safeguard against racing
+        * attention updates by guaranteeing we work on a consistent snapshot
+        */
+       do {
+               index = p_sb_attn->sb_index;
+               attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
+               attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
+       } while (index != p_sb_attn->sb_index);
+       p_sb_attn->sb_index = index;
+
+       /* Assertion / deassertion are meaningful (and in the correct state)
+        * only when they differ and are consistent with the known state -
+        * deassertion when there was a previous attention & there is a
+        * current ack, assertion when there is a current attention with no
+        * previous attention
+        */
+       asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+               ~p_sb_attn_sw->known_attn;
+       deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+               p_sb_attn_sw->known_attn;
+
+       if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
+               DP_INFO(p_hwfn,
+                       "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+                       index, attn_bits, attn_acks, asserted_bits,
+                       deasserted_bits, p_sb_attn_sw->known_attn);
+       } else if (asserted_bits == 0x100) {
+               DP_INFO(p_hwfn,
+                       "MFW indication via attention\n");
+       } else {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                          "MFW indication [deassertion]\n");
+       }
+
+       if (asserted_bits) {
+               rc = qed_int_assertion(p_hwfn, asserted_bits);
+               if (rc)
+                       return rc;
+       }
+
+       if (deasserted_bits) {
+               rc = qed_int_deassertion(p_hwfn, deasserted_bits);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
+static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
+                           void __iomem *igu_addr,
+                           u32 ack_cons)
+{
+       struct igu_prod_cons_update igu_ack = { 0 };
+
+       igu_ack.sb_id_and_flags =
+               ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+                (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+                (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+                (IGU_SEG_ACCESS_ATTN <<
+                 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+       DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
+
+       /* Both segments (interrupts & acks) are written to same place address;
+        * Need to guarantee all commands will be received (in-order) by HW.
+        */
+       mmiowb();
+       barrier();
+}
+
+void qed_int_sp_dpc(unsigned long hwfn_cookie)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+       struct qed_pi_info *pi_info = NULL;
+       struct qed_sb_attn_info *sb_attn;
+       struct qed_sb_info *sb_info;
+       int arr_size;
+       u16 rc = 0;
+
+       if (!p_hwfn) {
+               /* can't use DP_ERR() here since p_hwfn is NULL */
+               pr_err("DPC called - no hwfn!\n");
+               return;
+       }
+
+       if (!p_hwfn->p_sp_sb) {
+               DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
+               return;
+       }
+
+       sb_info = &p_hwfn->p_sp_sb->sb_info;
+       arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+       if (!sb_info) {
+               DP_ERR(p_hwfn->cdev,
+                      "Status block is NULL - cannot ack interrupts\n");
+               return;
+       }
+
+       if (!p_hwfn->p_sb_attn) {
+               DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
+               return;
+       }
+       sb_attn = p_hwfn->p_sb_attn;
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+                  p_hwfn, p_hwfn->my_id);
+
+       /* Disable ack for def status block. Required both for MSI-X and
+        * for INTA in non-mask mode; in INTA it does no harm.
+        */
+       qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+       /* Gather Interrupts/Attentions information */
+       if (!sb_info->sb_virt) {
+               DP_ERR(p_hwfn->cdev,
+                      "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+       } else {
+               u32 tmp_index = sb_info->sb_ack;
+
+               rc = qed_sb_update_sb_idx(sb_info);
+               DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+                          "Interrupt indices: 0x%08x --> 0x%08x\n",
+                          tmp_index, sb_info->sb_ack);
+       }
+
+       if (!sb_attn || !sb_attn->sb_attn) {
+               DP_ERR(p_hwfn->cdev,
+                      "Attentions Status block is NULL - cannot check for new attentions!\n");
+       } else {
+               u16 tmp_index = sb_attn->index;
+
+               rc |= qed_attn_update_idx(p_hwfn, sb_attn);
+               DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+                          "Attention indices: 0x%08x --> 0x%08x\n",
+                          tmp_index, sb_attn->index);
+       }
+
+       /* Check if we expect interrupts at this time; if not, just ack them */
+       if (!(rc & QED_SB_EVENT_MASK)) {
+               qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+               return;
+       }
+
+       /* Check the validity of the DPC ptt. If invalid, ack interrupts and fail */
+       if (!p_hwfn->p_dpc_ptt) {
+               DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
+               qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+               return;
+       }
+
+       if (rc & QED_SB_ATT_IDX)
+               qed_int_attentions(p_hwfn);
+
+       if (rc & QED_SB_IDX) {
+               int pi;
+
+               /* Invoke all registered protocol-index callbacks */
+               for (pi = 0; pi < arr_size; pi++) {
+                       pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+                       if (pi_info->comp_cb)
+                               pi_info->comp_cb(p_hwfn, pi_info->cookie);
+               }
+       }
+
+       if (sb_attn && (rc & QED_SB_ATT_IDX))
+               /* This should be done before the interrupts are enabled,
+                * since otherwise a new attention will be generated.
+                */
+               qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+
+       qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_dev *cdev   = p_hwfn->cdev;
+       struct qed_sb_attn_info *p_sb   = p_hwfn->p_sb_attn;
+
+       if (p_sb) {
+               if (p_sb->sb_attn)
+                       dma_free_coherent(&cdev->pdev->dev,
+                                         SB_ATTN_ALIGNED_SIZE(p_hwfn),
+                                         p_sb->sb_attn,
+                                         p_sb->sb_phys);
+               kfree(p_sb);
+       }
+}
+
+static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt)
+{
+       struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+       memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+       sb_info->index = 0;
+       sb_info->known_attn = 0;
+
+       /* Configure Attention Status Block in IGU */
+       qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+              lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
+       qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+              upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                void *sb_virt_addr,
+                                dma_addr_t sb_phy_addr)
+{
+       struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+       sb_info->sb_attn = sb_virt_addr;
+       sb_info->sb_phys = sb_phy_addr;
+
+       /* Set the address of cleanup for the mcp attention */
+       sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+                                MISC_REG_AEU_GENERAL_ATTN_0;
+
+       qed_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       struct qed_sb_attn_info *p_sb;
+       void *p_virt;
+       dma_addr_t p_phys = 0;
+
+       /* SB struct */
+       p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+       if (!p_sb) {
+               DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+               return -ENOMEM;
+       }
+
+       /* SB ring  */
+       p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+                                   SB_ATTN_ALIGNED_SIZE(p_hwfn),
+                                   &p_phys, GFP_KERNEL);
+
+       if (!p_virt) {
+               DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
+               kfree(p_sb);
+               return -ENOMEM;
+       }
+
+       /* Attention setup */
+       p_hwfn->p_sb_attn = p_sb;
+       qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+       return 0;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define QED_CAU_DEF_RX_USECS 24
+#define QED_CAU_DEF_TX_USECS 48
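+/* e.g., assuming a timer resolution of 1 (QED_CAU_DEF_RX_TIMER_RES == 1):
+ * timeset = 24 >> (1 + 1) = 6, so the timeout is 6 << (1 + 1) = 24 usecs
+ */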
+
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+                          struct cau_sb_entry *p_sb_entry,
+                          u8 pf_id,
+                          u16 vf_number,
+                          u8 vf_valid)
+{
+       u32 cau_state;
+
+       memset(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+       /* setting the time resolution to a fixed value (= 1) */
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+                 QED_CAU_DEF_RX_TIMER_RES);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+                 QED_CAU_DEF_TX_TIMER_RES);
+
+       cau_state = CAU_HC_DISABLE_STATE;
+
+       if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+               cau_state = CAU_HC_ENABLE_STATE;
+               if (!p_hwfn->cdev->rx_coalesce_usecs)
+                       p_hwfn->cdev->rx_coalesce_usecs =
+                               QED_CAU_DEF_RX_USECS;
+               if (!p_hwfn->cdev->tx_coalesce_usecs)
+                       p_hwfn->cdev->tx_coalesce_usecs =
+                               QED_CAU_DEF_TX_USECS;
+       }
+
+       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        dma_addr_t sb_phys,
+                        u16 igu_sb_id,
+                        u16 vf_number,
+                        u8 vf_valid)
+{
+       struct cau_sb_entry sb_entry;
+       u32 val;
+
+       qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+                             vf_number, vf_valid);
+
+       if (p_hwfn->hw_init_done) {
+               val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
+               qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
+               qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
+                      upper_32_bits(sb_phys));
+
+               val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
+               qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
+               qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
+       } else {
+               /* Initialize Status Block Address */
+               STORE_RT_REG_AGG(p_hwfn,
+                                CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+                                igu_sb_id * 2,
+                                sb_phys);
+
+               STORE_RT_REG_AGG(p_hwfn,
+                                CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+                                igu_sb_id * 2,
+                                sb_entry);
+       }
+
+       /* Configure pi coalescing if set */
+       if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+               u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
+                            (QED_CAU_DEF_RX_TIMER_RES + 1);
+               u8 num_tc = 1, i;
+
+               qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+                                   QED_COAL_RX_STATE_MACHINE,
+                                   timeset);
+
+               timeset = p_hwfn->cdev->tx_coalesce_usecs >>
+                         (QED_CAU_DEF_TX_TIMER_RES + 1);
+
+               for (i = 0; i < num_tc; i++) {
+                       qed_int_cau_conf_pi(p_hwfn, p_ptt,
+                                           igu_sb_id, TX_PI(i),
+                                           QED_COAL_TX_STATE_MACHINE,
+                                           timeset);
+               }
+       }
+}
+
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        u16 igu_sb_id,
+                        u32 pi_index,
+                        enum qed_coalescing_fsm coalescing_fsm,
+                        u8 timeset)
+{
+       struct cau_pi_entry pi_entry;
+       u32 sb_offset;
+       u32 pi_offset;
+
+       sb_offset = igu_sb_id * PIS_PER_SB;
+       memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+       SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+       if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+               SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+       else
+               SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+       pi_offset = sb_offset + pi_index;
+       if (p_hwfn->hw_init_done) {
+               qed_wr(p_hwfn, p_ptt,
+                      CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+                      *((u32 *)&(pi_entry)));
+       } else {
+               STORE_RT_REG(p_hwfn,
+                            CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+                            *((u32 *)&(pi_entry)));
+       }
+}
+
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     struct qed_sb_info *sb_info)
+{
+       /* zero status block and ack counter */
+       sb_info->sb_ack = 0;
+       memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+       qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+                           sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief qed_get_igu_sb_id - given a sw sb_id return the
+ *        igu_sb_id
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
+                            u16 sb_id)
+{
+       u16 igu_sb_id;
+
+       /* Assuming a contiguous set of IGU SBs dedicated to the given PF */
+       if (sb_id == QED_SP_SB_ID)
+               igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+       else
+               igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
+                  (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+
+       return igu_sb_id;
+}
+
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_sb_info *sb_info,
+                   void *sb_virt_addr,
+                   dma_addr_t sb_phy_addr,
+                   u16 sb_id)
+{
+       sb_info->sb_virt = sb_virt_addr;
+       sb_info->sb_phys = sb_phy_addr;
+
+       sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+
+       if (sb_id != QED_SP_SB_ID) {
+               p_hwfn->sbs_info[sb_id] = sb_info;
+               p_hwfn->num_sbs++;
+       }
+
+       sb_info->cdev = p_hwfn->cdev;
+
+       /* The igu address will hold the absolute address that needs to be
+        * written to for a specific status block
+        */
+       sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+                                         GTT_BAR0_MAP_REG_IGU_CMD +
+                                         (sb_info->igu_sb_id << 3);
+
+       sb_info->flags |= QED_SB_INFO_INIT;
+
+       qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+       return 0;
+}
+
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+                      struct qed_sb_info *sb_info,
+                      u16 sb_id)
+{
+       if (sb_id == QED_SP_SB_ID) {
+               DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+               return -EINVAL;
+       }
+
+       /* zero status block and ack counter */
+       sb_info->sb_ack = 0;
+       memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+       p_hwfn->sbs_info[sb_id] = NULL;
+       p_hwfn->num_sbs--;
+
+       return 0;
+}
+
+static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+       if (p_sb) {
+               if (p_sb->sb_info.sb_virt)
+                       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                         SB_ALIGNED_SIZE(p_hwfn),
+                                         p_sb->sb_info.sb_virt,
+                                         p_sb->sb_info.sb_phys);
+               kfree(p_sb);
+       }
+}
+
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt)
+{
+       struct qed_sb_sp_info *p_sb;
+       dma_addr_t p_phys = 0;
+       void *p_virt;
+
+       /* SB struct */
+       p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+       if (!p_sb) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+               return -ENOMEM;
+       }
+
+       /* SB ring  */
+       p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   SB_ALIGNED_SIZE(p_hwfn),
+                                   &p_phys, GFP_KERNEL);
+       if (!p_virt) {
+               DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
+               kfree(p_sb);
+               return -ENOMEM;
+       }
+
+       /* Status Block setup */
+       p_hwfn->p_sp_sb = p_sb;
+       qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
+                       p_phys, QED_SP_SB_ID);
+
+       memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+       return 0;
+}
+
+static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt)
+{
+       if (!p_hwfn)
+               return;
+
+       if (p_hwfn->p_sp_sb)
+               qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+       else
+               DP_NOTICE(p_hwfn->cdev,
+                         "Failed to setup Slow path status block - NULL pointer\n");
+
+       if (p_hwfn->p_sb_attn)
+               qed_int_sb_attn_setup(p_hwfn, p_ptt);
+       else
+               DP_NOTICE(p_hwfn->cdev,
+                         "Failed to setup attentions status block - NULL pointer\n");
+}
+
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+                       qed_int_comp_cb_t comp_cb,
+                       void *cookie,
+                       u8 *sb_idx,
+                       __le16 **p_fw_cons)
+{
+       struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+       int qed_status = -ENOMEM;
+       u8 pi;
+
+       /* Look for a free index */
+       for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+               if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
+                       p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+                       p_sp_sb->pi_info_arr[pi].cookie = cookie;
+                       *sb_idx = pi;
+                       *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+                       qed_status = 0;
+                       break;
+               }
+       }
+
+       return qed_status;
+}
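+/* Usage sketch (illustrative only; my_comp_cb and my_cookie are
+ * hypothetical names, not defined in this driver):
+ *
+ *     u8 sb_idx;
+ *     __le16 *p_fw_cons;
+ *
+ *     if (!qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
+ *                              &sb_idx, &p_fw_cons)) {
+ *             // my_comp_cb(p_hwfn, my_cookie) now runs on sp sb updates
+ *     }
+ *     ...
+ *     qed_int_unregister_cb(p_hwfn, sb_idx);
+ */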
+
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
+{
+       struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+       int qed_status = -ENOMEM;
+
+       if (p_sp_sb->pi_info_arr[pi].comp_cb) {
+               p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+               p_sp_sb->pi_info_arr[pi].cookie = NULL;
+               qed_status = 0;
+       }
+
+       return qed_status;
+}
+
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
+{
+       return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           enum qed_int_mode int_mode)
+{
+       u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+       p_hwfn->cdev->int_mode = int_mode;
+       switch (p_hwfn->cdev->int_mode) {
+       case QED_INT_MODE_INTA:
+               igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+               igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+               break;
+
+       case QED_INT_MODE_MSI:
+               igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+               igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+               break;
+
+       case QED_INT_MODE_MSIX:
+               igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+               break;
+       case QED_INT_MODE_POLL:
+               break;
+       }
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
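+/* Summary of the mode-to-bits mapping realized by the switch above
+ * (derived from this function, not from an external spec):
+ *   INTA -> FUNC_EN | ATTN_BIT_EN | INT_LINE_EN | SINGLE_ISR_EN
+ *   MSI  -> FUNC_EN | ATTN_BIT_EN | MSI_MSIX_EN | SINGLE_ISR_EN
+ *   MSIX -> FUNC_EN | ATTN_BIT_EN | MSI_MSIX_EN
+ *   POLL -> FUNC_EN | ATTN_BIT_EN
+ */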
+
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt,
+                       enum qed_int_mode int_mode)
+{
+       int i;
+
+       p_hwfn->b_int_enabled = 1;
+
+       /* Mask non-link attentions */
+       for (i = 0; i < 9; i++)
+               qed_wr(p_hwfn, p_ptt,
+                      MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
+
+       /* Enable interrupt Generation */
+       qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+       /* Configure AEU signal change to produce attentions for link */
+       qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+       qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+
+       /* Flush the writes to IGU */
+       mmiowb();
+
+       /* Unmask AEU signals toward IGU */
+       qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt)
+{
+       p_hwfn->b_int_enabled = 0;
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH                (1000)
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           u32 sb_id,
+                           bool cleanup_set,
+                           u16 opaque_fid)
+{
+       u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+       u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+       u32 data = 0;
+       u32 cmd_ctrl = 0;
+       u32 val = 0;
+       u32 sb_bit = 0;
+       u32 sb_bit_addr = 0;
+
+       /* Set the data field */
+       SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+       SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
+       SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+       /* Set the control register */
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+       barrier();
+
+       qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+       /* Flush the write to IGU */
+       mmiowb();
+
+       /* calculate where to read the status bit from */
+       sb_bit = 1 << (sb_id % 32);
+       sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+       sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
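+       /* Illustrative example (not in the original source): for
+        * sb_id = 37 this yields sb_bit = 1 << 5 and
+        * sb_bit_addr = IGU_REG_CLEANUP_STATUS_0 + 4.
+        */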
+
+       /* Now wait for the command to complete */
+       do {
+               val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
+
+               if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+                       break;
+
+               usleep_range(5000, 10000);
+       } while (--sleep_cnt);
+
+       if (!sleep_cnt)
+               DP_NOTICE(p_hwfn,
+                         "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+                         val, sb_id);
+}
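+/* Worst-case wait budget of the polling loop above: IGU_CLEANUP_SLEEP_LENGTH
+ * (1000) iterations of usleep_range(5000, 10000), i.e. roughly 5-10 seconds
+ * before the timeout notice is printed.
+ */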
+
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    u32 sb_id,
+                                    u16 opaque,
+                                    bool b_set)
+{
+       int pi;
+
+       /* Set */
+       if (b_set)
+               qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+       /* Clear */
+       qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+       /* Clear the CAU for the SB */
+       for (pi = 0; pi < 12; pi++)
+               qed_wr(p_hwfn, p_ptt,
+                      CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
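+/* Note on the function above: it issues an optional cleanup-set followed
+ * by a cleanup-clear command, then zeroes the 12 CAU PI entries of the SB
+ * (12 consecutive u32s starting at CAU_REG_PI_MEMORY + sb_id * 48).
+ */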
+
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             bool b_set,
+                             bool b_slowpath)
+{
+       u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+       u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+       u32 sb_id = 0;
+       u32 val = 0;
+
+       val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+       val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+       val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+       qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                  "IGU cleaning SBs [%d,...,%d]\n",
+                  igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+       for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+               qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+                                               p_hwfn->hw_info.opaque_fid,
+                                               b_set);
+
+       if (b_slowpath) {
+               sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+               DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                          "IGU cleaning slowpath SB [%d]\n", sb_id);
+               qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+                                               p_hwfn->hw_info.opaque_fid,
+                                               b_set);
+       }
+}
+
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt)
+{
+       struct qed_igu_info *p_igu_info;
+       struct qed_igu_block *blk;
+       u32 val;
+       u16 sb_id;
+       u16 prev_sb_id = 0xFF;
+
+       p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);
+
+       if (!p_hwfn->hw_info.p_igu_info)
+               return -ENOMEM;
+
+       p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+       /* Initialize base sb / sb cnt for PFs */
+       p_igu_info->igu_base_sb         = 0xffff;
+       p_igu_info->igu_sb_cnt          = 0;
+       p_igu_info->igu_dsb_id          = 0xffff;
+       p_igu_info->igu_base_sb_iov     = 0xffff;
+
+       for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+            sb_id++) {
+               blk = &p_igu_info->igu_map.igu_blocks[sb_id];
+
+               val = qed_rd(p_hwfn, p_ptt,
+                            IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+
+               /* stop scanning when hit first invalid PF entry */
+               if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+                   GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+                       break;
+
+               blk->status = QED_IGU_STATUS_VALID;
+               blk->function_id = GET_FIELD(val,
+                                            IGU_MAPPING_LINE_FUNCTION_NUMBER);
+               blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+               blk->vector_number = GET_FIELD(val,
+                                              IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+               DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                          "IGU_BLOCK[%d] val:%x func_id = %d is_pf = %d vector_num = 0x%x\n",
+                          sb_id, val, blk->function_id, blk->is_pf,
+                          blk->vector_number);
+
+               if (blk->is_pf) {
+                       if (blk->function_id == p_hwfn->rel_pf_id) {
+                               blk->status |= QED_IGU_STATUS_PF;
+
+                               if (blk->vector_number == 0) {
+                                       if (p_igu_info->igu_dsb_id == 0xffff)
+                                               p_igu_info->igu_dsb_id = sb_id;
+                               } else {
+                                       if (p_igu_info->igu_base_sb ==
+                                           0xffff) {
+                                               p_igu_info->igu_base_sb = sb_id;
+                                       } else if (prev_sb_id != sb_id - 1) {
+                                               DP_NOTICE(p_hwfn->cdev,
+                                                         "consecutive igu vectors for HWFN %x broken\n",
+                                                         p_hwfn->rel_pf_id);
+                                               break;
+                                       }
+                                       prev_sb_id = sb_id;
+                                       /* we don't count the default */
+                                       (p_igu_info->igu_sb_cnt)++;
+                               }
+                       }
+               }
+       }
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                  "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+                  p_igu_info->igu_base_sb,
+                  p_igu_info->igu_sb_cnt,
+                  p_igu_info->igu_dsb_id);
+
+       if (p_igu_info->igu_base_sb == 0xffff ||
+           p_igu_info->igu_dsb_id == 0xffff ||
+           p_igu_info->igu_sb_cnt == 0) {
+               DP_NOTICE(p_hwfn,
+                         "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+                          p_igu_info->igu_base_sb,
+                          p_igu_info->igu_sb_cnt,
+                          p_igu_info->igu_dsb_id);
+               return -EINVAL;
+       }
+
+       return 0;
+}
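+/* In short, the CAM scan above stops at the first invalid PF entry, treats
+ * this PF's vector-0 entry as the default SB (igu_dsb_id), records the
+ * first non-zero vector as igu_base_sb and counts the consecutive vectors
+ * that follow in igu_sb_cnt (the default SB is not counted); a gap in the
+ * vector numbers aborts the scan with a notice.
+ */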
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
+{
+       u32 igu_pf_conf = 0;
+
+       igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+
+       STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
+{
+       u64 intr_status = 0;
+       u32 intr_status_lo = 0;
+       u32 intr_status_hi = 0;
+       u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
+                              IGU_CMD_INT_ACK_BASE;
+       u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
+                              IGU_CMD_INT_ACK_BASE;
+
+       intr_status_lo = REG_RD(p_hwfn,
+                               GTT_BAR0_MAP_REG_IGU_CMD +
+                               lsb_igu_cmd_addr * 8);
+       intr_status_hi = REG_RD(p_hwfn,
+                               GTT_BAR0_MAP_REG_IGU_CMD +
+                               msb_igu_cmd_addr * 8);
+       intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+       return intr_status;
+}
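+/* Example of the 64-bit combine above (illustrative values): with
+ * intr_status_hi = 0x00000001 and intr_status_lo = 0x80000000, the
+ * returned status is 0x0000000180000000.
+ */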
+
+static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
+{
+       tasklet_init(p_hwfn->sp_dpc,
+                    qed_int_sp_dpc, (unsigned long)p_hwfn);
+       p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
+{
+       p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
+       if (!p_hwfn->sp_dpc)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->sp_dpc);
+}
+
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt)
+{
+       int rc = 0;
+
+       rc = qed_int_sp_dpc_alloc(p_hwfn);
+       if (rc) {
+               DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+               return rc;
+       }
+       rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
+       if (rc) {
+               DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+               return rc;
+       }
+       rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
+       if (rc) {
+               DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
+               return rc;
+       }
+       return rc;
+}
+
+void qed_int_free(struct qed_hwfn *p_hwfn)
+{
+       qed_int_sp_sb_free(p_hwfn);
+       qed_int_sb_attn_free(p_hwfn);
+       qed_int_sp_dpc_free(p_hwfn);
+}
+
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt)
+{
+       qed_int_sp_sb_setup(p_hwfn, p_ptt);
+       qed_int_sp_dpc_setup(p_hwfn);
+}
+
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+                       int *p_iov_blks)
+{
+       struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+       if (!info)
+               return 0;
+
+       if (p_iov_blks)
+               *p_iov_blks = info->free_blks;
+
+       return info->igu_sb_cnt;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
new file mode 100644 (file)
index 0000000..16b5751
--- /dev/null
@@ -0,0 +1,391 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INT_H
+#define _QED_INT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN       (0x1 << 0)    /* function enable        */
+#define IGU_PF_CONF_MSI_MSIX_EN   (0x1 << 1)    /* MSI/MSIX enable        */
+#define IGU_PF_CONF_INT_LINE_EN   (0x1 << 2)    /* INT enable             */
+#define IGU_PF_CONF_ATTN_BIT_EN   (0x1 << 3)    /* attention enable       */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)    /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE     (0x1 << 5)    /* simd all ones mode     */
+
+/* IGU control commands */
+enum igu_ctrl_cmd {
+       IGU_CTRL_CMD_TYPE_RD,
+       IGU_CTRL_CMD_TYPE_WR,
+       MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register */
+struct igu_ctrl_reg {
+       u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK           0xFFFF  /* Opaque_FID   */
+#define IGU_CTRL_REG_FID_SHIFT          0
+#define IGU_CTRL_REG_PXP_ADDR_MASK      0xFFF   /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT     16
+#define IGU_CTRL_REG_RESERVED_MASK      0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT     28
+#define IGU_CTRL_REG_TYPE_MASK          0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT         31
+};
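+/* Sketch of composing ctrl_data from these fields, assuming the driver's
+ * SET_FIELD(reg, name, val) macro ORs ((val) & name##_MASK) << name##_SHIFT
+ * into reg (see the usage in qed_int_igu_cleanup_sb()):
+ *
+ *     u32 ctrl = 0;
+ *
+ *     SET_FIELD(ctrl, IGU_CTRL_REG_FID, opaque_fid);             // bits 0-15
+ *     SET_FIELD(ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);          // bits 16-27
+ *     SET_FIELD(ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);  // bit 31
+ */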
+
+enum qed_coalescing_fsm {
+       QED_COAL_RX_STATE_MACHINE,
+       QED_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief qed_int_cau_conf_pi - configure cau for a given
+ *        status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param state
+ * @param timeset
+ */
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        u16 igu_sb_id,
+                        u32 pi_index,
+                        enum qed_coalescing_fsm coalescing_fsm,
+                        u8 timeset);
+
+/**
+ * @brief qed_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           enum qed_int_mode int_mode);
+
+/**
+ * @brief qed_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_igu_read_sisr_reg - Reads the single-ISR-multiple-DPC
+ *        register from the IGU.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
+
+#define QED_SP_SB_ID 0xffff
+/**
+ * @brief qed_int_sb_init - Initializes the sb_info structure.
+ *
+ * Once the structure is initialized it can be passed to sb-related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      points to an uninitialized (but
+ *                     allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id        the sb_id to be used (zero based in driver)
+ *                     should use QED_SP_SB_ID for SP Status block
+ *
+ * @return int
+ */
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_sb_info *sb_info,
+                   void *sb_virt_addr,
+                   dma_addr_t sb_phy_addr,
+                   u16 sb_id);
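+/* Typical lifecycle sketch for a non-slowpath status block (illustrative
+ * only; dev, sb_size, sb_virt and sb_phys are hypothetical locals):
+ *
+ *     sb_virt = dma_alloc_coherent(dev, sb_size, &sb_phys, GFP_KERNEL);
+ *     qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt, sb_phys, sb_id);
+ *     ...
+ *     qed_int_sb_release(p_hwfn, sb_info, sb_id);
+ *     dma_free_coherent(dev, sb_size, sb_virt, sb_phys);
+ */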
+/**
+ * @brief qed_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      initialized sb_info structure
+ */
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     struct qed_sb_info *sb_info);
+
+/**
+ * @brief qed_int_sb_release - releases the sb_info structure.
+ *
+ * Once the structure is released, its memory can be freed.
+ *
+ * @param p_hwfn
+ * @param sb_info      points to an allocated sb_info structure
+ * @param sb_id                the sb_id to be used (zero based in driver)
+ *                     should never be equal to QED_SP_SB_ID
+ *                     (SP Status block)
+ *
+ * @return int
+ */
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+                      struct qed_sb_info *sb_info,
+                      u16 sb_id);
+
+/**
+ * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
+ *        default status block.
+ *
+ * @param hwfn_cookie - cookie carrying the hwfn pointer
+ *
+ */
+void qed_int_sp_dpc(unsigned long hwfn_cookie);
+
+/**
+ * @brief qed_int_get_num_sbs - get the number of status
+ *        blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_iov_blks - configured free blks for vfs
+ *
+ * @return int - number of status blocks configured
+ */
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+                       int *p_iov_blks);
+
+/**
+ * @file
+ *
+ * @brief Interrupt handler
+ */
+
+#define QED_CAU_DEF_RX_TIMER_RES 0
+#define QED_CAU_DEF_TX_TIMER_RES 0
+
+#define QED_SB_ATT_IDX  0x0001
+#define QED_SB_EVENT_MASK       0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn)        \
+       ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+struct qed_igu_block {
+       u8      status;
+#define QED_IGU_STATUS_FREE     0x01
+#define QED_IGU_STATUS_VALID    0x02
+#define QED_IGU_STATUS_PF       0x04
+
+       u8      vector_number;
+       u8      function_id;
+       u8      is_pf;
+};
+
+struct qed_igu_map {
+       struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+struct qed_igu_info {
+       struct qed_igu_map      igu_map;
+       u16                     igu_dsb_id;
+       u16                     igu_base_sb;
+       u16                     igu_base_sb_iov;
+       u16                     igu_sb_cnt;
+       u16                     igu_sb_cnt_iov;
+       u16                     free_blks;
+};
+
+/* TODO: Names of these functions may change... */
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             bool b_set,
+                             bool b_slowpath);
+
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ *     This function needs to be called during hardware
+ *     prepare. It reads the info from igu cam to know which
+ *     status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt);
+
+typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
+                                void *cookie);
+/**
+ * @brief qed_int_register_cb - Register a callback function for
+ *      the slowhwfn status block.
+ *
+ *     Every protocol that uses the slowhwfn status block
+ *     should register a callback function that will be called
+ *     once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ *                  interrupt on the sp sb
+ *
+ * @param cookie  - passed to the callback function
+ * @param sb_idx  - OUT parameter which gives the chosen index
+ *                  for this protocol.
+ * @param p_fw_cons  - pointer to the actual address of the
+ *                     consumer for this protocol.
+ *
+ * @return int
+ */
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+                       qed_int_comp_cb_t comp_cb,
+                       void *cookie,
+                       u8 *sb_idx,
+                       __le16 **p_fw_cons);
+
+/**
+ * @brief qed_int_unregister_cb - Unregisters callback
+ *      function from sp sb.
+ *      Partner of qed_int_register_cb -> should be called
+ *      when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return int
+ */
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
+                         u8 pi);
+
+/**
+ * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ *        block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id                - igu status block id
+ * @param cleanup_set  - set(1) / clear(0)
+ * @param opaque_fid    - the function for which to perform
+ *                     cleanup, for example a PF on behalf of
+ *                     its VFs.
+ */
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           u32 sb_id,
+                           bool cleanup_set,
+                           u16 opaque_fid);
+
+/**
+ * @brief Single status block cleanup. Issues an optional cleanup-set
+ *        and a cleanup-clear, then zeroes the CAU PI entries of the SB.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id                - igu status block id
+ * @param opaque       - opaque fid of the sb owner.
+ * @param b_set        - set(1) / clear(0)
+ */
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    u32 sb_id,
+                                    u16 opaque,
+                                    bool b_set);
+
+/**
+ * @brief qed_int_cau_conf_sb - configure cau for a given status
+ *        block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        dma_addr_t sb_phys,
+                        u16 igu_sb_id,
+                        u16 vf_number,
+                        u8 vf_valid);
+
+/**
+ * @brief qed_int_alloc
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_free
+ *
+ * @param p_hwfn
+ */
+void qed_int_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_setup
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ */
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt,
+                       enum qed_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+                          struct cau_sb_entry *p_sb_entry,
+                          u8 pf_id,
+                          u16 vf_number,
+                          u8 vf_valid);
+
+#define QED_MAPPING_MEMORY_SIZE(dev)   (NUM_OF_SBS(dev))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
new file mode 100644 (file)
index 0000000..f72036a
--- /dev/null
@@ -0,0 +1,1704 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include <linux/qed/qed_eth_if.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+enum qed_rss_caps {
+       QED_RSS_IPV4            = 0x1,
+       QED_RSS_IPV6            = 0x2,
+       QED_RSS_IPV4_TCP        = 0x4,
+       QED_RSS_IPV6_TCP        = 0x8,
+       QED_RSS_IPV4_UDP        = 0x10,
+       QED_RSS_IPV6_UDP        = 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
+
+struct qed_rss_params {
+       u8      update_rss_config;
+       u8      rss_enable;
+       u8      rss_eng_id;
+       u8      update_rss_capabilities;
+       u8      update_rss_ind_table;
+       u8      update_rss_key;
+       u8      rss_caps;
+       u8      rss_table_size_log;
+       u16     rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+       u32     rss_key[QED_RSS_KEY_SIZE];
+};
+
+enum qed_filter_opcode {
+       QED_FILTER_ADD,
+       QED_FILTER_REMOVE,
+       QED_FILTER_MOVE,
+       QED_FILTER_REPLACE,     /* Delete all MACs and add new one instead */
+       QED_FILTER_FLUSH,       /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+       QED_FILTER_MAC,
+       QED_FILTER_VLAN,
+       QED_FILTER_MAC_VLAN,
+       QED_FILTER_INNER_MAC,
+       QED_FILTER_INNER_VLAN,
+       QED_FILTER_INNER_PAIR,
+       QED_FILTER_INNER_MAC_VNI_PAIR,
+       QED_FILTER_MAC_VNI_PAIR,
+       QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+       enum qed_filter_opcode          opcode;
+       enum qed_filter_ucast_type      type;
+       u8                              is_rx_filter;
+       u8                              is_tx_filter;
+       u8                              vport_to_add_to;
+       u8                              vport_to_remove_from;
+       unsigned char                   mac[ETH_ALEN];
+       u8                              assert_on_error;
+       u16                             vlan;
+       u32                             vni;
+};
+
+struct qed_filter_mcast {
+       /* MOVE is not supported for multicast */
+       enum qed_filter_opcode  opcode;
+       u8                      vport_to_add_to;
+       u8                      vport_to_remove_from;
+       u8                      num_mc_addrs;
+#define QED_MAX_MC_ADDRS        64
+       unsigned char           mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct qed_filter_accept_flags {
+       u8      update_rx_mode_config;
+       u8      update_tx_mode_config;
+       u8      rx_accept_filter;
+       u8      tx_accept_filter;
+#define QED_ACCEPT_NONE         0x01
+#define QED_ACCEPT_UCAST_MATCHED        0x02
+#define QED_ACCEPT_UCAST_UNMATCHED      0x04
+#define QED_ACCEPT_MCAST_MATCHED        0x08
+#define QED_ACCEPT_MCAST_UNMATCHED      0x10
+#define QED_ACCEPT_BCAST                0x20
+};
+
+struct qed_sp_vport_update_params {
+       u16                             opaque_fid;
+       u8                              vport_id;
+       u8                              update_vport_active_rx_flg;
+       u8                              vport_active_rx_flg;
+       u8                              update_vport_active_tx_flg;
+       u8                              vport_active_tx_flg;
+       u8                              update_approx_mcast_flg;
+       unsigned long                   bins[8];
+       struct qed_rss_params           *rss_params;
+       struct qed_filter_accept_flags  accept_flags;
+};
+
+#define QED_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+                             u32 concrete_fid,
+                             u16 opaque_fid,
+                             u8 vport_id,
+                             u16 mtu,
+                             u8 drop_ttl0_flg,
+                             u8 inner_vlan_removal_en_flg)
+{
+       struct qed_sp_init_request_params params;
+       struct vport_start_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent =  NULL;
+       int rc = -EINVAL;
+       u16 rx_mode = 0;
+       u8 abs_vport_id = 0;
+
+       rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       if (rc != 0)
+               return rc;
+
+       memset(&params, 0, sizeof(params));
+       params.ramrod_data_size = sizeof(*p_ramrod);
+       params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                qed_spq_get_cid(p_hwfn),
+                                opaque_fid,
+                                ETH_RAMROD_VPORT_START,
+                                PROTOCOLID_ETH,
+                                &params);
+       if (rc)
+               return rc;
+
+       p_ramrod                = &p_ent->ramrod.vport_start;
+       p_ramrod->vport_id      = abs_vport_id;
+
+       p_ramrod->mtu                   = cpu_to_le16(mtu);
+       p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
+       p_ramrod->drop_ttl0_en          = drop_ttl0_flg;
+
+       SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+       SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+       p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
+
+       /* TPA related fields */
+       memset(&p_ramrod->tpa_param, 0,
+              sizeof(struct eth_vport_tpa_param));
+
+       /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+       p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
+                                                 concrete_fid);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
+                       struct vport_update_ramrod_data *p_ramrod,
+                       struct qed_rss_params *p_params)
+{
+       struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
+       u16 abs_l2_queue = 0, capabilities = 0;
+       int rc = 0, i;
+
+       if (!p_params) {
+               p_ramrod->common.update_rss_flg = 0;
+               return rc;
+       }
+
+       BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
+                    ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+       rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+       if (rc)
+               return rc;
+
+       p_ramrod->common.update_rss_flg = p_params->update_rss_config;
+       rss->update_rss_capabilities = p_params->update_rss_capabilities;
+       rss->update_rss_ind_table = p_params->update_rss_ind_table;
+       rss->update_rss_key = p_params->update_rss_key;
+
+       rss->rss_mode = p_params->rss_enable ?
+                       ETH_VPORT_RSS_MODE_REGULAR :
+                       ETH_VPORT_RSS_MODE_DISABLED;
+
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV4));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV6));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+       SET_FIELD(capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+                 !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
+       rss->tbl_size = p_params->rss_table_size_log;
+
+       rss->capabilities = cpu_to_le16(capabilities);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                  "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+                  p_ramrod->common.update_rss_flg,
+                  rss->rss_mode, rss->update_rss_capabilities,
+                  capabilities, rss->update_rss_ind_table,
+                  rss->update_rss_key);
+
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+               rc = qed_fw_l2_queue(p_hwfn,
+                                    (u8)p_params->rss_ind_table[i],
+                                    &abs_l2_queue);
+               if (rc)
+                       return rc;
+
+               rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
+               DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
+                          i, rss->indirection_table[i]);
+       }
+
+       for (i = 0; i < 10; i++)
+               rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+
+       return rc;
+}
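+/* Note: the loop above programs the key as QED_RSS_KEY_SIZE (10) 32-bit
+ * words, i.e. 40 bytes - the length commonly used for a Toeplitz RSS
+ * hash key.
+ */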
+
+static void
+qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+                         struct vport_update_ramrod_data *p_ramrod,
+                         struct qed_filter_accept_flags accept_flags)
+{
+       p_ramrod->common.update_rx_mode_flg =
+               accept_flags.update_rx_mode_config;
+
+       p_ramrod->common.update_tx_mode_flg =
+               accept_flags.update_tx_mode_config;
+
+       /* Set Rx mode accept flags */
+       if (p_ramrod->common.update_rx_mode_flg) {
+               u8 accept_filter = accept_flags.rx_accept_filter;
+               u16 state = 0;
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+                         !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
+                           !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+                         !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+                         !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
+                           !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+                         (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+                          !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+               SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+                         !!(accept_filter & QED_ACCEPT_BCAST));
+
+               p_ramrod->rx_mode.state = cpu_to_le16(state);
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "p_ramrod->rx_mode.state = 0x%x\n", state);
+       }
+
+       /* Set Tx mode accept flags */
+       if (p_ramrod->common.update_tx_mode_flg) {
+               u8 accept_filter = accept_flags.tx_accept_filter;
+               u16 state = 0;
+
+               SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+                         !!(accept_filter & QED_ACCEPT_NONE));
+
+               SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+                         (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+                          !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+               SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+                         !!(accept_filter & QED_ACCEPT_NONE));
+
+               SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+                         (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+                          !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+               SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+                         !!(accept_filter & QED_ACCEPT_BCAST));
+
+               p_ramrod->tx_mode.state = cpu_to_le16(state);
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "p_ramrod->tx_mode.state = 0x%x\n", state);
+       }
+}
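+/* Restating the Rx SET_FIELD calls above:
+ *   UCAST_DROP_ALL         iff neither UCAST_MATCHED nor UCAST_UNMATCHED
+ *   UCAST_ACCEPT_UNMATCHED iff UCAST_UNMATCHED
+ *   MCAST_DROP_ALL         iff neither MCAST_MATCHED nor MCAST_UNMATCHED
+ *   MCAST_ACCEPT_ALL       iff both MCAST_MATCHED and MCAST_UNMATCHED
+ *   BCAST_ACCEPT_ALL       iff QED_ACCEPT_BCAST
+ */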
+
+static void
+qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
+                       struct vport_update_ramrod_data *p_ramrod,
+                       struct qed_sp_vport_update_params *p_params)
+{
+       int i;
+
+       memset(&p_ramrod->approx_mcast.bins, 0,
+              sizeof(p_ramrod->approx_mcast.bins));
+
+       if (p_params->update_approx_mcast_flg) {
+               p_ramrod->common.update_approx_mcast_flg = 1;
+               for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+                       u32 *p_bins = (u32 *)p_params->bins;
+                       __le32 val = cpu_to_le32(p_bins[i]);
+
+                       p_ramrod->approx_mcast.bins[i] = val;
+               }
+       }
+}
+
+static int
+qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+                   struct qed_sp_vport_update_params *p_params,
+                   enum spq_mode comp_mode,
+                   struct qed_spq_comp_cb *p_comp_data)
+{
+       struct qed_rss_params *p_rss_params = p_params->rss_params;
+       struct vport_update_ramrod_data_cmn *p_cmn;
+       struct qed_sp_init_request_params sp_params;
+       struct vport_update_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       u8 abs_vport_id = 0;
+       int rc = -EINVAL;
+
+       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+       if (rc != 0)
+               return rc;
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.ramrod_data_size = sizeof(*p_ramrod);
+       sp_params.comp_mode = comp_mode;
+       sp_params.p_comp_data = p_comp_data;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                qed_spq_get_cid(p_hwfn),
+                                p_params->opaque_fid,
+                                ETH_RAMROD_VPORT_UPDATE,
+                                PROTOCOLID_ETH,
+                                &sp_params);
+       if (rc)
+               return rc;
+
+       /* Copy input params to ramrod according to FW struct */
+       p_ramrod = &p_ent->ramrod.vport_update;
+       p_cmn = &p_ramrod->common;
+
+       p_cmn->vport_id = abs_vport_id;
+       p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+       p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+       p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+       p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+
+       rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+       if (rc) {
+               /* Return spq entry which is taken in qed_sp_init_request()*/
+               qed_spq_return_entry(p_hwfn, p_ent);
+               return rc;
+       }
+
+       /* Update mcast bins for VFs, PF doesn't use this functionality */
+       qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+       qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
+                            u16 opaque_fid,
+                            u8 vport_id)
+{
+       struct qed_sp_init_request_params sp_params;
+       struct vport_stop_ramrod_data *p_ramrod;
+       struct qed_spq_entry *p_ent;
+       u8 abs_vport_id = 0;
+       int rc;
+
+       rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       if (rc != 0)
+               return rc;
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.ramrod_data_size = sizeof(*p_ramrod);
+       sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                qed_spq_get_cid(p_hwfn),
+                                opaque_fid,
+                                ETH_RAMROD_VPORT_STOP,
+                                PROTOCOLID_ETH,
+                                &sp_params);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vport_stop;
+       p_ramrod->vport_id = abs_vport_id;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_filter_accept_cmd(struct qed_dev *cdev,
+                                u8 vport,
+                                struct qed_filter_accept_flags accept_flags,
+                                enum spq_mode comp_mode,
+                                struct qed_spq_comp_cb *p_comp_data)
+{
+       struct qed_sp_vport_update_params vport_update_params;
+       int i, rc;
+
+       /* Prepare and send the vport rx_mode change */
+       memset(&vport_update_params, 0, sizeof(vport_update_params));
+       vport_update_params.vport_id = vport;
+       vport_update_params.accept_flags = accept_flags;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+               rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
+                                        comp_mode, p_comp_data);
+               if (rc != 0) {
+                       DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
+                       return rc;
+               }
+
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+                          accept_flags.rx_accept_filter,
+                          accept_flags.tx_accept_filter);
+       }
+
+       return 0;
+}
+
+static int qed_sp_release_queue_cid(
+       struct qed_hwfn *p_hwfn,
+       struct qed_hw_cid_data *p_cid_data)
+{
+       if (!p_cid_data->b_cid_allocated)
+               return 0;
+
+       qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
+
+       p_cid_data->b_cid_allocated = false;
+
+       return 0;
+}
+
+static int
+qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+                           u16 opaque_fid,
+                           u32 cid,
+                           struct qed_queue_start_common_params *params,
+                           u8 stats_id,
+                           u16 bd_max_bytes,
+                           dma_addr_t bd_chain_phys_addr,
+                           dma_addr_t cqe_pbl_addr,
+                           u16 cqe_pbl_size)
+{
+       struct rx_queue_start_ramrod_data *p_ramrod = NULL;
+       struct qed_sp_init_request_params sp_params;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_hw_cid_data *p_rx_cid;
+       u16 abs_rx_q_id = 0;
+       u8 abs_vport_id = 0;
+       int rc = -EINVAL;
+
+       /* Store information for the stop */
+       p_rx_cid                = &p_hwfn->p_rx_cids[params->queue_id];
+       p_rx_cid->cid           = cid;
+       p_rx_cid->opaque_fid    = opaque_fid;
+       p_rx_cid->vport_id      = params->vport_id;
+
+       rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
+       if (rc != 0)
+               return rc;
+
+       rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
+       if (rc != 0)
+               return rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+                  opaque_fid, cid, params->queue_id, params->vport_id,
+                  params->sb);
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+       sp_params.ramrod_data_size = sizeof(*p_ramrod);
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                cid, opaque_fid,
+                                ETH_RAMROD_RX_QUEUE_START,
+                                PROTOCOLID_ETH,
+                                &sp_params);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+       p_ramrod->sb_id                 = cpu_to_le16(params->sb);
+       p_ramrod->sb_index              = params->sb_idx;
+       p_ramrod->vport_id              = abs_vport_id;
+       p_ramrod->stats_counter_id      = stats_id;
+       p_ramrod->rx_queue_id           = cpu_to_le16(abs_rx_q_id);
+       p_ramrod->complete_cqe_flg      = 0;
+       p_ramrod->complete_event_flg    = 1;
+
+       p_ramrod->bd_max_bytes  = cpu_to_le16(bd_max_bytes);
+       p_ramrod->bd_base.hi    = DMA_HI_LE(bd_chain_phys_addr);
+       p_ramrod->bd_base.lo    = DMA_LO_LE(bd_chain_phys_addr);
+
+       p_ramrod->num_of_pbl_pages      = cpu_to_le16(cqe_pbl_size);
+       p_ramrod->cqe_pbl_addr.hi       = DMA_HI_LE(cqe_pbl_addr);
+       p_ramrod->cqe_pbl_addr.lo       = DMA_LO_LE(cqe_pbl_addr);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+       return rc;
+}
+
+static int
+qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+                         u16 opaque_fid,
+                         struct qed_queue_start_common_params *params,
+                         u16 bd_max_bytes,
+                         dma_addr_t bd_chain_phys_addr,
+                         dma_addr_t cqe_pbl_addr,
+                         u16 cqe_pbl_size,
+                         void __iomem **pp_prod)
+{
+       struct qed_hw_cid_data *p_rx_cid;
+       u64 init_prod_val = 0;
+       u16 abs_l2_queue = 0;
+       u8 abs_stats_id = 0;
+       int rc;
+
+       rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
+       if (rc != 0)
+               return rc;
+
+       rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
+       if (rc != 0)
+               return rc;
+
+       *pp_prod = (u8 __iomem *)p_hwfn->regview +
+                                GTT_BAR0_MAP_REG_MSDM_RAM +
+                                MSTORM_PRODS_OFFSET(abs_l2_queue);
+
+       /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+       __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+                         (u32 *)(&init_prod_val));
+
+       /* Allocate a CID for the queue */
+       p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+                                &p_rx_cid->cid);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+               return rc;
+       }
+       p_rx_cid->b_cid_allocated = true;
+
+       rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
+                                        opaque_fid,
+                                        p_rx_cid->cid,
+                                        params,
+                                        abs_stats_id,
+                                        bd_max_bytes,
+                                        bd_chain_phys_addr,
+                                        cqe_pbl_addr,
+                                        cqe_pbl_size);
+
+       if (rc != 0)
+               qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+       return rc;
+}
+
+static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+                                   u16 rx_queue_id,
+                                   bool eq_completion_only,
+                                   bool cqe_completion)
+{
+       struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+       struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
+       struct qed_sp_init_request_params sp_params;
+       struct qed_spq_entry *p_ent = NULL;
+       u16 abs_rx_q_id = 0;
+       int rc = -EINVAL;
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.ramrod_data_size = sizeof(*p_ramrod);
+       sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                p_rx_cid->cid,
+                                p_rx_cid->opaque_fid,
+                                ETH_RAMROD_RX_QUEUE_STOP,
+                                PROTOCOLID_ETH,
+                                &sp_params);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.rx_queue_stop;
+
+       qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+       qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+       p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+
+       /* Cleaning the queue requires the completion to arrive on the
+        * queue itself. In addition, VFs require the answer to come to
+        * the PF as an EQE.
+        */
+       p_ramrod->complete_cqe_flg =
+               (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
+                !eq_completion_only) || cqe_completion;
+       p_ramrod->complete_event_flg =
+               !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
+               eq_completion_only;
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               return rc;
+
+       return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+}
+
+static int
+qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
+                           u16  opaque_fid,
+                           u32  cid,
+                           struct qed_queue_start_common_params *p_params,
+                           u8  stats_id,
+                           dma_addr_t pbl_addr,
+                           u16 pbl_size,
+                           union qed_qm_pq_params *p_pq_params)
+{
+       struct tx_queue_start_ramrod_data *p_ramrod = NULL;
+       struct qed_sp_init_request_params sp_params;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_hw_cid_data *p_tx_cid;
+       u8 abs_vport_id;
+       int rc = -EINVAL;
+       u16 pq_id;
+
+       /* Store information for the stop */
+       p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+       p_tx_cid->cid           = cid;
+       p_tx_cid->opaque_fid    = opaque_fid;
+
+       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+       if (rc)
+               return rc;
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.ramrod_data_size = sizeof(*p_ramrod);
+       sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
+                                opaque_fid,
+                                ETH_RAMROD_TX_QUEUE_START,
+                                PROTOCOLID_ETH,
+                                &sp_params);
+       if (rc)
+               return rc;
+
+       p_ramrod                = &p_ent->ramrod.tx_queue_start;
+       p_ramrod->vport_id      = abs_vport_id;
+
+       p_ramrod->sb_id                 = cpu_to_le16(p_params->sb);
+       p_ramrod->sb_index              = p_params->sb_idx;
+       p_ramrod->stats_counter_id      = stats_id;
+       p_ramrod->tc                    = p_pq_params->eth.tc;
+
+       p_ramrod->pbl_size              = cpu_to_le16(pbl_size);
+       p_ramrod->pbl_base_addr.hi      = DMA_HI_LE(pbl_addr);
+       p_ramrod->pbl_base_addr.lo      = DMA_LO_LE(pbl_addr);
+
+       pq_id                   = qed_get_qm_pq(p_hwfn,
+                                               PROTOCOLID_ETH,
+                                               p_pq_params);
+       p_ramrod->qm_pq_id      = cpu_to_le16(pq_id);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+                         u16 opaque_fid,
+                         struct qed_queue_start_common_params *p_params,
+                         dma_addr_t pbl_addr,
+                         u16 pbl_size,
+                         void __iomem **pp_doorbell)
+{
+       struct qed_hw_cid_data *p_tx_cid;
+       union qed_qm_pq_params pq_params;
+       u8 abs_stats_id = 0;
+       int rc;
+
+       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+       if (rc)
+               return rc;
+
+       p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+       memset(p_tx_cid, 0, sizeof(*p_tx_cid));
+       memset(&pq_params, 0, sizeof(pq_params));
+
+       /* Allocate a CID for the queue */
+       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+                                &p_tx_cid->cid);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+               return rc;
+       }
+       p_tx_cid->b_cid_allocated = true;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+                  opaque_fid, p_tx_cid->cid,
+                  p_params->queue_id, p_params->vport_id, p_params->sb);
+
+       rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+                                        opaque_fid,
+                                        p_tx_cid->cid,
+                                        p_params,
+                                        abs_stats_id,
+                                        pbl_addr,
+                                        pbl_size,
+                                        &pq_params);
+
+       *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+                                    qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+       if (rc)
+               qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+       return rc;
+}
+
+static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
+                                   u16 tx_queue_id)
+{
+       struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+       struct qed_sp_init_request_params sp_params;
+       struct qed_spq_entry *p_ent = NULL;
+       int rc = -EINVAL;
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
+       sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                p_tx_cid->cid,
+                                p_tx_cid->opaque_fid,
+                                ETH_RAMROD_TX_QUEUE_STOP,
+                                PROTOCOLID_ETH,
+                                &sp_params);
+       if (rc)
+               return rc;
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               return rc;
+
+       return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+}
+
+static enum eth_filter_action
+qed_filter_action(enum qed_filter_opcode opcode)
+{
+       enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+       switch (opcode) {
+       case QED_FILTER_ADD:
+               action = ETH_FILTER_ACTION_ADD;
+               break;
+       case QED_FILTER_REMOVE:
+               action = ETH_FILTER_ACTION_REMOVE;
+               break;
+       case QED_FILTER_REPLACE:
+       case QED_FILTER_FLUSH:
+               action = ETH_FILTER_ACTION_REPLACE;
+               break;
+       default:
+               action = MAX_ETH_FILTER_ACTION;
+       }
+
+       return action;
+}
+
+static void qed_set_fw_mac_addr(__le16 *fw_msb,
+                               __le16 *fw_mid,
+                               __le16 *fw_lsb,
+                               u8 *mac)
+{
+       ((u8 *)fw_msb)[0] = mac[1];
+       ((u8 *)fw_msb)[1] = mac[0];
+       ((u8 *)fw_mid)[0] = mac[3];
+       ((u8 *)fw_mid)[1] = mac[2];
+       ((u8 *)fw_lsb)[0] = mac[5];
+       ((u8 *)fw_lsb)[1] = mac[4];
+}
+
+static int
+qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
+                       u16 opaque_fid,
+                       struct qed_filter_ucast *p_filter_cmd,
+                       struct vport_filter_update_ramrod_data **pp_ramrod,
+                       struct qed_spq_entry **pp_ent,
+                       enum spq_mode comp_mode,
+                       struct qed_spq_comp_cb *p_comp_data)
+{
+       u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+       struct vport_filter_update_ramrod_data *p_ramrod;
+       struct qed_sp_init_request_params sp_params;
+       struct eth_filter_cmd *p_first_filter;
+       struct eth_filter_cmd *p_second_filter;
+       enum eth_filter_action action;
+       int rc;
+
+       rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+                         &vport_to_remove_from);
+       if (rc)
+               return rc;
+
+       rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+                         &vport_to_add_to);
+       if (rc)
+               return rc;
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.ramrod_data_size = sizeof(**pp_ramrod);
+       sp_params.comp_mode = comp_mode;
+       sp_params.p_comp_data = p_comp_data;
+
+       rc = qed_sp_init_request(p_hwfn, pp_ent,
+                                qed_spq_get_cid(p_hwfn),
+                                opaque_fid,
+                                ETH_RAMROD_FILTERS_UPDATE,
+                                PROTOCOLID_ETH,
+                                &sp_params);
+       if (rc)
+               return rc;
+
+       *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+       p_ramrod = *pp_ramrod;
+       p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+       p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+       switch (p_filter_cmd->opcode) {
+       case QED_FILTER_FLUSH:
+               p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+       case QED_FILTER_MOVE:
+               p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
+       default:
+               p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
+       }
+
+       p_first_filter  = &p_ramrod->filter_cmds[0];
+       p_second_filter = &p_ramrod->filter_cmds[1];
+
+       switch (p_filter_cmd->type) {
+       case QED_FILTER_MAC:
+               p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
+       case QED_FILTER_VLAN:
+               p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
+       case QED_FILTER_MAC_VLAN:
+               p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
+       case QED_FILTER_INNER_MAC:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
+       case QED_FILTER_INNER_VLAN:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
+       case QED_FILTER_INNER_PAIR:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
+       case QED_FILTER_INNER_MAC_VNI_PAIR:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+               break;
+       case QED_FILTER_MAC_VNI_PAIR:
+               p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
+       case QED_FILTER_VNI:
+               p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
+       }
+
+       if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
+               qed_set_fw_mac_addr(&p_first_filter->mac_msb,
+                                   &p_first_filter->mac_mid,
+                                   &p_first_filter->mac_lsb,
+                                   (u8 *)p_filter_cmd->mac);
+       }
+
+       if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+               p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
+
+       if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+               p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
+
+       if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
+               p_second_filter->type           = p_first_filter->type;
+               p_second_filter->mac_msb        = p_first_filter->mac_msb;
+               p_second_filter->mac_mid        = p_first_filter->mac_mid;
+               p_second_filter->mac_lsb        = p_first_filter->mac_lsb;
+               p_second_filter->vlan_id        = p_first_filter->vlan_id;
+               p_second_filter->vni            = p_first_filter->vni;
+
+               p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+               p_first_filter->vport_id = vport_to_remove_from;
+
+               p_second_filter->action         = ETH_FILTER_ACTION_ADD;
+               p_second_filter->vport_id       = vport_to_add_to;
+       } else {
+               action = qed_filter_action(p_filter_cmd->opcode);
+
+               if (action == MAX_ETH_FILTER_ACTION) {
+                       DP_NOTICE(p_hwfn,
+                                 "Filter opcode %d is not supported yet\n",
+                                 p_filter_cmd->opcode);
+                       return -EINVAL;
+               }
+
+               p_first_filter->action = action;
+               p_first_filter->vport_id = (p_filter_cmd->opcode ==
+                                           QED_FILTER_REMOVE) ?
+                                          vport_to_remove_from :
+                                          vport_to_add_to;
+       }
+
+       return 0;
+}
+
+static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+                                  u16 opaque_fid,
+                                  struct qed_filter_ucast *p_filter_cmd,
+                                  enum spq_mode comp_mode,
+                                  struct qed_spq_comp_cb *p_comp_data)
+{
+       struct vport_filter_update_ramrod_data  *p_ramrod       = NULL;
+       struct qed_spq_entry                    *p_ent          = NULL;
+       struct eth_filter_cmd_header            *p_header;
+       int                                     rc;
+
+       rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+                                    &p_ramrod, &p_ent,
+                                    comp_mode, p_comp_data);
+       if (rc != 0) {
+               DP_ERR(p_hwfn, "Unicast filter preparation failed %d\n", rc);
+               return rc;
+       }
+       p_header = &p_ramrod->filter_cmd_hdr;
+       p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc != 0) {
+               DP_ERR(p_hwfn,
+                      "Unicast filter command post failed %d\n",
+                      rc);
+               return rc;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+                  (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
+                  ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
+                  "REMOVE" :
+                  ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
+                   "MOVE" : "REPLACE")),
+                  (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
+                  ((p_filter_cmd->type == QED_FILTER_VLAN) ?
+                   "VLAN" : "MAC & VLAN"),
+                  p_ramrod->filter_cmd_hdr.cmd_cnt,
+                  p_filter_cmd->is_rx_filter,
+                  p_filter_cmd->is_tx_filter);
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
+                  p_filter_cmd->vport_to_add_to,
+                  p_filter_cmd->vport_to_remove_from,
+                  p_filter_cmd->mac[0],
+                  p_filter_cmd->mac[1],
+                  p_filter_cmd->mac[2],
+                  p_filter_cmd->mac[3],
+                  p_filter_cmd->mac[4],
+                  p_filter_cmd->mac[5],
+                  p_filter_cmd->vlan);
+
+       return 0;
+}
+
+/*******************************************************************************
+ * Description:
+ *         Calculates a CRC32 over a buffer.
+ *         Note: crc32_length MUST be a multiple of 8; the complement
+ *         parameter is currently unused.
+ * Return:
+ *         The CRC32 result, or crc32_seed unchanged on invalid input.
+ ******************************************************************************/
+static u32 qed_calc_crc32c(u8 *crc32_packet,
+                          u32 crc32_length,
+                          u32 crc32_seed,
+                          u8 complement)
+{
+       u32 byte = 0;
+       u32 bit = 0;
+       u8 msb = 0;
+       u8 current_byte = 0;
+       u32 crc32_result = crc32_seed;
+
+       if ((!crc32_packet) ||
+           (crc32_length == 0) ||
+           ((crc32_length % 8) != 0))
+               return crc32_result;
+       for (byte = 0; byte < crc32_length; byte++) {
+               current_byte = crc32_packet[byte];
+               for (bit = 0; bit < 8; bit++) {
+                       msb = (u8)(crc32_result >> 31);
+                       crc32_result = crc32_result << 1;
+                       if (msb != (0x1 & (current_byte >> bit))) {
+                               crc32_result = crc32_result ^ CRC32_POLY;
+                               crc32_result |= 1; /* set result bit 0 */
+                       }
+               }
+       }
+       return crc32_result;
+}
+
+static inline u32 qed_crc32c_le(u32 seed,
+                               u8 *mac,
+                               u32 len)
+{
+       u32 packet_buf[2] = { 0 };
+
+       memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
+       return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
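+/* qed_crc32c_le() above zero-pads the 6-byte MAC to 8 bytes because
+ * qed_calc_crc32c() requires a length that is a multiple of 8. A usage
+ * sketch (hypothetical MAC value):
+ *
+ *   u8 mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
+ *   u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+ *                           mac, ETH_ALEN);
+ *
+ * Only the low 8 bits of the result are used, selecting one of 256
+ * approximate-multicast bins in qed_mcast_bin_from_mac() below.
+ */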
+static u8 qed_mcast_bin_from_mac(u8 *mac)
+{
+       u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+                               mac, ETH_ALEN);
+
+       return crc & 0xff;
+}
+
+static int
+qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
+                       u16 opaque_fid,
+                       struct qed_filter_mcast *p_filter_cmd,
+                       enum spq_mode comp_mode,
+                       struct qed_spq_comp_cb *p_comp_data)
+{
+       unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+       struct vport_update_ramrod_data *p_ramrod = NULL;
+       struct qed_sp_init_request_params sp_params;
+       struct qed_spq_entry *p_ent = NULL;
+       u8 abs_vport_id = 0;
+       int rc, i;
+
+       if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+               rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+                                 &abs_vport_id);
+               if (rc)
+                       return rc;
+       } else {
+               rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+                                 &abs_vport_id);
+               if (rc)
+                       return rc;
+       }
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.ramrod_data_size = sizeof(*p_ramrod);
+       sp_params.comp_mode = comp_mode;
+       sp_params.p_comp_data = p_comp_data;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                qed_spq_get_cid(p_hwfn),
+                                p_hwfn->hw_info.opaque_fid,
+                                ETH_RAMROD_VPORT_UPDATE,
+                                PROTOCOLID_ETH,
+                                &sp_params);
+
+       if (rc) {
+               DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+               return rc;
+       }
+
+       p_ramrod = &p_ent->ramrod.vport_update;
+       p_ramrod->common.update_approx_mcast_flg = 1;
+
+       /* explicitly clear out the entire vector */
+       memset(&p_ramrod->approx_mcast.bins, 0,
+              sizeof(p_ramrod->approx_mcast.bins));
+       memset(bins, 0, sizeof(unsigned long) *
+              ETH_MULTICAST_MAC_BINS_IN_REGS);
+       /* The filter ADD op is an explicit set operation: it removes
+        * any existing filters for the vport.
+        */
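+       /* Illustrative bin mapping (little-endian host assumed, matching
+        * the cpu_to_le32() conversion below): a MAC whose CRC selects
+        * bin 0x2b sets bit 0x2b % 32 = 11 of 32-bit word 0x2b / 32 = 1
+        * in the approximate-multicast vector.
+        */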
+       if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+               for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+                       u32 bit;
+
+                       bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+                       __set_bit(bit, bins);
+               }
+
+               /* Convert to the correct endianness */
+               for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+                       u32 *p_bins = (u32 *)bins;
+                       struct vport_update_ramrod_mcast *approx_mcast;
+
+                       approx_mcast = &p_ramrod->approx_mcast;
+                       approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+               }
+       }
+
+       p_ramrod->common.vport_id = abs_vport_id;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_filter_mcast_cmd(struct qed_dev *cdev,
+                    struct qed_filter_mcast *p_filter_cmd,
+                    enum spq_mode comp_mode,
+                    struct qed_spq_comp_cb *p_comp_data)
+{
+       int rc = 0;
+       int i;
+
+       /* only ADD and REMOVE operations are supported for multi-cast */
+       if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
+            p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
+           (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
+               return -EINVAL;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               u16 opaque_fid;
+
+               if (rc != 0)
+                       break;
+
+               opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+               rc = qed_sp_eth_filter_mcast(p_hwfn,
+                                            opaque_fid,
+                                            p_filter_cmd,
+                                            comp_mode,
+                                            p_comp_data);
+       }
+       return rc;
+}
+
+static int qed_filter_ucast_cmd(struct qed_dev *cdev,
+                               struct qed_filter_ucast *p_filter_cmd,
+                               enum spq_mode comp_mode,
+                               struct qed_spq_comp_cb *p_comp_data)
+{
+       int rc = 0;
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               u16 opaque_fid;
+
+               if (rc != 0)
+                       break;
+
+               opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+               rc = qed_sp_eth_filter_ucast(p_hwfn,
+                                            opaque_fid,
+                                            p_filter_cmd,
+                                            comp_mode,
+                                            p_comp_data);
+       }
+
+       return rc;
+}
+
+static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+                                struct qed_dev_eth_info *info)
+{
+       int i;
+
+       memset(info, 0, sizeof(*info));
+
+       info->num_tc = 1;
+
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               for_each_hwfn(cdev, i)
+                       info->num_queues += FEAT_NUM(&cdev->hwfns[i],
+                                                    QED_PF_L2_QUE);
+               if (cdev->int_params.fp_msix_cnt)
+                       info->num_queues = min_t(u8, info->num_queues,
+                                                cdev->int_params.fp_msix_cnt);
+       } else {
+               info->num_queues = cdev->num_hwfns;
+       }
+
+       info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+       ether_addr_copy(info->port_mac,
+                       cdev->hwfns[0].hw_info.hw_mac_addr);
+
+       qed_fill_dev_info(cdev, &info->common);
+
+       return 0;
+}
+
+static void qed_register_eth_ops(struct qed_dev *cdev,
+                                struct qed_eth_cb_ops *ops,
+                                void *cookie)
+{
+       cdev->protocol_ops.eth  = ops;
+       cdev->ops_cookie        = cookie;
+}
+
+static int qed_start_vport(struct qed_dev *cdev,
+                          u8 vport_id,
+                          u16 mtu,
+                          u8 drop_ttl0_flg,
+                          u8 inner_vlan_removal_en_flg)
+{
+       int rc, i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               rc = qed_sp_vport_start(p_hwfn,
+                                       p_hwfn->hw_info.concrete_fid,
+                                       p_hwfn->hw_info.opaque_fid,
+                                       vport_id,
+                                       mtu,
+                                       drop_ttl0_flg,
+                                       inner_vlan_removal_en_flg);
+
+               if (rc) {
+                       DP_ERR(cdev, "Failed to start VPORT\n");
+                       return rc;
+               }
+
+               qed_hw_start_fastpath(p_hwfn);
+
+               DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                          "Started V-PORT %d with MTU %d\n",
+                          vport_id, mtu);
+       }
+
+       qed_reset_vport_stats(cdev);
+
+       return 0;
+}
+
+static int qed_stop_vport(struct qed_dev *cdev,
+                         u8 vport_id)
+{
+       int rc, i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               rc = qed_sp_vport_stop(p_hwfn,
+                                      p_hwfn->hw_info.opaque_fid,
+                                      vport_id);
+
+               if (rc) {
+                       DP_ERR(cdev, "Failed to stop VPORT\n");
+                       return rc;
+               }
+       }
+       return 0;
+}
+
+static int qed_update_vport(struct qed_dev *cdev,
+                           struct qed_update_vport_params *params)
+{
+       struct qed_sp_vport_update_params sp_params;
+       struct qed_rss_params sp_rss_params;
+       int rc, i;
+
+       if (!cdev)
+               return -ENODEV;
+
+       memset(&sp_params, 0, sizeof(sp_params));
+       memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+       /* Translate protocol params into sp params */
+       sp_params.vport_id = params->vport_id;
+       sp_params.update_vport_active_rx_flg =
+               params->update_vport_active_flg;
+       sp_params.update_vport_active_tx_flg =
+               params->update_vport_active_flg;
+       sp_params.vport_active_rx_flg = params->vport_active_flg;
+       sp_params.vport_active_tx_flg = params->vport_active_flg;
+
+       /* RSS is a bit tricky: the upper layer isn't aware of hwfns, so
+        * for CMT the RSS values need to be re-fixed per engine.
+        */
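+       /* Worked example (illustrative): with 2 hwfns and a largest
+        * indirection-table entry of 7, divisor = (7 + 2 - 1) / 2 = 4,
+        * so every entry is reduced modulo 4 to a per-engine queue
+        * index; if the largest entry is below the hwfn count, RSS is
+        * disabled instead.
+        */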
+       if (cdev->num_hwfns > 1 && params->update_rss_flg) {
+               struct qed_update_vport_rss_params *rss =
+                       &params->rss_params;
+               int k, max = 0;
+
+               /* Find the largest entry, since RSS may need to be
+                * disabled [e.g., when there is only one queue per hwfn]
+                */
+               for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+                       max = (max > rss->rss_ind_table[k]) ?
+                               max : rss->rss_ind_table[k];
+
+               /* Either fix RSS values or disable RSS */
+               if (cdev->num_hwfns < max + 1) {
+                       int divisor = (max + cdev->num_hwfns - 1) /
+                               cdev->num_hwfns;
+
+                       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                                  "CMT - fixing RSS values (modulo %02x)\n",
+                                  divisor);
+
+                       for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+                               rss->rss_ind_table[k] =
+                                       rss->rss_ind_table[k] % divisor;
+               } else {
+                       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                                  "CMT - 1 queue per-hwfn; Disabling RSS\n");
+                       params->update_rss_flg = 0;
+               }
+       }
+
+       /* Now translate the RSS configuration into sp parameters */
+       if (params->update_rss_flg) {
+               sp_rss_params.update_rss_config = 1;
+               sp_rss_params.rss_enable = 1;
+               sp_rss_params.update_rss_capabilities = 1;
+               sp_rss_params.update_rss_ind_table = 1;
+               sp_rss_params.update_rss_key = 1;
+               sp_rss_params.rss_caps = QED_RSS_IPV4 |
+                                        QED_RSS_IPV6 |
+                                        QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+               sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
+               memcpy(sp_rss_params.rss_ind_table,
+                      params->rss_params.rss_ind_table,
+                      QED_RSS_IND_TABLE_SIZE * sizeof(u16));
+               memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
+                      QED_RSS_KEY_SIZE * sizeof(u32));
+       }
+       sp_params.rss_params = &sp_rss_params;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               rc = qed_sp_vport_update(p_hwfn, &sp_params,
+                                        QED_SPQ_MODE_EBLOCK,
+                                        NULL);
+               if (rc) {
+                       DP_ERR(cdev, "Failed to update VPORT\n");
+                       return rc;
+               }
+
+               DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                          "Updated V-PORT %d: active_flag %d [update %d]\n",
+                          params->vport_id, params->vport_active_flg,
+                          params->update_vport_active_flg);
+       }
+
+       return 0;
+}
+
+static int qed_start_rxq(struct qed_dev *cdev,
+                        struct qed_queue_start_common_params *params,
+                        u16 bd_max_bytes,
+                        dma_addr_t bd_chain_phys_addr,
+                        dma_addr_t cqe_pbl_addr,
+                        u16 cqe_pbl_size,
+                        void __iomem **pp_prod)
+{
+       int rc, hwfn_index;
+       struct qed_hwfn *p_hwfn;
+
+       hwfn_index = params->rss_id % cdev->num_hwfns;
+       p_hwfn = &cdev->hwfns[hwfn_index];
+
+       /* Fix queue ID in 100g mode */
+       params->queue_id /= cdev->num_hwfns;
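+       /* The division above yields the per-hwfn queue index;
+        * illustrative (2-hwfn 100g device): global queue_id 5 becomes
+        * local queue 5 / 2 = 2 on the hwfn selected by rss_id % 2.
+        */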
+
+       rc = qed_sp_eth_rx_queue_start(p_hwfn,
+                                      p_hwfn->hw_info.opaque_fid,
+                                      params,
+                                      bd_max_bytes,
+                                      bd_chain_phys_addr,
+                                      cqe_pbl_addr,
+                                      cqe_pbl_size,
+                                      pp_prod);
+
+       if (rc) {
+               DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+               return rc;
+       }
+
+       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                  "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+                  params->queue_id, params->rss_id, params->vport_id,
+                  params->sb);
+
+       return 0;
+}
+
+static int qed_stop_rxq(struct qed_dev *cdev,
+                       struct qed_stop_rxq_params *params)
+{
+       int rc, hwfn_index;
+       struct qed_hwfn *p_hwfn;
+
+       hwfn_index      = params->rss_id % cdev->num_hwfns;
+       p_hwfn          = &cdev->hwfns[hwfn_index];
+
+       rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+                                     params->rx_queue_id / cdev->num_hwfns,
+                                     params->eq_completion_only,
+                                     false);
+       if (rc) {
+               DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+               return rc;
+       }
+
+       return 0;
+}
+
+static int qed_start_txq(struct qed_dev *cdev,
+                        struct qed_queue_start_common_params *p_params,
+                        dma_addr_t pbl_addr,
+                        u16 pbl_size,
+                        void __iomem **pp_doorbell)
+{
+       struct qed_hwfn *p_hwfn;
+       int rc, hwfn_index;
+
+       hwfn_index      = p_params->rss_id % cdev->num_hwfns;
+       p_hwfn          = &cdev->hwfns[hwfn_index];
+
+       /* Fix queue ID in 100g mode */
+       p_params->queue_id /= cdev->num_hwfns;
+
+       rc = qed_sp_eth_tx_queue_start(p_hwfn,
+                                      p_hwfn->hw_info.opaque_fid,
+                                      p_params,
+                                      pbl_addr,
+                                      pbl_size,
+                                      pp_doorbell);
+
+       if (rc) {
+               DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
+               return rc;
+       }
+
+       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+                  "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+                  p_params->queue_id, p_params->rss_id, p_params->vport_id,
+                  p_params->sb);
+
+       return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static int qed_fastpath_stop(struct qed_dev *cdev)
+{
+       qed_hw_stop_fastpath(cdev);
+
+       return 0;
+}
+
+static int qed_stop_txq(struct qed_dev *cdev,
+                       struct qed_stop_txq_params *params)
+{
+       struct qed_hwfn *p_hwfn;
+       int rc, hwfn_index;
+
+       hwfn_index      = params->rss_id % cdev->num_hwfns;
+       p_hwfn          = &cdev->hwfns[hwfn_index];
+
+       rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+                                     params->tx_queue_id / cdev->num_hwfns);
+       if (rc) {
+               DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+               return rc;
+       }
+
+       return 0;
+}
+
+static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
+                                       enum qed_filter_rx_mode_type type)
+{
+       struct qed_filter_accept_flags accept_flags;
+
+       memset(&accept_flags, 0, sizeof(accept_flags));
+
+       accept_flags.update_rx_mode_config      = 1;
+       accept_flags.update_tx_mode_config      = 1;
+       accept_flags.rx_accept_filter           = QED_ACCEPT_UCAST_MATCHED |
+                                                 QED_ACCEPT_MCAST_MATCHED |
+                                                 QED_ACCEPT_BCAST;
+       accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+                                       QED_ACCEPT_MCAST_MATCHED |
+                                       QED_ACCEPT_BCAST;
+
+       if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+               accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+                                                QED_ACCEPT_MCAST_UNMATCHED;
+       else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+               accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+
+       return qed_filter_accept_cmd(cdev, 0, accept_flags,
+                                    QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_ucast(struct qed_dev *cdev,
+                                     struct qed_filter_ucast_params *params)
+{
+       struct qed_filter_ucast ucast;
+
+       if (!params->vlan_valid && !params->mac_valid) {
+               DP_NOTICE(cdev,
+                         "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
+               return -EINVAL;
+       }
+
+       memset(&ucast, 0, sizeof(ucast));
+       switch (params->type) {
+       case QED_FILTER_XCAST_TYPE_ADD:
+               ucast.opcode = QED_FILTER_ADD;
+               break;
+       case QED_FILTER_XCAST_TYPE_DEL:
+               ucast.opcode = QED_FILTER_REMOVE;
+               break;
+       case QED_FILTER_XCAST_TYPE_REPLACE:
+               ucast.opcode = QED_FILTER_REPLACE;
+               break;
+       default:
+               DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
+                         params->type);
+               return -EINVAL;
+       }
+
+       if (params->vlan_valid && params->mac_valid) {
+               ucast.type = QED_FILTER_MAC_VLAN;
+               ether_addr_copy(ucast.mac, params->mac);
+               ucast.vlan = params->vlan;
+       } else if (params->mac_valid) {
+               ucast.type = QED_FILTER_MAC;
+               ether_addr_copy(ucast.mac, params->mac);
+       } else {
+               ucast.type = QED_FILTER_VLAN;
+               ucast.vlan = params->vlan;
+       }
+
+       ucast.is_rx_filter = true;
+       ucast.is_tx_filter = true;
+
+       return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_mcast(struct qed_dev *cdev,
+                                     struct qed_filter_mcast_params *params)
+{
+       struct qed_filter_mcast mcast;
+       int i;
+
+       memset(&mcast, 0, sizeof(mcast));
+       switch (params->type) {
+       case QED_FILTER_XCAST_TYPE_ADD:
+               mcast.opcode = QED_FILTER_ADD;
+               break;
+       case QED_FILTER_XCAST_TYPE_DEL:
+               mcast.opcode = QED_FILTER_REMOVE;
+               break;
+       default:
+               DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
+                         params->type);
+               return -EINVAL;
+       }
+
+       mcast.num_mc_addrs = params->num;
+       for (i = 0; i < mcast.num_mc_addrs; i++)
+               ether_addr_copy(mcast.mac[i], params->mac[i]);
+
+       return qed_filter_mcast_cmd(cdev, &mcast,
+                                   QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter(struct qed_dev *cdev,
+                               struct qed_filter_params *params)
+{
+       enum qed_filter_rx_mode_type accept_flags;
+
+       switch (params->type) {
+       case QED_FILTER_TYPE_UCAST:
+               return qed_configure_filter_ucast(cdev, &params->filter.ucast);
+       case QED_FILTER_TYPE_MCAST:
+               return qed_configure_filter_mcast(cdev, &params->filter.mcast);
+       case QED_FILTER_TYPE_RX_MODE:
+               accept_flags = params->filter.accept_flags;
+               return qed_configure_filter_rx_mode(cdev, accept_flags);
+       default:
+               DP_NOTICE(cdev, "Unknown filter type %d\n",
+                         (int)params->type);
+               return -EINVAL;
+       }
+}
+
+static int qed_fp_cqe_completion(struct qed_dev *dev,
+                                u8 rss_id,
+                                struct eth_slow_path_rx_cqe *cqe)
+{
+       return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
+                                     cqe);
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+       .common = &qed_common_ops_pass,
+       .fill_dev_info = &qed_fill_eth_dev_info,
+       .register_ops = &qed_register_eth_ops,
+       .vport_start = &qed_start_vport,
+       .vport_stop = &qed_stop_vport,
+       .vport_update = &qed_update_vport,
+       .q_rx_start = &qed_start_rxq,
+       .q_rx_stop = &qed_stop_rxq,
+       .q_tx_start = &qed_start_txq,
+       .q_tx_stop = &qed_stop_txq,
+       .filter_config = &qed_configure_filter,
+       .fastpath_stop = &qed_fastpath_stop,
+       .eth_cqe_completion = &qed_fp_cqe_completion,
+       .get_vport_stats = &qed_get_vport_stats,
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+{
+       if (version != QED_ETH_INTERFACE_VERSION) {
+               pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
+                         version, QED_ETH_INTERFACE_VERSION);
+               return NULL;
+       }
+
+       return &qed_eth_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_eth_ops);
+
+void qed_put_eth_ops(void)
+{
+       /* TODO - reference count for module? */
+}
+EXPORT_SYMBOL(qed_put_eth_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
new file mode 100644 (file)
index 0000000..947c7af
--- /dev/null
@@ -0,0 +1,1169 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/stddef.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/qed/qed_if.h>
+
+#include "qed.h"
+#include "qed_sp.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_hw.h"
+
+static const char version[] =
+       "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define FW_FILE_VERSION                                \
+       __stringify(FW_MAJOR_VERSION) "."       \
+       __stringify(FW_MINOR_VERSION) "."       \
+       __stringify(FW_REVISION_VERSION) "."    \
+       __stringify(FW_ENGINEERING_VERSION)
+
+#define QED_FW_FILE_NAME       \
+       "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+
+static int __init qed_init(void)
+{
+       pr_notice("qed_init called\n");
+
+       pr_info("%s", version);
+
+       return 0;
+}
+
+static void __exit qed_cleanup(void)
+{
+       pr_notice("qed_cleanup called\n");
+}
+
+module_init(qed_init);
+module_exit(qed_cleanup);
+
+/* Check if the DMA controller on the machine can properly handle the DMA
+ * addressing required by the device.
+ */
+static int qed_set_coherency_mask(struct qed_dev *cdev)
+{
+       struct device *dev = &cdev->pdev->dev;
+
+       if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+               if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+                       DP_NOTICE(cdev,
+                                 "Can't request 64-bit consistent allocations\n");
+                       return -EIO;
+               }
+       } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+               DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void qed_free_pci(struct qed_dev *cdev)
+{
+       struct pci_dev *pdev = cdev->pdev;
+
+       if (cdev->doorbells)
+               iounmap(cdev->doorbells);
+       if (cdev->regview)
+               iounmap(cdev->regview);
+       if (atomic_read(&pdev->enable_cnt) == 1)
+               pci_release_regions(pdev);
+
+       pci_disable_device(pdev);
+}
+
+/* Performs PCI initialization and sets up the PCI-related parameters
+ * in the device structure. Returns 0 on success.
+ */
+static int qed_init_pci(struct qed_dev *cdev,
+                       struct pci_dev *pdev)
+{
+       int rc;
+
+       cdev->pdev = pdev;
+
+       rc = pci_enable_device(pdev);
+       if (rc) {
+               DP_NOTICE(cdev, "Cannot enable PCI device\n");
+               goto err0;
+       }
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               DP_NOTICE(cdev, "No memory region found in bar #0\n");
+               rc = -EIO;
+               goto err1;
+       }
+
+       if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+               DP_NOTICE(cdev, "No memory region found in bar #2\n");
+               rc = -EIO;
+               goto err1;
+       }
+
+       if (atomic_read(&pdev->enable_cnt) == 1) {
+               rc = pci_request_regions(pdev, "qed");
+               if (rc) {
+                       DP_NOTICE(cdev,
+                                 "Failed to request PCI memory resources\n");
+                       goto err1;
+               }
+               pci_set_master(pdev);
+               pci_save_state(pdev);
+       }
+
+       if (!pci_is_pcie(pdev)) {
+               DP_NOTICE(cdev, "The bus is not PCI Express\n");
+               rc = -EIO;
+               goto err2;
+       }
+
+       cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+       if (cdev->pci_params.pm_cap == 0)
+               DP_NOTICE(cdev, "Cannot find power management capability\n");
+
+       rc = qed_set_coherency_mask(cdev);
+       if (rc)
+               goto err2;
+
+       cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
+       cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
+       cdev->pci_params.irq = pdev->irq;
+
+       cdev->regview = pci_ioremap_bar(pdev, 0);
+       if (!cdev->regview) {
+               DP_NOTICE(cdev, "Cannot map register space, aborting\n");
+               rc = -ENOMEM;
+               goto err2;
+       }
+
+       cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+       cdev->db_size = pci_resource_len(cdev->pdev, 2);
+       cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+       if (!cdev->doorbells) {
+               DP_NOTICE(cdev, "Cannot map doorbell space\n");
+               rc = -ENOMEM;
+               goto err2;
+       }
+
+       return 0;
+
+err2:
+       pci_release_regions(pdev);
+err1:
+       pci_disable_device(pdev);
+err0:
+       return rc;
+}
+
+int qed_fill_dev_info(struct qed_dev *cdev,
+                     struct qed_dev_info *dev_info)
+{
+       struct qed_ptt  *ptt;
+
+       memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+       dev_info->num_hwfns = cdev->num_hwfns;
+       dev_info->pci_mem_start = cdev->pci_params.mem_start;
+       dev_info->pci_mem_end = cdev->pci_params.mem_end;
+       dev_info->pci_irq = cdev->pci_params.irq;
+       dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+       ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+
+       dev_info->fw_major = FW_MAJOR_VERSION;
+       dev_info->fw_minor = FW_MINOR_VERSION;
+       dev_info->fw_rev = FW_REVISION_VERSION;
+       dev_info->fw_eng = FW_ENGINEERING_VERSION;
+       dev_info->mf_mode = cdev->mf_mode;
+
+       qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+
+       ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+       if (ptt) {
+               qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+                                      &dev_info->flash_size);
+
+               qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+       }
+
+       return 0;
+}
+
+static void qed_free_cdev(struct qed_dev *cdev)
+{
+       kfree(cdev);
+}
+
+static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
+{
+       struct qed_dev *cdev;
+
+       cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+       if (!cdev)
+               return cdev;
+
+       qed_init_struct(cdev);
+
+       return cdev;
+}
+
+/* Sets the requested power state */
+static int qed_set_power_state(struct qed_dev *cdev,
+                              pci_power_t state)
+{
+       if (!cdev)
+               return -ENODEV;
+
+       DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
+       return 0;
+}
+
+/* probing */
+static struct qed_dev *qed_probe(struct pci_dev *pdev,
+                                enum qed_protocol protocol,
+                                u32 dp_module,
+                                u8 dp_level)
+{
+       struct qed_dev *cdev;
+       int rc;
+
+       cdev = qed_alloc_cdev(pdev);
+       if (!cdev)
+               goto err0;
+
+       cdev->protocol = protocol;
+
+       qed_init_dp(cdev, dp_module, dp_level);
+
+       rc = qed_init_pci(cdev, pdev);
+       if (rc) {
+               DP_ERR(cdev, "init pci failed\n");
+               goto err1;
+       }
+       DP_INFO(cdev, "PCI init completed successfully\n");
+
+       rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
+       if (rc) {
+               DP_ERR(cdev, "hw prepare failed\n");
+               goto err2;
+       }
+
+       DP_INFO(cdev, "qed_probe completed successfully\n");
+
+       return cdev;
+
+err2:
+       qed_free_pci(cdev);
+err1:
+       qed_free_cdev(cdev);
+err0:
+       return NULL;
+}
+
+static void qed_remove(struct qed_dev *cdev)
+{
+       if (!cdev)
+               return;
+
+       qed_hw_remove(cdev);
+
+       qed_free_pci(cdev);
+
+       qed_set_power_state(cdev, PCI_D3hot);
+
+       qed_free_cdev(cdev);
+}
+
+static void qed_disable_msix(struct qed_dev *cdev)
+{
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               pci_disable_msix(cdev->pdev);
+               kfree(cdev->int_params.msix_table);
+       } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
+               pci_disable_msi(cdev->pdev);
+       }
+
+       memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
+}
+
+static int qed_enable_msix(struct qed_dev *cdev,
+                          struct qed_int_params *int_params)
+{
+       int i, rc, cnt;
+
+       cnt = int_params->in.num_vectors;
+
+       for (i = 0; i < cnt; i++)
+               int_params->msix_table[i].entry = i;
+
+       rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
+                                  int_params->in.min_msix_cnt, cnt);
+       if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
+           (rc % cdev->num_hwfns)) {
+               pci_disable_msix(cdev->pdev);
+
+               /* If fastpath is initialized, we need at least one interrupt
+                * per hwfn [and the slow path interrupts]. New requested number
+                * should be a multiple of the number of hwfns.
+                */
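+               /* Illustrative: with 2 hwfns, 9 requested vectors and
+                * only 7 granted, 7 is rounded down to (7 / 2) * 2 = 6
+                * and the allocation is retried with exactly 6.
+                */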
+               cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
+               DP_NOTICE(cdev,
+                         "Trying to enable MSI-X with fewer vectors (%d out of %d)\n",
+                         cnt, int_params->in.num_vectors);
+               rc = pci_enable_msix_exact(cdev->pdev,
+                                          int_params->msix_table, cnt);
+               if (!rc)
+                       rc = cnt;
+       }
+
+       if (rc > 0) {
+               /* MSI-x configuration was achieved */
+               int_params->out.int_mode = QED_INT_MODE_MSIX;
+               int_params->out.num_vectors = rc;
+               rc = 0;
+       } else {
+               DP_NOTICE(cdev,
+                         "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
+                         cnt, rc);
+       }
+
+       return rc;
+}
+
+/* This function outputs the int mode and the number of enabled MSI-X vectors */
+static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
+{
+       struct qed_int_params *int_params = &cdev->int_params;
+       struct msix_entry *tbl;
+       int rc = 0, cnt;
+
+       switch (int_params->in.int_mode) {
+       case QED_INT_MODE_MSIX:
+               /* Allocate MSIX table */
+               cnt = int_params->in.num_vectors;
+               int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
+               if (!int_params->msix_table) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               /* Enable MSIX */
+               rc = qed_enable_msix(cdev, int_params);
+               if (!rc)
+                       goto out;
+
+               DP_NOTICE(cdev, "Failed to enable MSI-X\n");
+               kfree(int_params->msix_table);
+               if (force_mode)
+                       goto out;
+               /* Fallthrough */
+
+       case QED_INT_MODE_MSI:
+               rc = pci_enable_msi(cdev->pdev);
+               if (!rc) {
+                       int_params->out.int_mode = QED_INT_MODE_MSI;
+                       goto out;
+               }
+
+               DP_NOTICE(cdev, "Failed to enable MSI\n");
+               if (force_mode)
+                       goto out;
+               /* Fallthrough */
+
+       case QED_INT_MODE_INTA:
+               int_params->out.int_mode = QED_INT_MODE_INTA;
+               rc = 0;
+               goto out;
+       default:
+               DP_NOTICE(cdev, "Unknown int_mode value %d\n",
+                         int_params->in.int_mode);
+               rc = -EINVAL;
+       }
+
+out:
+       cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
+
+       return rc;
+}
+
+static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
+                                   int index, void(*handler)(void *))
+{
+       struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+       int relative_idx = index / cdev->num_hwfns;
+
+       hwfn->simd_proto_handler[relative_idx].func = handler;
+       hwfn->simd_proto_handler[relative_idx].token = token;
+}
+
+static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
+{
+       struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+       int relative_idx = index / cdev->num_hwfns;
+
+       memset(&hwfn->simd_proto_handler[relative_idx], 0,
+              sizeof(struct qed_simd_fp_handler));
+}
+
+static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
+{
+       tasklet_schedule((struct tasklet_struct *)tasklet);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t qed_single_int(int irq, void *dev_instance)
+{
+       struct qed_dev *cdev = (struct qed_dev *)dev_instance;
+       struct qed_hwfn *hwfn;
+       irqreturn_t rc = IRQ_NONE;
+       u64 status;
+       int i, j;
+
+       for (i = 0; i < cdev->num_hwfns; i++) {
+               status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
+
+               if (!status)
+                       continue;
+
+               hwfn = &cdev->hwfns[i];
+
+               /* Slowpath interrupt */
+               if (unlikely(status & 0x1)) {
+                       tasklet_schedule(hwfn->sp_dpc);
+                       status &= ~0x1;
+                       rc = IRQ_HANDLED;
+               }
+
+               /* Fastpath interrupts */
+               for (j = 0; j < 64; j++) {
+                       if ((0x2ULL << j) & status) {
+                               hwfn->simd_proto_handler[j].func(
+                                       hwfn->simd_proto_handler[j].token);
+                               status &= ~(0x2ULL << j);
+                               rc = IRQ_HANDLED;
+                       }
+               }
+
+               if (unlikely(status))
+                       DP_VERBOSE(hwfn, NETIF_MSG_INTR,
+                                  "got an unknown interrupt status 0x%llx\n",
+                                  status);
+       }
+
+       return rc;
+}
+
+static int qed_slowpath_irq_req(struct qed_dev *cdev)
+{
+       int i = 0, rc = 0;
+
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               /* Request all the slowpath MSI-X vectors */
+               for (i = 0; i < cdev->num_hwfns; i++) {
+                       snprintf(cdev->hwfns[i].name, NAME_SIZE,
+                                "sp-%d-%02x:%02x.%02x",
+                                i, cdev->pdev->bus->number,
+                                PCI_SLOT(cdev->pdev->devfn),
+                                cdev->hwfns[i].abs_pf_id);
+
+                       rc = request_irq(cdev->int_params.msix_table[i].vector,
+                                        qed_msix_sp_int, 0,
+                                        cdev->hwfns[i].name,
+                                        cdev->hwfns[i].sp_dpc);
+                       if (rc)
+                               break;
+
+                       DP_VERBOSE(&cdev->hwfns[i],
+                                  (NETIF_MSG_INTR | QED_MSG_SP),
+                                  "Requested slowpath MSI-X\n");
+               }
+
+               if (i != cdev->num_hwfns) {
+                       /* Free already-requested MSI-X vectors */
+                       for (i--; i >= 0; i--) {
+                               unsigned int vec =
+                                       cdev->int_params.msix_table[i].vector;
+                               synchronize_irq(vec);
+                               free_irq(cdev->int_params.msix_table[i].vector,
+                                        cdev->hwfns[i].sp_dpc);
+                       }
+               }
+       } else {
+               unsigned long flags = 0;
+
+               snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
+                        cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
+                        PCI_FUNC(cdev->pdev->devfn));
+
+               if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
+                       flags |= IRQF_SHARED;
+
+               rc = request_irq(cdev->pdev->irq, qed_single_int,
+                                flags, cdev->name, cdev);
+       }
+
+       return rc;
+}
+
+static void qed_slowpath_irq_free(struct qed_dev *cdev)
+{
+       int i;
+
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               for_each_hwfn(cdev, i) {
+                       synchronize_irq(cdev->int_params.msix_table[i].vector);
+                       free_irq(cdev->int_params.msix_table[i].vector,
+                                cdev->hwfns[i].sp_dpc);
+               }
+       } else {
+               free_irq(cdev->pdev->irq, cdev);
+       }
+}
+
+static int qed_nic_stop(struct qed_dev *cdev)
+{
+       int i, rc;
+
+       rc = qed_hw_stop(cdev);
+
+       for (i = 0; i < cdev->num_hwfns; i++) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               if (p_hwfn->b_sp_dpc_enabled) {
+                       tasklet_disable(p_hwfn->sp_dpc);
+                       p_hwfn->b_sp_dpc_enabled = false;
+                       DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
+                                  "Disabled sp tasklet [hwfn %d] at %p\n",
+                                  i, p_hwfn->sp_dpc);
+               }
+       }
+
+       return rc;
+}
+
+static int qed_nic_reset(struct qed_dev *cdev)
+{
+       int rc;
+
+       rc = qed_hw_reset(cdev);
+       if (rc)
+               return rc;
+
+       qed_resc_free(cdev);
+
+       return 0;
+}
+
+static int qed_nic_setup(struct qed_dev *cdev)
+{
+       int rc;
+
+       rc = qed_resc_alloc(cdev);
+       if (rc)
+               return rc;
+
+       DP_INFO(cdev, "Allocated qed resources\n");
+
+       qed_resc_setup(cdev);
+
+       return rc;
+}
+
+static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
+{
+       int limit = 0;
+
+       /* Mark the fastpath as free/used */
+       cdev->int_params.fp_initialized = cnt ? true : false;
+
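+       /* The 63-per-hwfn cap below mirrors the shared status word in
+        * qed_single_int(): bit 0 is the slowpath interrupt and bits
+        * 1..63 index the fastpath handlers.
+        */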
+       if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
+               limit = cdev->num_hwfns * 63;
+       else if (cdev->int_params.fp_msix_cnt)
+               limit = cdev->int_params.fp_msix_cnt;
+
+       if (!limit)
+               return -ENOMEM;
+
+       return min_t(int, cnt, limit);
+}
+
+static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
+{
+       memset(info, 0, sizeof(struct qed_int_info));
+
+       if (!cdev->int_params.fp_initialized) {
+               DP_INFO(cdev,
+                       "Protocol driver requested interrupt information, but interrupt support is not yet configured\n");
+               return -EINVAL;
+       }
+
+       /* Need to expose only MSI-X information; Single IRQ is handled solely
+        * by qed.
+        */
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               int msix_base = cdev->int_params.fp_msix_base;
+
+               info->msix_cnt = cdev->int_params.fp_msix_cnt;
+               info->msix = &cdev->int_params.msix_table[msix_base];
+       }
+
+       return 0;
+}
+
+static int qed_slowpath_setup_int(struct qed_dev *cdev,
+                                 enum qed_int_mode int_mode)
+{
+       int rc, i;
+       u8 num_vectors = 0;
+
+       memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+
+       cdev->int_params.in.int_mode = int_mode;
+       for_each_hwfn(cdev, i)
+               num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
+       cdev->int_params.in.num_vectors = num_vectors;
+
+       /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+       cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+
+       rc = qed_set_int_mode(cdev, false);
+       if (rc)  {
+               DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+               return rc;
+       }
+
+       cdev->int_params.fp_msix_base = cdev->num_hwfns;
+       cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
+                                      cdev->num_hwfns;
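+       /* Illustrative vector layout (assuming 2 hwfns and 10 granted
+        * vectors): entries 0..1 are the per-hwfn slowpath vectors,
+        * while fp_msix_base = 2 and fp_msix_cnt = 8 describe the
+        * fastpath range 2..9.
+        */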
+
+       return 0;
+}
+
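+/* qed_unzip_data() below inflates a zipped firmware blob into unzip_buf
+ * using the per-hwfn zlib stream and returns the output size in 32-bit
+ * dwords, or 0 on error. A minimal usage sketch (hypothetical buffers
+ * and lengths):
+ *
+ *   u32 dwords = qed_unzip_data(p_hwfn, zipped_len, zipped_buf,
+ *                               out_len, out_buf);
+ *   if (!dwords)
+ *           return -EINVAL; // inflate failed; already logged above
+ */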
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
+                  u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+       int rc;
+
+       p_hwfn->stream->next_in = input_buf;
+       p_hwfn->stream->avail_in = input_len;
+       p_hwfn->stream->next_out = unzip_buf;
+       p_hwfn->stream->avail_out = max_size;
+
+       rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+       if (rc != Z_OK) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
+                          rc);
+               return 0;
+       }
+
+       rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
+       zlib_inflateEnd(p_hwfn->stream);
+
+       if (rc != Z_OK && rc != Z_STREAM_END) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
+                          p_hwfn->stream->msg, rc);
+               return 0;
+       }
+
+       return p_hwfn->stream->total_out / 4;
+}
+
+static int qed_alloc_stream_mem(struct qed_dev *cdev)
+{
+       int i;
+       void *workspace;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
+               if (!p_hwfn->stream)
+                       return -ENOMEM;
+
+               workspace = vzalloc(zlib_inflate_workspacesize());
+               if (!workspace)
+                       return -ENOMEM;
+               p_hwfn->stream->workspace = workspace;
+       }
+
+       return 0;
+}
+
+static void qed_free_stream_mem(struct qed_dev *cdev)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               if (!p_hwfn->stream)
+                       return;
+
+               vfree(p_hwfn->stream->workspace);
+               kfree(p_hwfn->stream);
+       }
+}
+
+static void qed_update_pf_params(struct qed_dev *cdev,
+                                struct qed_pf_params *params)
+{
+       int i;
+
+       for (i = 0; i < cdev->num_hwfns; i++) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               p_hwfn->pf_params = *params;
+       }
+}
+
+static int qed_slowpath_start(struct qed_dev *cdev,
+                             struct qed_slowpath_params *params)
+{
+       struct qed_mcp_drv_version drv_version;
+       const u8 *data = NULL;
+       struct qed_hwfn *hwfn;
+       int rc;
+
+       rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+                             &cdev->pdev->dev);
+       if (rc) {
+               DP_NOTICE(cdev,
+                         "Failed to find fw file - /lib/firmware/%s\n",
+                         QED_FW_FILE_NAME);
+               goto err;
+       }
+
+       rc = qed_nic_setup(cdev);
+       if (rc)
+               goto err;
+
+       rc = qed_slowpath_setup_int(cdev, params->int_mode);
+       if (rc)
+               goto err1;
+
+       /* Request the slowpath IRQ */
+       rc = qed_slowpath_irq_req(cdev);
+       if (rc)
+               goto err2;
+
+       /* Allocate stream for unzipping */
+       rc = qed_alloc_stream_mem(cdev);
+       if (rc) {
+               DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+               goto err3;
+       }
+
+       /* Start the slowpath */
+       data = cdev->firmware->data;
+
+       rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+                        true, data);
+       if (rc)
+               goto err3;
+
+       DP_INFO(cdev,
+               "HW initialization and function start completed successfully\n");
+
+       hwfn = QED_LEADING_HWFN(cdev);
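+       /* Editor's note: the packing below yields a major:minor:rev:eng
+        * layout, one byte each; e.g. major 8, minor 4, rev 2, eng 0
+        * packs to 0x08040200.
+        */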
+       drv_version.version = (params->drv_major << 24) |
+                             (params->drv_minor << 16) |
+                             (params->drv_rev << 8) |
+                             (params->drv_eng);
+       strlcpy(drv_version.name, params->name,
+               MCP_DRV_VER_STR_SIZE - 4);
+       rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+                                     &drv_version);
+       if (rc) {
+               DP_NOTICE(cdev, "Failed sending drv version command\n");
+               return rc;
+       }
+
+       return 0;
+
+err3:
+       qed_free_stream_mem(cdev);
+       qed_slowpath_irq_free(cdev);
+err2:
+       qed_disable_msix(cdev);
+err1:
+       qed_resc_free(cdev);
+err:
+       release_firmware(cdev->firmware);
+
+       return rc;
+}
+
+static int qed_slowpath_stop(struct qed_dev *cdev)
+{
+       if (!cdev)
+               return -ENODEV;
+
+       qed_free_stream_mem(cdev);
+
+       qed_nic_stop(cdev);
+       qed_slowpath_irq_free(cdev);
+
+       qed_disable_msix(cdev);
+       qed_nic_reset(cdev);
+
+       release_firmware(cdev->firmware);
+
+       return 0;
+}
+
+static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
+                      char ver_str[VER_SIZE])
+{
+       int i;
+
+       memcpy(cdev->name, name, NAME_SIZE);
+       for_each_hwfn(cdev, i)
+               snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+
+       memcpy(cdev->ver_str, ver_str, VER_SIZE);
+       cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static u32 qed_sb_init(struct qed_dev *cdev,
+                      struct qed_sb_info *sb_info,
+                      void *sb_virt_addr,
+                      dma_addr_t sb_phy_addr, u16 sb_id,
+                      enum qed_sb_type type)
+{
+       struct qed_hwfn *p_hwfn;
+       int hwfn_index;
+       u16 rel_sb_id;
+       u8 n_hwfns;
+       u32 rc;
+
+       /* Only L2 queues are spread across both engines in CMT mode; all
+        * other protocols (e.g. RoCE, storage) are confined to a single
+        * engine (engine 0), so their status blocks map to the leading hwfn.
+        */
+       if (type == QED_SB_TYPE_L2_QUEUE)
+               n_hwfns = cdev->num_hwfns;
+       else
+               n_hwfns = 1;
+
+       hwfn_index = sb_id % n_hwfns;
+       p_hwfn = &cdev->hwfns[hwfn_index];
+       rel_sb_id = sb_id / n_hwfns;
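+       /* Editor's illustration: with two hwfns (CMT), L2 status blocks
+        * interleave across engines: sb_id 0 -> hwfn 0 / rel_sb_id 0,
+        * sb_id 1 -> hwfn 1 / rel_sb_id 0, sb_id 2 -> hwfn 0 / rel_sb_id 1.
+        */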
+
+       DP_VERBOSE(cdev, NETIF_MSG_INTR,
+                  "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+                  hwfn_index, rel_sb_id, sb_id);
+
+       rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+                            sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+       return rc;
+}
+
+static u32 qed_sb_release(struct qed_dev *cdev,
+                         struct qed_sb_info *sb_info,
+                         u16 sb_id)
+{
+       struct qed_hwfn *p_hwfn;
+       int hwfn_index;
+       u16 rel_sb_id;
+       u32 rc;
+
+       hwfn_index = sb_id % cdev->num_hwfns;
+       p_hwfn = &cdev->hwfns[hwfn_index];
+       rel_sb_id = sb_id / cdev->num_hwfns;
+
+       DP_VERBOSE(cdev, NETIF_MSG_INTR,
+                  "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
+                  hwfn_index, rel_sb_id, sb_id);
+
+       rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
+
+       return rc;
+}
+
+static int qed_set_link(struct qed_dev *cdev,
+                       struct qed_link_params *params)
+{
+       struct qed_hwfn *hwfn;
+       struct qed_mcp_link_params *link_params;
+       struct qed_ptt *ptt;
+       int rc;
+
+       if (!cdev)
+               return -ENODEV;
+
+       /* The link should be set only once per PF */
+       hwfn = &cdev->hwfns[0];
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EBUSY;
+
+       link_params = qed_mcp_get_link_params(hwfn);
+       if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+               link_params->speed.autoneg = params->autoneg;
+       if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+               link_params->speed.advertised_speeds = 0;
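+               /* Editor's note: the `& 0' tests below are placeholders,
+                * seemingly because no generic ethtool SUPPORTED_* bits
+                * exist yet for 50G/100G; the `|= 0' assignments in
+                * qed_fill_link() below serve the same role.
+                */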
+               if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
+                   (params->adv_speeds & SUPPORTED_1000baseT_Full))
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+               if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+               if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+               if (params->adv_speeds & 0)
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+               if (params->adv_speeds & 0)
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+       }
+       if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
+               link_params->speed.forced_speed = params->forced_speed;
+
+       rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return rc;
+}
+
+static int qed_get_port_type(u32 media_type)
+{
+       int port_type;
+
+       switch (media_type) {
+       case MEDIA_SFPP_10G_FIBER:
+       case MEDIA_SFP_1G_FIBER:
+       case MEDIA_XFP_FIBER:
+       case MEDIA_KR:
+               port_type = PORT_FIBRE;
+               break;
+       case MEDIA_DA_TWINAX:
+               port_type = PORT_DA;
+               break;
+       case MEDIA_BASE_T:
+               port_type = PORT_TP;
+               break;
+       case MEDIA_NOT_PRESENT:
+               port_type = PORT_NONE;
+               break;
+       case MEDIA_UNSPECIFIED:
+       default:
+               port_type = PORT_OTHER;
+               break;
+       }
+       return port_type;
+}
+
+static void qed_fill_link(struct qed_hwfn *hwfn,
+                         struct qed_link_output *if_link)
+{
+       struct qed_mcp_link_params params;
+       struct qed_mcp_link_state link;
+       struct qed_mcp_link_capabilities link_caps;
+       u32 media_type;
+
+       memset(if_link, 0, sizeof(*if_link));
+
+       /* Prepare source inputs */
+       memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+       memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+       memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
+              sizeof(link_caps));
+
+       /* Set the link parameters to pass to protocol driver */
+       if (link.link_up)
+               if_link->link_up = true;
+
+       /* TODO - at the moment assume supported and advertised speeds are equal */
+       if_link->supported_caps = SUPPORTED_FIBRE;
+       if (params.speed.autoneg)
+               if_link->supported_caps |= SUPPORTED_Autoneg;
+       if (params.pause.autoneg ||
+           (params.pause.forced_rx && params.pause.forced_tx))
+               if_link->supported_caps |= SUPPORTED_Asym_Pause;
+       if (params.pause.autoneg || params.pause.forced_rx ||
+           params.pause.forced_tx)
+               if_link->supported_caps |= SUPPORTED_Pause;
+
+       if_link->advertised_caps = if_link->supported_caps;
+       if (params.speed.advertised_speeds &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+               if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
+                                          SUPPORTED_1000baseT_Full;
+       if (params.speed.advertised_speeds &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+               if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+       if (params.speed.advertised_speeds &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+               if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
+       if (params.speed.advertised_speeds &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+               if_link->advertised_caps |= 0;
+       if (params.speed.advertised_speeds &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+               if_link->advertised_caps |= 0;
+
+       if (link_caps.speed_capabilities &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+               if_link->supported_caps |= SUPPORTED_1000baseT_Half |
+                                          SUPPORTED_1000baseT_Full;
+       if (link_caps.speed_capabilities &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+               if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+       if (link_caps.speed_capabilities &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+               if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+       if (link_caps.speed_capabilities &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+               if_link->supported_caps |= 0;
+       if (link_caps.speed_capabilities &
+               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+               if_link->supported_caps |= 0;
+
+       if (link.link_up)
+               if_link->speed = link.speed;
+
+       /* TODO - fill duplex properly */
+       if_link->duplex = DUPLEX_FULL;
+       qed_mcp_get_media_type(hwfn->cdev, &media_type);
+       if_link->port = qed_get_port_type(media_type);
+
+       if_link->autoneg = params.speed.autoneg;
+
+       if (params.pause.autoneg)
+               if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+       if (params.pause.forced_rx)
+               if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+       if (params.pause.forced_tx)
+               if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+       /* Link partner capabilities */
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_1G_HD)
+               if_link->lp_caps |= SUPPORTED_1000baseT_Half;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_1G_FD)
+               if_link->lp_caps |= SUPPORTED_1000baseT_Full;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_10G)
+               if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_40G)
+               if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_50G)
+               if_link->lp_caps |= 0;
+       if (link.partner_adv_speed &
+           QED_LINK_PARTNER_SPEED_100G)
+               if_link->lp_caps |= 0;
+
+       if (link.an_complete)
+               if_link->lp_caps |= SUPPORTED_Autoneg;
+
+       if (link.partner_adv_pause)
+               if_link->lp_caps |= SUPPORTED_Pause;
+       if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
+           link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
+               if_link->lp_caps |= SUPPORTED_Asym_Pause;
+}
+
+static void qed_get_current_link(struct qed_dev *cdev,
+                                struct qed_link_output *if_link)
+{
+       qed_fill_link(&cdev->hwfns[0], if_link);
+}
+
+void qed_link_update(struct qed_hwfn *hwfn)
+{
+       void *cookie = hwfn->cdev->ops_cookie;
+       struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+       struct qed_link_output if_link;
+
+       qed_fill_link(hwfn, &if_link);
+
+       if (IS_LEAD_HWFN(hwfn) && cookie)
+               op->link_update(cookie, &if_link);
+}
+
+static int qed_drain(struct qed_dev *cdev)
+{
+       struct qed_hwfn *hwfn;
+       struct qed_ptt *ptt;
+       int i, rc;
+
+       for_each_hwfn(cdev, i) {
+               hwfn = &cdev->hwfns[i];
+               ptt = qed_ptt_acquire(hwfn);
+               if (!ptt) {
+                       DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
+                       return -EBUSY;
+               }
+               rc = qed_mcp_drain(hwfn, ptt);
+               if (rc) {
+                       qed_ptt_release(hwfn, ptt);
+                       return rc;
+               }
+               qed_ptt_release(hwfn, ptt);
+       }
+
+       return 0;
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+       .probe = &qed_probe,
+       .remove = &qed_remove,
+       .set_power_state = &qed_set_power_state,
+       .set_id = &qed_set_id,
+       .update_pf_params = &qed_update_pf_params,
+       .slowpath_start = &qed_slowpath_start,
+       .slowpath_stop = &qed_slowpath_stop,
+       .set_fp_int = &qed_set_int_fp,
+       .get_fp_int = &qed_get_int_fp,
+       .sb_init = &qed_sb_init,
+       .sb_release = &qed_sb_release,
+       .simd_handler_config = &qed_simd_handler_config,
+       .simd_handler_clean = &qed_simd_handler_clean,
+       .set_link = &qed_set_link,
+       .get_link = &qed_get_current_link,
+       .drain = &qed_drain,
+       .update_msglvl = &qed_init_dp,
+       .chain_alloc = &qed_chain_alloc,
+       .chain_free = &qed_chain_free,
+};
+
+u32 qed_get_protocol_version(enum qed_protocol protocol)
+{
+       switch (protocol) {
+       case QED_PROTOCOL_ETH:
+               return QED_ETH_INTERFACE_VERSION;
+       default:
+               return 0;
+       }
+}
+EXPORT_SYMBOL(qed_get_protocol_version);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
new file mode 100644 (file)
index 0000000..20d048c
--- /dev/null
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#define CHIP_MCP_RESP_ITER_US 10
+
+#define QED_DRV_MB_MAX_RETRIES (500 * 1000)    /* Account for 5 sec */
+#define QED_MCP_RESET_RETRIES  (50 * 1000)     /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)          \
+       qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+              _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+       qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
+       DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+                    offsetof(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)        \
+       DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+                    offsetof(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+                 DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+               return false;
+       return true;
+}
+
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_PORT);
+       u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
+
+       p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+                                                  MFW_PORT(p_hwfn));
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "port_addr = 0x%x, port_id 0x%02x\n",
+                  p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt)
+{
+       u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+       u32 tmp, i;
+
+       if (!p_hwfn->mcp_info->public_base)
+               return;
+
+       for (i = 0; i < length; i++) {
+               tmp = qed_rd(p_hwfn, p_ptt,
+                            p_hwfn->mcp_info->mfw_mb_addr +
+                            (i << 2) + sizeof(u32));
+
+               /* The MB data is actually BE; convert it to CPU endianness */
+               ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+                       be32_to_cpu((__force __be32)tmp);
+       }
+}
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn)
+{
+       if (p_hwfn->mcp_info) {
+               kfree(p_hwfn->mcp_info->mfw_mb_cur);
+               kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+       }
+       kfree(p_hwfn->mcp_info);
+
+       return 0;
+}
+
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+       u32 drv_mb_offsize, mfw_mb_offsize;
+       u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+       p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+       if (!p_info->public_base)
+               return 0;
+
+       p_info->public_base |= GRCBASE_MCP;
+
+       /* Calculate the driver and MFW mailbox address */
+       drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
+                               SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                    PUBLIC_DRV_MB));
+       p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+                  drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+       /* Set the MFW MB address */
+       mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+                               SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                    PUBLIC_MFW_MB));
+       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+       p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
+
+       /* Get the current driver mailbox sequence before sending
+        * the first command
+        */
+       p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+                            DRV_MSG_SEQ_NUMBER_MASK;
+
+       /* Get current FW pulse sequence */
+       p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+                               DRV_PULSE_SEQ_MASK;
+
+       p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+       return 0;
+}
+
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_info *p_info;
+       u32 size;
+
+       /* Allocate mcp_info structure */
+       p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
+       if (!p_hwfn->mcp_info)
+               goto err;
+       p_info = p_hwfn->mcp_info;
+
+       if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
+               DP_NOTICE(p_hwfn, "MCP is not initialized\n");
+               /* Do not free mcp_info here, since public_base indicates that
+                * the MCP is not initialized
+                */
+               return 0;
+       }
+
+       size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+       p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
+       p_info->mfw_mb_shadow =
+               kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
+                               p_info->mfw_mb_length), GFP_ATOMIC);
+       if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
+               goto err;
+
+       /* Initialize the MFW mutex */
+       mutex_init(&p_info->mutex);
+
+       return 0;
+
+err:
+       DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
+       qed_mcp_free(p_hwfn);
+       return -ENOMEM;
+}
+
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt)
+{
+       u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+       u8 delay = CHIP_MCP_RESP_ITER_US;
+       u32 org_mcp_reset_seq, cnt = 0;
+       int rc = 0;
+
+       /* Set drv command along with the updated sequence */
+       org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
+                 (DRV_MSG_CODE_MCP_RESET | seq));
+
+       do {
+               /* Wait for MFW response */
+               udelay(delay);
+               /* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
+       } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
+                                             MISCS_REG_GENERIC_POR_0)) &&
+                (cnt++ < QED_MCP_RESET_RETRIES));
+
+       if (org_mcp_reset_seq !=
+           qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "MCP was reset after %d usec\n", cnt * delay);
+       } else {
+               DP_ERR(p_hwfn, "Failed to reset MCP\n");
+               rc = -EAGAIN;
+       }
+
+       return rc;
+}
+
+static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         u32 cmd,
+                         u32 param,
+                         u32 *o_mcp_resp,
+                         u32 *o_mcp_param)
+{
+       u8 delay = CHIP_MCP_RESP_ITER_US;
+       u32 seq, cnt = 1, actual_mb_seq;
+       int rc = 0;
+
+       /* Get actual driver mailbox sequence */
+       actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+                       DRV_MSG_SEQ_NUMBER_MASK;
+
+       /* Use MCP history register to check if MCP reset occurred between
+        * init time and now.
+        */
+       if (p_hwfn->mcp_info->mcp_hist !=
+           qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
+               qed_load_mcp_offsets(p_hwfn, p_ptt);
+               qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+       }
+       seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+       /* Set drv param */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+       /* Set drv command along with the updated sequence */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "wrote command (%x) to MFW MB param 0x%08x\n",
+                  (cmd | seq), param);
+
+       do {
+               /* Wait for MFW response */
+               udelay(delay);
+               *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+               /* Give the FW up to 5 seconds (500 * 1000 * 10 usec) */
+       } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+                (cnt++ < QED_DRV_MB_MAX_RETRIES));
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "[after %d usec] read (%x) seq is (%x) from FW MB\n",
+                  cnt * delay, *o_mcp_resp, seq);
+
+       /* Is this a reply to our command? */
+       if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+               *o_mcp_resp &= FW_MSG_CODE_MASK;
+               /* Get the MCP param */
+               *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+       } else {
+               /* FW BUG! */
+               DP_ERR(p_hwfn, "MFW failed to respond!\n");
+               *o_mcp_resp = 0;
+               rc = -EAGAIN;
+       }
+       return rc;
+}
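+
+/* Editor's note: the mailbox handshake above is (1) write drv_mb_param,
+ * (2) write drv_mb_header = cmd | seq, (3) poll fw_mb_header until its
+ * sequence field matches 'seq', then read the response from fw_mb_param.
+ * The per-command sequence number lets the driver tell a fresh reply
+ * from a stale one.
+ */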
+
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+               struct qed_ptt *p_ptt,
+               u32 cmd,
+               u32 param,
+               u32 *o_mcp_resp,
+               u32 *o_mcp_param)
+{
+       int rc = 0;
+
+       /* MCP not initialized */
+       if (!qed_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+               return -EBUSY;
+       }
+
+       /* Lock Mutex to ensure only single thread is
+        * accessing the MCP at one time
+        */
+       mutex_lock(&p_hwfn->mcp_info->mutex);
+       rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+                           o_mcp_resp, o_mcp_param);
+       /* Release Mutex */
+       mutex_unlock(&p_hwfn->mcp_info->mutex);
+
+       return rc;
+}
+
+static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
+                               struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt)
+{
+       u32 i;
+
+       /* Copy version string to MCP */
+       for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
+               DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
+                         *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u32 *p_load_code)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u32 param;
+       int rc;
+
+       if (!qed_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+               return -EBUSY;
+       }
+
+       /* Save driver's version to shmem */
+       qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+                  p_hwfn->mcp_info->drv_mb_seq,
+                  p_hwfn->mcp_info->drv_pulse_seq);
+
+       /* Load Request */
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
+                        (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+                         cdev->drv_type),
+                        p_load_code, &param);
+
+       /* if mcp fails to respond we must abort */
+       if (rc) {
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
+
+       /* If the MFW refused the load request we must abort. This can
+        * happen in the following cases:
+        * - The other port is in diagnostic mode.
+        * - A previously loaded function on the engine is not compliant
+        *   with the requester.
+        * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+        */
+       if (!(*p_load_code) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+               DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt,
+                                      bool b_reset)
+{
+       struct qed_mcp_link_state *p_link;
+       u32 status = 0;
+
+       p_link = &p_hwfn->mcp_info->link_output;
+       memset(p_link, 0, sizeof(*p_link));
+       if (!b_reset) {
+               status = qed_rd(p_hwfn, p_ptt,
+                               p_hwfn->mcp_info->port_addr +
+                               offsetof(struct public_port, link_status));
+               DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
+                          "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
+                          status,
+                          (u32)(p_hwfn->mcp_info->port_addr +
+                                offsetof(struct public_port,
+                                         link_status)));
+       } else {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Resetting link indications\n");
+               return;
+       }
+
+       p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+
+       p_link->full_duplex = true;
+       switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+       case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+               p_link->speed = 100000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+               p_link->speed = 50000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+               p_link->speed = 40000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+               p_link->speed = 25000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+               p_link->speed = 20000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+               p_link->speed = 10000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+               p_link->full_duplex = false;
+       /* Fall-through */
+       case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+               p_link->speed = 1000;
+               break;
+       default:
+               p_link->speed = 0;
+       }
+
+       /* Correct speed according to bandwidth allocation */
+       if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
+               p_link->speed = p_link->speed *
+                               p_hwfn->mcp_info->func_info.bandwidth_max /
+                               100;
+               qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                              p_link->speed);
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Configured MAX bandwidth to be %08x Mb/sec\n",
+                          p_link->speed);
+       }
+
+       p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+       p_link->an_complete = !!(status &
+                                LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+       p_link->parallel_detection = !!(status &
+                                       LINK_STATUS_PARALLEL_DETECTION_USED);
+       p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_1G_FD : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_1G_HD : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_10G : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_20G : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_40G : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_50G : 0;
+       p_link->partner_adv_speed |=
+               (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+               QED_LINK_PARTNER_SPEED_100G : 0;
+
+       p_link->partner_tx_flow_ctrl_en =
+               !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+       p_link->partner_rx_flow_ctrl_en =
+               !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+       switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+       case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+               p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
+               break;
+       case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+               p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
+               break;
+       case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+               p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
+               break;
+       default:
+               p_link->partner_adv_pause = 0;
+       }
+
+       p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+       qed_link_update(p_hwfn);
+}
+
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    bool b_up)
+{
+       struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+       u32 param = 0, reply = 0, cmd;
+       struct pmm_phy_cfg phy_cfg;
+       int rc = 0;
+       u32 i;
+
+       if (!qed_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+               return -EBUSY;
+       }
+
+       /* Set the shmem configuration according to params */
+       memset(&phy_cfg, 0, sizeof(phy_cfg));
+       cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+       if (!params->speed.autoneg)
+               phy_cfg.speed = params->speed.forced_speed;
+       phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+       phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+       phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+       phy_cfg.adv_speed = params->speed.advertised_speeds;
+       phy_cfg.loopback_mode = params->loopback_mode;
+
+       /* Write the requested configuration to shmem */
+       for (i = 0; i < sizeof(phy_cfg); i += 4)
+               qed_wr(p_hwfn, p_ptt,
+                      p_hwfn->mcp_info->drv_mb_addr +
+                      offsetof(struct public_drv_mb, union_data) + i,
+                      ((u32 *)&phy_cfg)[i >> 2]);
+
+       if (b_up) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
+                          phy_cfg.speed,
+                          phy_cfg.pause,
+                          phy_cfg.adv_speed,
+                          phy_cfg.loopback_mode,
+                          phy_cfg.feature_config_flags);
+       } else {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Resetting link\n");
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+                  p_hwfn->mcp_info->drv_mb_seq,
+                  p_hwfn->mcp_info->drv_pulse_seq);
+
+       /* Load Request */
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);
+
+       /* if mcp fails to respond we must abort */
+       if (rc) {
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
+
+       /* Reset the link status if needed */
+       if (!b_up)
+               qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+       return 0;
+}
+
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_info *info = p_hwfn->mcp_info;
+       int rc = 0;
+       bool found = false;
+       u16 i;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
+
+       /* Read Messages from MFW */
+       qed_mcp_read_mb(p_hwfn, p_ptt);
+
+       /* Compare current messages to old ones */
+       for (i = 0; i < info->mfw_mb_length; i++) {
+               if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+                       continue;
+
+               found = true;
+
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+                          i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+               switch (i) {
+               case MFW_DRV_MSG_LINK_CHANGE:
+                       qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
+                       break;
+               default:
+                       DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+                       rc = -EINVAL;
+               }
+       }
+
+       /* ACK everything */
+       for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+               __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
+
+               /* The MFW expects the answer in BE, so force the write into that format */
+               qed_wr(p_hwfn, p_ptt,
+                      info->mfw_mb_addr + sizeof(u32) +
+                      MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+                      sizeof(u32) + i * sizeof(u32),
+                      (__force u32)val);
+       }
+
+       if (!found) {
+               DP_NOTICE(p_hwfn,
+                         "Received an MFW message indication but no new message!\n");
+               rc = -EINVAL;
+       }
+
+       /* Copy the new mfw messages into the shadow */
+       memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+       return rc;
+}
+
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+                       u32 *p_mfw_ver)
+{
+       struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+       struct qed_ptt *p_ptt;
+       u32 global_offsize;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EBUSY;
+
+       global_offsize = qed_rd(p_hwfn, p_ptt,
+                               SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+                                                    public_base,
+                                                    PUBLIC_GLOBAL));
+       *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
+                           SECTION_ADDR(global_offsize, 0) +
+                           offsetof(struct public_global, mfw_ver));
+
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       return 0;
+}
+
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+                          u32 *p_media_type)
+{
+       struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+       struct qed_ptt  *p_ptt;
+
+       if (!qed_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+               return -EBUSY;
+       }
+
+       *p_media_type = MEDIA_UNSPECIFIED;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EBUSY;
+
+       *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+                              offsetof(struct public_port, media_type));
+
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       return 0;
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct public_func *p_data,
+                                 int pfid)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+       u32 i, size;
+
+       memset(p_data, 0, sizeof(*p_data));
+
+       size = min_t(u32, sizeof(*p_data),
+                    QED_SECTION_SIZE(mfw_path_offsize));
+       for (i = 0; i < size / sizeof(u32); i++)
+               ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+                                           func_addr + (i << 2));
+
+       return size;
+}
+
+static int
+qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
+                       struct public_func *p_info,
+                       enum qed_pci_personality *p_proto)
+{
+       int rc = 0;
+
+       switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+       case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+               *p_proto = QED_PCI_ETH;
+               break;
+       default:
+               rc = -EINVAL;
+       }
+
+       return rc;
+}
+
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_function_info *info;
+       struct public_func shmem_info;
+
+       qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                              MCP_PF_ID(p_hwfn));
+       info = &p_hwfn->mcp_info->func_info;
+
+       info->pause_on_host = (shmem_info.config &
+                              FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+       if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+                                   &info->protocol)) {
+               DP_ERR(p_hwfn, "Unknown personality %08x\n",
+                      (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+               return -EINVAL;
+       }
+
+       if (p_hwfn->cdev->mf_mode != SF) {
+               info->bandwidth_min = (shmem_info.config &
+                                      FUNC_MF_CFG_MIN_BW_MASK) >>
+                                     FUNC_MF_CFG_MIN_BW_SHIFT;
+               if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+                       DP_INFO(p_hwfn,
+                               "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+                               info->bandwidth_min);
+                       info->bandwidth_min = 1;
+               }
+
+               info->bandwidth_max = (shmem_info.config &
+                                      FUNC_MF_CFG_MAX_BW_MASK) >>
+                                     FUNC_MF_CFG_MAX_BW_SHIFT;
+               if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+                       DP_INFO(p_hwfn,
+                               "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+                               info->bandwidth_max);
+                       info->bandwidth_max = 100;
+               }
+       }
+
+       if (shmem_info.mac_upper || shmem_info.mac_lower) {
+               info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+               info->mac[1] = (u8)(shmem_info.mac_upper);
+               info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+               info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+               info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+               info->mac[5] = (u8)(shmem_info.mac_lower);
+       } else {
+               DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
+       }
+
+       info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+                        (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+       info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+                        (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
+       info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+       DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
+                  "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
+               info->pause_on_host, info->protocol,
+               info->bandwidth_min, info->bandwidth_max,
+               info->mac[0], info->mac[1], info->mac[2],
+               info->mac[3], info->mac[4], info->mac[5],
+               info->wwn_port, info->wwn_node, info->ovlan);
+
+       return 0;
+}
+
+struct qed_mcp_link_params
+*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn || !p_hwfn->mcp_info)
+               return NULL;
+       return &p_hwfn->mcp_info->link_input;
+}
+
+struct qed_mcp_link_state
+*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn || !p_hwfn->mcp_info)
+               return NULL;
+       return &p_hwfn->mcp_info->link_output;
+}
+
+struct qed_mcp_link_capabilities
+*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn || !p_hwfn->mcp_info)
+               return NULL;
+       return &p_hwfn->mcp_info->link_capabilities;
+}
+
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt)
+{
+       u32 resp = 0, param = 0;
+       int rc;
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt,
+                        DRV_MSG_CODE_NIG_DRAIN, 100,
+                        &resp, &param);
+
+       /* Wait for the drain to complete before returning */
+       msleep(120);
+
+       return rc;
+}
+
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt,
+                          u32 *p_flash_size)
+{
+       u32 flash_size;
+
+       flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+       flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+                     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+       flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+       *p_flash_size = flash_size;
+
+       return 0;
+}
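+
+/* Editor's note: MCP_BYTES_PER_MBIT_SHIFT is 17 because 1 Mbit is
+ * 2^17 = 131072 bytes, so a CFG4 field value of f yields 2^f Mbit of
+ * flash; e.g. f = 3 gives 1 << (3 + 17) bytes = 1 MiB (8 Mbit).
+ */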
+
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        struct qed_mcp_drv_version *p_ver)
+{
+       int rc = 0;
+       u32 param = 0, reply = 0, i;
+
+       if (!qed_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+               return -EBUSY;
+       }
+
+       DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
+                 p_ver->version);
+       /* Copy version string to shmem */
+       for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
+               DRV_MB_WR(p_hwfn, p_ptt,
+                         union_data.drv_version.name[i * sizeof(u32)],
+                         *(u32 *)&p_ver->name[i * sizeof(u32)]);
+       }
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
+                        &param);
+       if (rc) {
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
new file mode 100644 (file)
index 0000000..dbaae58
--- /dev/null
@@ -0,0 +1,369 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_MCP_H
+#define _QED_MCP_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "qed_hsi.h"
+
+struct qed_mcp_link_speed_params {
+       bool    autoneg;
+       u32     advertised_speeds;      /* bitmask of DRV_SPEED_CAPABILITY */
+       u32     forced_speed;      /* In Mb/s */
+};
+
+struct qed_mcp_link_pause_params {
+       bool    autoneg;
+       bool    forced_rx;
+       bool    forced_tx;
+};
+
+struct qed_mcp_link_params {
+       struct qed_mcp_link_speed_params        speed;
+       struct qed_mcp_link_pause_params        pause;
+       u32                                  loopback_mode;
+};
+
+struct qed_mcp_link_capabilities {
+       u32 speed_capabilities;
+};
+
+struct qed_mcp_link_state {
+       bool    link_up;
+
+       u32     speed; /* In Mb/s */
+       bool    full_duplex;
+
+       bool    an;
+       bool    an_complete;
+       bool    parallel_detection;
+       bool    pfc_enabled;
+
+#define QED_LINK_PARTNER_SPEED_1G_HD    BIT(0)
+#define QED_LINK_PARTNER_SPEED_1G_FD    BIT(1)
+#define QED_LINK_PARTNER_SPEED_10G      BIT(2)
+#define QED_LINK_PARTNER_SPEED_20G      BIT(3)
+#define QED_LINK_PARTNER_SPEED_40G      BIT(4)
+#define QED_LINK_PARTNER_SPEED_50G      BIT(5)
+#define QED_LINK_PARTNER_SPEED_100G     BIT(6)
+       u32     partner_adv_speed;
+
+       bool    partner_tx_flow_ctrl_en;
+       bool    partner_rx_flow_ctrl_en;
+
+#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define QED_LINK_PARTNER_BOTH_PAUSE (3)
+       u8      partner_adv_pause;
+
+       bool    sfp_tx_fault;
+};
+
+struct qed_mcp_function_info {
+       u8                              pause_on_host;
+
+       enum qed_pci_personality        protocol;
+
+       u8                              bandwidth_min;
+       u8                              bandwidth_max;
+
+       u8                              mac[ETH_ALEN];
+
+       u64                             wwn_port;
+       u64                             wwn_node;
+
+#define QED_MCP_VLAN_UNSET              (0xffff)
+       u16                             ovlan;
+};
+
+struct qed_mcp_nvm_common {
+       u32     offset;
+       u32     param;
+       u32     resp;
+       u32     cmd;
+};
+
+struct qed_mcp_drv_version {
+       u32     version;
+       u8      name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct qed_mcp_link_capabilities
+       *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return int
+ */
+int qed_mcp_set_link(struct qed_hwfn   *p_hwfn,
+                    struct qed_ptt     *p_ptt,
+                    bool               b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param cdev       - qed dev pointer
+ * @param mfw_ver    - mfw version value
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+                       u32 *mfw_ver);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param cdev      - qed dev pointer
+ * @param media_type - media type value
+ *
+ * @return int -
+ *      0 - Operation was successful.
+ *      -EBUSY - Operation failed
+ */
+int qed_mcp_get_media_type(struct qed_dev      *cdev,
+                          u32                  *media_type);
+
+/**
+ * @brief General function for sending commands to the MCP
+ *        mailbox. It holds a mutex for the entire operation,
+ *        from sending the request until the MCP response
+ *        arrives. The response is polled every 10 usec for up
+ *        to 5 seconds.
+ *
+ * @param p_hwfn     - hw function
+ * @param p_ptt      - PTT required for register access
+ * @param cmd        - command to be sent to the MCP.
+ * @param param      - Optional param
+ * @param o_mcp_resp - The MCP response code (excluding sequence).
+ * @param o_mcp_param - Optional parameter provided by the MCP
+ *                      response
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+               struct qed_ptt *p_ptt,
+               u32 cmd,
+               u32 param,
+               u32 *o_mcp_resp,
+               u32 *o_mcp_param);
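+
+/* Editor's illustration: a typical caller, modelled on qed_mcp_drain()
+ * in this patch (command and parameter values are examples only):
+ *
+ *    u32 resp = 0, param = 0;
+ *    int rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
+ *                         &resp, &param);
+ */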
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ *          (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size  - flash size in bytes to be filled.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_flash_size(struct qed_hwfn     *p_hwfn,
+                          struct qed_ptt       *p_ptt,
+                          u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param version - Version value
+ * @param name - Protocol driver name
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        struct qed_mcp_drv_version *p_ver);
+
+/* Using the hwfn number (and not pf_num) is required since in CMT mode
+ * the same pf_num may be used by two different hwfns.
+ * TODO - this shouldn't really be in a .h file; but until all fields
+ * required during hw-init are placed in their correct place in shmem,
+ * we need it in qed_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ?               \
+                                           ((rel_pfid) |                      \
+                                            ((p_hwfn)->abs_pf_id & 1) << 3) : \
+                                           rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
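+
+/* Editor's illustration: on a BB device, a hwfn with rel_pf_id 2 whose
+ * abs_pf_id is odd maps to MCP PF id (2 | (1 << 3)) = 10; on other
+ * devices the relative id is used as-is.
+ */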
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id %        \
+                                ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+struct qed_mcp_info {
+       struct mutex                            mutex; /* MCP access lock */
+       u32                                     public_base;
+       u32                                     drv_mb_addr;
+       u32                                     mfw_mb_addr;
+       u32                                     port_addr;
+       u16                                     drv_mb_seq;
+       u16                                     drv_pulse_seq;
+       struct qed_mcp_link_params              link_input;
+       struct qed_mcp_link_state               link_output;
+       struct qed_mcp_link_capabilities        link_capabilities;
+       struct qed_mcp_function_info            func_info;
+       u8                                      *mfw_mb_cur;
+       u8                                      *mfw_mb_shadow;
+       u16                                     mfw_mb_length;
+       u16                                     mcp_hist;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt);
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+int qed_mcp_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event is
+ * detected, it is handled here; otherwise the work is queued
+ * to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW and, in case the operation
+ *        succeeds, returns whether this PF is the first on the
+ *        chip/engine/port or function. This function should be
+ *        called when the driver is ready to accept MFW events,
+ *        after Storm initialization is done.
+ *
+ * @param p_hwfn       - hw function
+ * @param p_ptt        - PTT required for register access
+ * @param p_load_code  - The MCP response param containing one
+ *      of the following:
+ *      FW_MSG_CODE_DRV_LOAD_ENGINE
+ *      FW_MSG_CODE_DRV_LOAD_PORT
+ *      FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return int -
+ *      0 - Operation was successful.
+ *      -EBUSY - Operation failed
+ */
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    u32 *p_load_code);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt);
+
+/**
+ * @brief - called during init to read all function-related info from shmem.
+ *
+ * @param p_hwfn
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
new file mode 100644 (file)
index 0000000..7a5ce59
--- /dev/null
@@ -0,0 +1,366 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef REG_ADDR_H
+#define REG_ADDR_H
+
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+       0
+
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE          ( \
+               0xfff << 0)
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+       12
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE           ( \
+               0xfff << 12)
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+       24
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB                  ( \
+               0xff << 24)
+
+#define  XSDM_REG_OPERATION_GEN \
+       0xf80408UL
+#define  NIG_REG_RX_BRB_OUT_EN \
+       0x500e18UL
+#define  NIG_REG_STORM_OUT_EN \
+       0x500e08UL
+#define  PSWRQ2_REG_L2P_VALIDATE_VFID \
+       0x240c50UL
+#define  PGLUE_B_REG_USE_CLIENTID_IN_TAG       \
+       0x2aae04UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER       \
+       0x2aa16cUL
+#define  BAR0_MAP_REG_MSDM_RAM \
+       0x1d00000UL
+#define  BAR0_MAP_REG_USDM_RAM \
+       0x1d80000UL
+#define  BAR0_MAP_REG_PSDM_RAM \
+       0x1f00000UL
+#define  BAR0_MAP_REG_TSDM_RAM \
+       0x1c80000UL
+#define  NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+       0x5011f4UL
+#define  PRS_REG_SEARCH_TCP \
+       0x1f0400UL
+#define  PRS_REG_SEARCH_UDP \
+       0x1f0404UL
+#define  PRS_REG_SEARCH_FCOE \
+       0x1f0408UL
+#define  PRS_REG_SEARCH_ROCE \
+       0x1f040cUL
+#define  PRS_REG_SEARCH_OPENFLOW       \
+       0x1f0434UL
+#define  TM_REG_PF_ENABLE_CONN \
+       0x2c043cUL
+#define  TM_REG_PF_ENABLE_TASK \
+       0x2c0444UL
+#define  TM_REG_PF_SCAN_ACTIVE_CONN \
+       0x2c04fcUL
+#define  TM_REG_PF_SCAN_ACTIVE_TASK \
+       0x2c0500UL
+#define  IGU_REG_LEADING_EDGE_LATCH \
+       0x18082cUL
+#define  IGU_REG_TRAILING_EDGE_LATCH \
+       0x180830UL
+#define  QM_REG_USG_CNT_PF_TX \
+       0x2f2eacUL
+#define  QM_REG_USG_CNT_PF_OTHER       \
+       0x2f2eb0UL
+#define  DORQ_REG_PF_DB_ENABLE \
+       0x100508UL
+#define  QM_REG_PF_EN \
+       0x2f2ea4UL
+#define  TCFC_REG_STRONG_ENABLE_PF \
+       0x2d0708UL
+#define  CCFC_REG_STRONG_ENABLE_PF \
+       0x2e0708UL
+#define  PGLUE_B_REG_PGL_ADDR_88_F0 \
+       0x2aa404UL
+#define  PGLUE_B_REG_PGL_ADDR_8C_F0 \
+       0x2aa408UL
+#define  PGLUE_B_REG_PGL_ADDR_90_F0 \
+       0x2aa40cUL
+#define  PGLUE_B_REG_PGL_ADDR_94_F0 \
+       0x2aa410UL
+#define  PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+       0x2aa138UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+       0x2aa174UL
+#define  MISC_REG_GEN_PURP_CR0 \
+       0x008c80UL
+#define  MCP_REG_SCRATCH       \
+       0xe20000UL
+#define  CNIG_REG_NW_PORT_MODE_BB_B0 \
+       0x218200UL
+#define  MISCS_REG_CHIP_NUM \
+       0x00976cUL
+#define  MISCS_REG_CHIP_REV \
+       0x009770UL
+#define  MISCS_REG_CMT_ENABLED_FOR_PAIR \
+       0x00971cUL
+#define  MISCS_REG_CHIP_TEST_REG       \
+       0x009778UL
+#define  MISCS_REG_CHIP_METAL \
+       0x009774UL
+#define  BRB_REG_HEADER_SIZE \
+       0x340804UL
+#define  BTB_REG_HEADER_SIZE \
+       0xdb0804UL
+#define  CAU_REG_LONG_TIMEOUT_THRESHOLD \
+       0x1c0708UL
+#define  CCFC_REG_ACTIVITY_COUNTER \
+       0x2e8800UL
+#define  CDU_REG_CID_ADDR_PARAMS       \
+       0x580900UL
+#define  DBG_REG_CLIENT_ENABLE \
+       0x010004UL
+#define  DMAE_REG_INIT \
+       0x00c000UL
+#define  DORQ_REG_IFEN \
+       0x100040UL
+#define  GRC_REG_TIMEOUT_EN \
+       0x050404UL
+#define  IGU_REG_BLOCK_CONFIGURATION \
+       0x180040UL
+#define  MCM_REG_INIT \
+       0x1200000UL
+#define  MCP2_REG_DBG_DWORD_ENABLE \
+       0x052404UL
+#define  MISC_REG_PORT_MODE \
+       0x008c00UL
+#define  MISCS_REG_CLK_100G_MODE       \
+       0x009070UL
+#define  MSDM_REG_ENABLE_IN1 \
+       0xfc0004UL
+#define  MSEM_REG_ENABLE_IN \
+       0x1800004UL
+#define  NIG_REG_CM_HDR \
+       0x500840UL
+#define  NCSI_REG_CONFIG       \
+       0x040200UL
+#define  PBF_REG_INIT \
+       0xd80000UL
+#define  PTU_REG_ATC_INIT_ARRAY \
+       0x560000UL
+#define  PCM_REG_INIT \
+       0x1100000UL
+#define  PGLUE_B_REG_ADMIN_PER_PF_REGION       \
+       0x2a9000UL
+#define  PRM_REG_DISABLE_PRM \
+       0x230000UL
+#define  PRS_REG_SOFT_RST \
+       0x1f0000UL
+#define  PSDM_REG_ENABLE_IN1 \
+       0xfa0004UL
+#define  PSEM_REG_ENABLE_IN \
+       0x1600004UL
+#define  PSWRQ_REG_DBG_SELECT \
+       0x280020UL
+#define  PSWRQ2_REG_CDUT_P_SIZE \
+       0x24000cUL
+#define  PSWHST_REG_DISCARD_INTERNAL_WRITES \
+       0x2a0040UL
+#define  PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+       0x29e050UL
+#define  PSWRD_REG_DBG_SELECT \
+       0x29c040UL
+#define  PSWRD2_REG_CONF11 \
+       0x29d064UL
+#define  PSWWR_REG_USDM_FULL_TH \
+       0x29a040UL
+#define  PSWWR2_REG_CDU_FULL_TH2       \
+       0x29b040UL
+#define  QM_REG_MAXPQSIZE_0 \
+       0x2f0434UL
+#define  RSS_REG_RSS_INIT_EN \
+       0x238804UL
+#define  RDIF_REG_STOP_ON_ERROR \
+       0x300040UL
+#define  SRC_REG_SOFT_RST \
+       0x23874cUL
+#define  TCFC_REG_ACTIVITY_COUNTER \
+       0x2d8800UL
+#define  TCM_REG_INIT \
+       0x1180000UL
+#define  TM_REG_PXP_READ_DATA_FIFO_INIT \
+       0x2c0014UL
+#define  TSDM_REG_ENABLE_IN1 \
+       0xfb0004UL
+#define  TSEM_REG_ENABLE_IN \
+       0x1700004UL
+#define  TDIF_REG_STOP_ON_ERROR \
+       0x310040UL
+#define  UCM_REG_INIT \
+       0x1280000UL
+#define  UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+       0x051004UL
+#define  USDM_REG_ENABLE_IN1 \
+       0xfd0004UL
+#define  USEM_REG_ENABLE_IN \
+       0x1900004UL
+#define  XCM_REG_INIT \
+       0x1000000UL
+#define  XSDM_REG_ENABLE_IN1 \
+       0xf80004UL
+#define  XSEM_REG_ENABLE_IN \
+       0x1400004UL
+#define  YCM_REG_INIT \
+       0x1080000UL
+#define  YSDM_REG_ENABLE_IN1 \
+       0xf90004UL
+#define  YSEM_REG_ENABLE_IN \
+       0x1500004UL
+#define  XYLD_REG_SCBD_STRICT_PRIO \
+       0x4c0000UL
+#define  TMLD_REG_SCBD_STRICT_PRIO \
+       0x4d0000UL
+#define  MULD_REG_SCBD_STRICT_PRIO \
+       0x4e0000UL
+#define  YULD_REG_SCBD_STRICT_PRIO \
+       0x4c8000UL
+#define  MISC_REG_SHARED_MEM_ADDR \
+       0x008c20UL
+#define  DMAE_REG_GO_C0 \
+       0x00c048UL
+#define  DMAE_REG_GO_C1 \
+       0x00c04cUL
+#define  DMAE_REG_GO_C2 \
+       0x00c050UL
+#define  DMAE_REG_GO_C3 \
+       0x00c054UL
+#define  DMAE_REG_GO_C4 \
+       0x00c058UL
+#define  DMAE_REG_GO_C5 \
+       0x00c05cUL
+#define  DMAE_REG_GO_C6 \
+       0x00c060UL
+#define  DMAE_REG_GO_C7 \
+       0x00c064UL
+#define  DMAE_REG_GO_C8 \
+       0x00c068UL
+#define  DMAE_REG_GO_C9 \
+       0x00c06cUL
+#define  DMAE_REG_GO_C10       \
+       0x00c070UL
+#define  DMAE_REG_GO_C11       \
+       0x00c074UL
+#define  DMAE_REG_GO_C12       \
+       0x00c078UL
+#define  DMAE_REG_GO_C13       \
+       0x00c07cUL
+#define  DMAE_REG_GO_C14       \
+       0x00c080UL
+#define  DMAE_REG_GO_C15       \
+       0x00c084UL
+#define  DMAE_REG_GO_C16       \
+       0x00c088UL
+#define  DMAE_REG_GO_C17       \
+       0x00c08cUL
+#define  DMAE_REG_GO_C18       \
+       0x00c090UL
+#define  DMAE_REG_GO_C19       \
+       0x00c094UL
+#define  DMAE_REG_GO_C20       \
+       0x00c098UL
+#define  DMAE_REG_GO_C21       \
+       0x00c09cUL
+#define  DMAE_REG_GO_C22       \
+       0x00c0a0UL
+#define  DMAE_REG_GO_C23       \
+       0x00c0a4UL
+#define  DMAE_REG_GO_C24       \
+       0x00c0a8UL
+#define  DMAE_REG_GO_C25       \
+       0x00c0acUL
+#define  DMAE_REG_GO_C26       \
+       0x00c0b0UL
+#define  DMAE_REG_GO_C27       \
+       0x00c0b4UL
+#define  DMAE_REG_GO_C28       \
+       0x00c0b8UL
+#define  DMAE_REG_GO_C29       \
+       0x00c0bcUL
+#define  DMAE_REG_GO_C30       \
+       0x00c0c0UL
+#define  DMAE_REG_GO_C31       \
+       0x00c0c4UL
+#define  DMAE_REG_CMD_MEM \
+       0x00c800UL
+#define  QM_REG_MAXPQSIZETXSEL_0       \
+       0x2f0440UL
+#define  QM_REG_SDMCMDREADY \
+       0x2f1e10UL
+#define  QM_REG_SDMCMDADDR \
+       0x2f1e04UL
+#define  QM_REG_SDMCMDDATALSB \
+       0x2f1e08UL
+#define  QM_REG_SDMCMDDATAMSB \
+       0x2f1e0cUL
+#define  QM_REG_SDMCMDGO       \
+       0x2f1e14UL
+#define  QM_REG_RLPFCRD \
+       0x2f4d80UL
+#define  QM_REG_RLPFINCVAL \
+       0x2f4c80UL
+#define  QM_REG_RLGLBLCRD \
+       0x2f4400UL
+#define  QM_REG_RLGLBLINCVAL \
+       0x2f3400UL
+#define  IGU_REG_ATTENTION_ENABLE \
+       0x18083cUL
+#define  IGU_REG_ATTN_MSG_ADDR_L       \
+       0x180820UL
+#define  IGU_REG_ATTN_MSG_ADDR_H       \
+       0x180824UL
+#define  MISC_REG_AEU_GENERAL_ATTN_0 \
+       0x008400UL
+#define  CAU_REG_SB_ADDR_MEMORY \
+       0x1c8000UL
+#define  CAU_REG_SB_VAR_MEMORY \
+       0x1c6000UL
+#define  CAU_REG_PI_MEMORY \
+       0x1d0000UL
+#define  IGU_REG_PF_CONFIGURATION \
+       0x180800UL
+#define  MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+       0x00849cUL
+#define  MISC_REG_AEU_MASK_ATTN_IGU \
+       0x008494UL
+#define  IGU_REG_CLEANUP_STATUS_0 \
+       0x180980UL
+#define  IGU_REG_CLEANUP_STATUS_1 \
+       0x180a00UL
+#define  IGU_REG_CLEANUP_STATUS_2 \
+       0x180a80UL
+#define  IGU_REG_CLEANUP_STATUS_3 \
+       0x180b00UL
+#define  IGU_REG_CLEANUP_STATUS_4 \
+       0x180b80UL
+#define  IGU_REG_COMMAND_REG_32LSB_DATA \
+       0x180840UL
+#define  IGU_REG_COMMAND_REG_CTRL \
+       0x180848UL
+#define  IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN     ( \
+               0x1 << 1)
+#define  IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN      ( \
+               0x1 << 0)
+#define  IGU_REG_MAPPING_MEMORY \
+       0x184000UL
+#define  MISCS_REG_GENERIC_POR_0       \
+       0x0096d4UL
+#define  MCP_REG_NVM_CFG4 \
+       0xe0642cUL
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE   ( \
+               0x7 << 0)
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+       0
+#endif
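
The *_SHIFT/mask pairs at the top of this file are meant to be used
together; a small sketch of composing a CDU_REG_CID_ADDR_PARAMS value (the
helper name is hypothetical, and the actual write would go through the
driver's register-access layer):

static u32 example_cdu_addr_params(u32 context_size, u32 block_waste, u32 ncib)
{
	u32 val = 0;

	/* Each field is shifted into place and masked to its bit range */
	val |= (context_size << CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT) &
	       CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE;
	val |= (block_waste << CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT) &
	       CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE;
	val |= (ncib << CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT) &
	       CDU_REG_CID_ADDR_PARAMS_NCIB;

	return val;
}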
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
new file mode 100644 (file)
index 0000000..31a1f1e
--- /dev/null
@@ -0,0 +1,360 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SP_H
+#define _QED_SP_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+
+enum spq_mode {
+       QED_SPQ_MODE_BLOCK,     /* Client will poll a designated mem. address */
+       QED_SPQ_MODE_CB,        /* Client supplies a callback */
+       QED_SPQ_MODE_EBLOCK,    /* QED should block until completion */
+};
+
+struct qed_spq_comp_cb {
+       void    (*function)(struct qed_hwfn *,
+                           void *,
+                           union event_ring_data *,
+                           u8 fw_return_code);
+       void    *cookie;
+};
+
+/**
+ * @brief qed_eth_cqe_completion - handles the completion of a
+ *        ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return int
+ */
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+                          struct eth_slow_path_rx_cqe *cqe);
+
+/**
+ *  @file
+ *
+ *  QED Slow-hwfn queue interface
+ */
+
+union ramrod_data {
+       struct pf_start_ramrod_data pf_start;
+       struct rx_queue_start_ramrod_data rx_queue_start;
+       struct rx_queue_update_ramrod_data rx_queue_update;
+       struct rx_queue_stop_ramrod_data rx_queue_stop;
+       struct tx_queue_start_ramrod_data tx_queue_start;
+       struct tx_queue_stop_ramrod_data tx_queue_stop;
+       struct vport_start_ramrod_data vport_start;
+       struct vport_stop_ramrod_data vport_stop;
+       struct vport_update_ramrod_data vport_update;
+       struct vport_filter_update_ramrod_data vport_filter_update;
+};
+
+#define EQ_MAX_CREDIT   0xffffffff
+
+enum spq_priority {
+       QED_SPQ_PRIORITY_NORMAL,
+       QED_SPQ_PRIORITY_HIGH,
+};
+
+union qed_spq_req_comp {
+       struct qed_spq_comp_cb  cb;
+       u64                     *done_addr;
+};
+
+struct qed_spq_comp_done {
+       u64     done;
+       u8      fw_return_code;
+};
+
+struct qed_spq_entry {
+       struct list_head                list;
+
+       u8                              flags;
+
+       /* HSI slow path element */
+       struct slow_path_element        elem;
+
+       union ramrod_data               ramrod;
+
+       enum spq_priority               priority;
+
+       /* pending queue for this entry */
+       struct list_head                *queue;
+
+       enum spq_mode                   comp_mode;
+       struct qed_spq_comp_cb          comp_cb;
+       struct qed_spq_comp_done        comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct qed_eq {
+       struct qed_chain        chain;
+       u8                      eq_sb_index;    /* index within the SB */
+       __le16                  *p_fw_cons;     /* ptr to index value */
+};
+
+struct qed_consq {
+       struct qed_chain chain;
+};
+
+struct qed_spq {
+       spinlock_t              lock; /* SPQ lock */
+
+       struct list_head        unlimited_pending;
+       struct list_head        pending;
+       struct list_head        completion_pending;
+       struct list_head        free_pool;
+
+       struct qed_chain        chain;
+
+       /* allocated dma-able memory for spq entries (+ramrod data) */
+       dma_addr_t              p_phys;
+       struct qed_spq_entry    *p_virt;
+
+       /* Used as index for completions (returns on EQ by FW) */
+       u16                     echo_idx;
+
+       /* Statistics */
+       u32                     unlimited_pending_count;
+       u32                     normal_count;
+       u32                     high_count;
+       u32                     comp_sent_count;
+       u32                     comp_count;
+
+       u32                     cid;
+};
+
+/**
+ * @brief qed_spq_post - Posts a Slow hwfn request to FW, or queues it
+ *        on the pending list if the SPQ is currently full.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param fw_return_code
+ *
+ * @return int
+ */
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+                struct qed_spq_entry *p_ent,
+                u8 *fw_return_code);
+
+/**
+ * @brief qed_spq_alloc - Allocates & initializes the SPQ.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_spq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_free - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_get_entry - Obtain an entry from the spq
+ *        free pool list.
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return int
+ */
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+                 struct qed_spq_entry **pp_ent);
+
+/**
+ * @brief qed_spq_return_entry - Return an entry to spq free
+ *                                 pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+                         struct qed_spq_entry *p_ent);
+
+/**
+ * @brief qed_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+                           u16 num_elem);
+
+/**
+ * @brief qed_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+                 struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_free - Deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+                struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+                       u16 prod);
+
+/**
+ * @brief qed_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return int
+ */
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+                     void *cookie);
+
+/**
+ * @brief qed_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param fw_return_code - FW return code from the EQ element
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return int
+ */
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+                      __le16 echo,
+                      u8 fw_return_code,
+                      union event_ring_data *p_data);
+
+/**
+ * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_alloc - Allocates & initializes a ConsQ
+ *        struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_consq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_setup - Reset the ConsQ to its start
+ *        state.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+                    struct qed_consq *p_consq);
+
+/**
+ * @brief qed_consq_free - Deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+                   struct qed_consq *p_consq);
+
+/**
+ * @file
+ *
+ * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
+ */
+
+#define QED_SP_EQ_COMPLETION  0x01
+#define QED_SP_CQE_COMPLETION 0x02
+
+struct qed_sp_init_request_params {
+       size_t                  ramrod_data_size;
+       enum spq_mode           comp_mode;
+       struct qed_spq_comp_cb *p_comp_data;
+};
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+                       struct qed_spq_entry **pp_ent,
+                       u32 cid,
+                       u16 opaque_fid,
+                       u8 cmd,
+                       u8 protocol,
+                       struct qed_sp_init_request_params *p_params);
+
+/**
+ * @brief qed_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param mode
+ *
+ * @return int
+ */
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+                   enum mf_mode mode);
+
+/**
+ * @brief qed_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PFs Event Ring. This ramrod also
+ * deletes the context for the Slowhwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+
+#endif
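
For QED_SPQ_MODE_CB, the caller supplies a completion callback matching
struct qed_spq_comp_cb and hands it over through qed_sp_init_request_params.
A hedged sketch, assuming the usual kernel headers (the example_* names are
hypothetical):

static void example_ramrod_done(struct qed_hwfn *p_hwfn, void *cookie,
				union event_ring_data *data,
				u8 fw_return_code)
{
	/* Invoked from EQ processing once firmware completes the ramrod */
}

static void example_setup_cb_mode(struct qed_sp_init_request_params *params,
				  struct qed_spq_comp_cb *cb, void *cookie)
{
	memset(params, 0, sizeof(*params));

	cb->function = example_ramrod_done;
	cb->cookie = cookie;

	params->comp_mode = QED_SPQ_MODE_CB;
	params->p_comp_data = cb;	/* copied into the SPQ entry */
}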
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
new file mode 100644 (file)
index 0000000..6f78791
--- /dev/null
@@ -0,0 +1,170 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+                       struct qed_spq_entry **pp_ent,
+                       u32 cid,
+                       u16 opaque_fid,
+                       u8 cmd,
+                       u8 protocol,
+                       struct qed_sp_init_request_params *p_params)
+{
+       int rc = -EINVAL;
+       struct qed_spq_entry *p_ent = NULL;
+       u32 opaque_cid = opaque_fid << 16 | cid;
+
+       if (!pp_ent)
+               return -ENOMEM;
+
+       rc = qed_spq_get_entry(p_hwfn, pp_ent);
+
+       if (rc != 0)
+               return rc;
+
+       p_ent = *pp_ent;
+
+       p_ent->elem.hdr.cid             = cpu_to_le32(opaque_cid);
+       p_ent->elem.hdr.cmd_id          = cmd;
+       p_ent->elem.hdr.protocol_id     = protocol;
+
+       p_ent->priority         = QED_SPQ_PRIORITY_NORMAL;
+       p_ent->comp_mode        = p_params->comp_mode;
+       p_ent->comp_done.done   = 0;
+
+       switch (p_ent->comp_mode) {
+       case QED_SPQ_MODE_EBLOCK:
+               p_ent->comp_cb.cookie = &p_ent->comp_done;
+               break;
+
+       case QED_SPQ_MODE_BLOCK:
+               if (!p_params->p_comp_data)
+                       return -EINVAL;
+
+               p_ent->comp_cb.cookie = p_params->p_comp_data->cookie;
+               break;
+
+       case QED_SPQ_MODE_CB:
+               if (!p_params->p_comp_data)
+                       p_ent->comp_cb.function = NULL;
+               else
+                       p_ent->comp_cb = *p_params->p_comp_data;
+               break;
+
+       default:
+               DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+                         p_ent->comp_mode);
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                  "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+                  opaque_cid, cmd, protocol,
+                  (unsigned long)&p_ent->ramrod,
+                  D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+                          QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+                          "MODE_CB"));
+       if (p_params->ramrod_data_size)
+               memset(&p_ent->ramrod, 0, p_params->ramrod_data_size);
+
+       return 0;
+}
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+                   enum mf_mode mode)
+{
+       struct qed_sp_init_request_params params;
+       struct pf_start_ramrod_data *p_ramrod = NULL;
+       u16 sb = qed_int_get_sp_sb_id(p_hwfn);
+       u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+       struct qed_spq_entry *p_ent = NULL;
+       int rc = -EINVAL;
+
+       /* update initial eq producer */
+       qed_eq_prod_update(p_hwfn,
+                          qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+       memset(&params, 0, sizeof(params));
+       params.ramrod_data_size = sizeof(*p_ramrod);
+       params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn,
+                                &p_ent,
+                                qed_spq_get_cid(p_hwfn),
+                                p_hwfn->hw_info.opaque_fid,
+                                COMMON_RAMROD_PF_START,
+                                PROTOCOLID_COMMON,
+                                &params);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.pf_start;
+
+       p_ramrod->event_ring_sb_id      = cpu_to_le16(sb);
+       p_ramrod->event_ring_sb_index   = sb_index;
+       p_ramrod->path_id               = QED_PATH_ID(p_hwfn);
+       p_ramrod->dont_log_ramrods      = 0;
+       p_ramrod->log_type_mask         = cpu_to_le16(0xf);
+       p_ramrod->mf_mode = mode;
+       p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+       /* Place EQ address in RAMROD */
+       p_ramrod->event_ring_pbl_addr.hi =
+                       DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+       p_ramrod->event_ring_pbl_addr.lo =
+                       DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+       p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
+
+       p_ramrod->consolid_q_pbl_addr.hi =
+                       DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+       p_ramrod->consolid_q_pbl_addr.lo =
+                       DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+
+       p_hwfn->hw_info.personality = PERSONALITY_ETH;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                  "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+                  sb, sb_index,
+                  (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
+                  p_ramrod->outer_tag);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
+{
+       struct qed_sp_init_request_params params;
+       struct qed_spq_entry *p_ent = NULL;
+       int rc = -EINVAL;
+
+       memset(&params, 0, sizeof(params));
+       params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
+                                p_hwfn->hw_info.opaque_fid,
+                                COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+                                &params);
+       if (rc)
+               return rc;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
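
As described in qed_sp.h, the two ramrods above bracket the PF's lifetime;
a sketch of the expected pairing (the example_* name is hypothetical):

static int example_pf_lifecycle(struct qed_hwfn *p_hwfn, enum mf_mode mode)
{
	int rc;

	/* First ramrod, sent once Storm init is done; it completes on
	 * the PF event ring whose parameters it also configures.
	 */
	rc = qed_sp_pf_start(p_hwfn, mode);
	if (rc)
		return rc;

	/* ... datapath ramrods run here ... */

	/* Last ramrod; its completion is the final EQ element */
	return qed_sp_pf_stop(p_hwfn);
}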
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
new file mode 100644 (file)
index 0000000..7c0b845
--- /dev/null
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/***************************************************************************
+* Structures & Definitions
+***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
+#define SPQ_BLOCK_SLEEP_LENGTH          (1000)
+
+/***************************************************************************
+* Blocking Imp. (BLOCK/EBLOCK mode)
+***************************************************************************/
+static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
+                               void *cookie,
+                               union event_ring_data *data,
+                               u8 fw_return_code)
+{
+       struct qed_spq_comp_done *comp_done;
+
+       comp_done = (struct qed_spq_comp_done *)cookie;
+
+       comp_done->done                 = 0x1;
+       comp_done->fw_return_code       = fw_return_code;
+
+       /* make update visible to waiting thread */
+       smp_wmb();
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+                        struct qed_spq_entry *p_ent,
+                        u8 *p_fw_ret)
+{
+       int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+       struct qed_spq_comp_done *comp_done;
+       int rc;
+
+       comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+       while (sleep_count) {
+               /* validate we receive completion update */
+               smp_rmb();
+               if (comp_done->done == 1) {
+                       if (p_fw_ret)
+                               *p_fw_ret = comp_done->fw_return_code;
+                       return 0;
+               }
+               usleep_range(5000, 10000);
+               sleep_count--;
+       }
+
+       DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+       rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+       if (rc != 0)
+               DP_NOTICE(p_hwfn, "MCP drain failed\n");
+
+       /* Retry after drain */
+       sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+       while (sleep_count) {
+               /* validate we receive completion update */
+               smp_rmb();
+               if (comp_done->done == 1) {
+                       if (p_fw_ret)
+                               *p_fw_ret = comp_done->fw_return_code;
+                       return 0;
+               }
+               usleep_range(5000, 10000);
+               sleep_count--;
+       }
+
+       if (comp_done->done == 1) {
+               if (p_fw_ret)
+                       *p_fw_ret = comp_done->fw_return_code;
+               return 0;
+       }
+
+       DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+
+       return -EBUSY;
+}
+
+/***************************************************************************
+* SPQ entries inner API
+***************************************************************************/
+static int
+qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+                  struct qed_spq_entry *p_ent)
+{
+       p_ent->elem.hdr.echo = 0;
+       p_hwfn->p_spq->echo_idx++;
+       p_ent->flags = 0;
+
+       switch (p_ent->comp_mode) {
+       case QED_SPQ_MODE_EBLOCK:
+       case QED_SPQ_MODE_BLOCK:
+               p_ent->comp_cb.function = qed_spq_blocking_cb;
+               break;
+       case QED_SPQ_MODE_CB:
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+                         p_ent->comp_mode);
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                  "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+                  p_ent->elem.hdr.cid,
+                  p_ent->elem.hdr.cmd_id,
+                  p_ent->elem.hdr.protocol_id,
+                  p_ent->elem.data_ptr.hi,
+                  p_ent->elem.data_ptr.lo,
+                  D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+                          QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+                          "MODE_CB"));
+
+       return 0;
+}
+
+/***************************************************************************
+* HSI access
+***************************************************************************/
+static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
+                                 struct qed_spq *p_spq)
+{
+       u16                             pq;
+       struct qed_cxt_info             cxt_info;
+       struct core_conn_context        *p_cxt;
+       union qed_qm_pq_params          pq_params;
+       int                             rc;
+
+       cxt_info.iid = p_spq->cid;
+
+       rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+       if (rc < 0) {
+               DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+                         p_spq->cid);
+               return;
+       }
+
+       p_cxt = cxt_info.p_cxt;
+
+       SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+       SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+       SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+                 XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+       /* QM physical queue */
+       memset(&pq_params, 0, sizeof(pq_params));
+       pq_params.core.tc = LB_TC;
+       pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+       p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+
+       p_cxt->xstorm_st_context.spq_base_lo =
+               DMA_LO_LE(p_spq->chain.p_phys_addr);
+       p_cxt->xstorm_st_context.spq_base_hi =
+               DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+       p_cxt->xstorm_st_context.consolid_base_addr.lo =
+               DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
+       p_cxt->xstorm_st_context.consolid_base_addr.hi =
+               DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
+                          struct qed_spq *p_spq,
+                          struct qed_spq_entry *p_ent)
+{
+       struct qed_chain                *p_chain = &p_hwfn->p_spq->chain;
+       struct slow_path_element        *elem;
+       struct core_db_data             db;
+
+       elem = qed_chain_produce(p_chain);
+       if (!elem) {
+               DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
+               return -EINVAL;
+       }
+
+       *elem = p_ent->elem; /* struct assignment */
+
+       /* send a doorbell on the slow hwfn session */
+       memset(&db, 0, sizeof(db));
+       SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+       SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_CORE_SPQ_PROD_CMD);
+       db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+       /* validate producer is up to date */
+       rmb();
+
+       db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+
+       /* do not reorder */
+       barrier();
+
+       DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+
+       /* make sure the doorbell is rung */
+       mmiowb();
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                  "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
+                  qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
+                  p_spq->cid, db.params, db.agg_flags,
+                  qed_chain_get_prod_idx(p_chain));
+
+       return 0;
+}
+
+/***************************************************************************
+* Asynchronous events
+***************************************************************************/
+static int
+qed_async_event_completion(struct qed_hwfn *p_hwfn,
+                          struct event_ring_entry *p_eqe)
+{
+       DP_NOTICE(p_hwfn,
+                 "Unknown Async completion for protocol: %d\n",
+                  p_eqe->protocol_id);
+       return -EINVAL;
+}
+
+/***************************************************************************
+* EQ API
+***************************************************************************/
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+                       u16 prod)
+{
+       u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+                  USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+       REG_WR16(p_hwfn, addr, prod);
+
+       /* keep prod updates ordered */
+       mmiowb();
+}
+
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+                     void *cookie)
+
+{
+       struct qed_eq *p_eq = cookie;
+       struct qed_chain *p_chain = &p_eq->chain;
+       int rc = 0;
+
+       /* take a snapshot of the FW consumer */
+       u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+       /* Need to guarantee that the fw_cons index we use points to a usable
+        * element (to comply with our chain), so our macros behave correctly
+        */
+       if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
+           qed_chain_get_usable_per_page(p_chain))
+               fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
+
+       /* Complete current segment of eq entries */
+       while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
+               struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
+
+               if (!p_eqe) {
+                       rc = -EINVAL;
+                       break;
+               }
+
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+                          p_eqe->opcode,
+                          p_eqe->protocol_id,
+                          p_eqe->reserved0,
+                          le16_to_cpu(p_eqe->echo),
+                          p_eqe->fw_return_code,
+                          p_eqe->flags);
+
+               if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+                       if (qed_async_event_completion(p_hwfn, p_eqe))
+                               rc = -EINVAL;
+               } else if (qed_spq_completion(p_hwfn,
+                                             p_eqe->echo,
+                                             p_eqe->fw_return_code,
+                                             &p_eqe->data)) {
+                       rc = -EINVAL;
+               }
+
+               qed_chain_recycle_consumed(p_chain);
+       }
+
+       qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+
+       return rc;
+}
+
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+                           u16 num_elem)
+{
+       struct qed_eq *p_eq;
+
+       /* Allocate EQ struct */
+       p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
+       if (!p_eq) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+               return NULL;
+       }
+
+       /* Allocate and initialize EQ chain */
+       if (qed_chain_alloc(p_hwfn->cdev,
+                           QED_CHAIN_USE_TO_PRODUCE,
+                           QED_CHAIN_MODE_PBL,
+                           num_elem,
+                           sizeof(union event_ring_element),
+                           &p_eq->chain)) {
+               DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+               goto eq_allocate_fail;
+       }
+
+       /* register EQ completion on the SP SB */
+       qed_int_register_cb(p_hwfn,
+                           qed_eq_completion,
+                           p_eq,
+                           &p_eq->eq_sb_index,
+                           &p_eq->p_fw_cons);
+
+       return p_eq;
+
+eq_allocate_fail:
+       qed_eq_free(p_hwfn, p_eq);
+       return NULL;
+}
+
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+                 struct qed_eq *p_eq)
+{
+       qed_chain_reset(&p_eq->chain);
+}
+
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+                struct qed_eq *p_eq)
+{
+       if (!p_eq)
+               return;
+       qed_chain_free(p_hwfn->cdev, &p_eq->chain);
+       kfree(p_eq);
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static int qed_cqe_completion(
+       struct qed_hwfn *p_hwfn,
+       struct eth_slow_path_rx_cqe *cqe,
+       enum protocol_type protocol)
+{
+       /* @@@tmp - it's possible we'll eventually want to handle some
+        * actual commands that can arrive here, but for now this is only
+        * used to complete the ramrod using the echo value on the cqe
+        */
+       return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
+}
+
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+                          struct eth_slow_path_rx_cqe *cqe)
+{
+       int rc;
+
+       rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+       if (rc)
+               DP_NOTICE(p_hwfn,
+                         "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+                         cqe->ramrod_cmd_id);
+
+       return rc;
+}
+
+/***************************************************************************
+* Slow hwfn Queue (spq)
+***************************************************************************/
+void qed_spq_setup(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq          *p_spq  = p_hwfn->p_spq;
+       struct qed_spq_entry    *p_virt = NULL;
+       dma_addr_t              p_phys  = 0;
+       unsigned int            i       = 0;
+
+       INIT_LIST_HEAD(&p_spq->pending);
+       INIT_LIST_HEAD(&p_spq->completion_pending);
+       INIT_LIST_HEAD(&p_spq->free_pool);
+       INIT_LIST_HEAD(&p_spq->unlimited_pending);
+       spin_lock_init(&p_spq->lock);
+
+       /* SPQ empty pool */
+       p_phys  = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
+       p_virt  = p_spq->p_virt;
+
+       for (i = 0; i < p_spq->chain.capacity; i++) {
+               p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
+               p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+
+               list_add_tail(&p_virt->list, &p_spq->free_pool);
+
+               p_virt++;
+               p_phys += sizeof(struct qed_spq_entry);
+       }
+
+       /* Statistics */
+       p_spq->normal_count             = 0;
+       p_spq->comp_count               = 0;
+       p_spq->comp_sent_count          = 0;
+       p_spq->unlimited_pending_count  = 0;
+       p_spq->echo_idx                 = 0;
+
+       /* SPQ cid, cannot fail */
+       qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+       qed_spq_hw_initialize(p_hwfn, p_spq);
+
+       /* reset the chain itself */
+       qed_chain_reset(&p_spq->chain);
+}
+
+int qed_spq_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq          *p_spq  = NULL;
+       dma_addr_t              p_phys  = 0;
+       struct qed_spq_entry    *p_virt = NULL;
+
+       /* SPQ struct */
+       p_spq = kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
+       if (!p_spq) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+               return -ENOMEM;
+       }
+
+       /* SPQ ring  */
+       if (qed_chain_alloc(p_hwfn->cdev,
+                           QED_CHAIN_USE_TO_PRODUCE,
+                           QED_CHAIN_MODE_SINGLE,
+                           0,   /* N/A when the mode is SINGLE */
+                           sizeof(struct slow_path_element),
+                           &p_spq->chain)) {
+               DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+               goto spq_allocate_fail;
+       }
+
+       /* allocate and fill the SPQ elements (incl. ramrod data list) */
+       p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   p_spq->chain.capacity *
+                                   sizeof(struct qed_spq_entry),
+                                   &p_phys,
+                                   GFP_KERNEL);
+
+       if (!p_virt)
+               goto spq_allocate_fail;
+
+       p_spq->p_virt = p_virt;
+       p_spq->p_phys = p_phys;
+       p_hwfn->p_spq = p_spq;
+
+       return 0;
+
+spq_allocate_fail:
+       qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+       kfree(p_spq);
+       return -ENOMEM;
+}
+
+void qed_spq_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+
+       if (!p_spq)
+               return;
+
+       if (p_spq->p_virt)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 p_spq->chain.capacity *
+                                 sizeof(struct qed_spq_entry),
+                                 p_spq->p_virt,
+                                 p_spq->p_phys);
+
+       qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+       kfree(p_spq);
+}
+
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+                 struct qed_spq_entry **pp_ent)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+       struct qed_spq_entry *p_ent = NULL;
+       int rc = 0;
+
+       spin_lock_bh(&p_spq->lock);
+
+       if (list_empty(&p_spq->free_pool)) {
+               p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
+               if (!p_ent) {
+                       rc = -ENOMEM;
+                       goto out_unlock;
+               }
+               p_ent->queue = &p_spq->unlimited_pending;
+       } else {
+               p_ent = list_first_entry(&p_spq->free_pool,
+                                        struct qed_spq_entry,
+                                        list);
+               list_del(&p_ent->list);
+               p_ent->queue = &p_spq->pending;
+       }
+
+       *pp_ent = p_ent;
+
+out_unlock:
+       spin_unlock_bh(&p_spq->lock);
+       return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+                                  struct qed_spq_entry *p_ent)
+{
+       list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+                         struct qed_spq_entry *p_ent)
+{
+       spin_lock_bh(&p_hwfn->p_spq->lock);
+       __qed_spq_return_entry(p_hwfn, p_ent);
+       spin_unlock_bh(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief qed_spq_add_entry - adds a new entry to the pending
+ *        list. Must be called while the SPQ lock is held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return int
+ */
+static int
+qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+                 struct qed_spq_entry *p_ent,
+                 enum spq_priority priority)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+
+       if (p_ent->queue == &p_spq->unlimited_pending) {
+               struct qed_spq_entry *p_en2;
+
+               if (list_empty(&p_spq->free_pool)) {
+                       list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
+                       p_spq->unlimited_pending_count++;
+
+                       return 0;
+               }
+
+               p_en2 = list_first_entry(&p_spq->free_pool,
+                                        struct qed_spq_entry,
+                                        list);
+               list_del(&p_en2->list);
+
+               /* Struct assignment */
+               *p_en2 = *p_ent;
+
+               kfree(p_ent);
+
+               p_ent = p_en2;
+       }
+
+       /* entry is to be placed in 'pending' queue */
+       switch (priority) {
+       case QED_SPQ_PRIORITY_NORMAL:
+               list_add_tail(&p_ent->list, &p_spq->pending);
+               p_spq->normal_count++;
+               break;
+       case QED_SPQ_PRIORITY_HIGH:
+               list_add(&p_ent->list, &p_spq->pending);
+               p_spq->high_count++;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/***************************************************************************
+* Accessor
+***************************************************************************/
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
+{
+       if (!p_hwfn->p_spq)
+               return 0xffffffff;      /* illegal */
+       return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+* Posting new Ramrods
+***************************************************************************/
+static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
+                            struct list_head *head,
+                            u32 keep_reserve)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+       int rc;
+
+       while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+              !list_empty(head)) {
+               struct qed_spq_entry *p_ent =
+                       list_first_entry(head, struct qed_spq_entry, list);
+               list_del(&p_ent->list);
+               list_add_tail(&p_ent->list, &p_spq->completion_pending);
+               p_spq->comp_sent_count++;
+
+               rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
+               if (rc) {
+                       list_del(&p_ent->list);
+                       __qed_spq_return_entry(p_hwfn, p_ent);
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+
+static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+{
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+       struct qed_spq_entry *p_ent = NULL;
+
+       while (!list_empty(&p_spq->free_pool)) {
+               if (list_empty(&p_spq->unlimited_pending))
+                       break;
+
+               p_ent = list_first_entry(&p_spq->unlimited_pending,
+                                        struct qed_spq_entry,
+                                        list);
+               if (!p_ent)
+                       return -EINVAL;
+
+               list_del(&p_ent->list);
+
+               qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+       }
+
+       return qed_spq_post_list(p_hwfn, &p_spq->pending,
+                                SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+                struct qed_spq_entry *p_ent,
+                u8 *fw_return_code)
+{
+       int rc = 0;
+       struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
+       bool b_ret_ent = true;
+
+       if (!p_hwfn)
+               return -EINVAL;
+
+       if (!p_ent) {
+               DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
+               return -EINVAL;
+       }
+
+       /* Complete the entry */
+       rc = qed_spq_fill_entry(p_hwfn, p_ent);
+
+       spin_lock_bh(&p_spq->lock);
+
+       /* Check return value after LOCK is taken for cleaner error flow */
+       if (rc)
+               goto spq_post_fail;
+
+       /* Add the request to the pending queue */
+       rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+       if (rc)
+               goto spq_post_fail;
+
+       rc = qed_spq_pend_post(p_hwfn);
+       if (rc) {
+               /* Since it's possible that pending failed for a different
+                * entry [although unlikely], the failed entry was already
+                * dealt with; No need to return it here.
+                */
+               b_ret_ent = false;
+               goto spq_post_fail;
+       }
+
+       spin_unlock_bh(&p_spq->lock);
+
+       if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+               /* For entries in QED EBLOCK mode, the completion code cannot
+                * perform the necessary cleanup - if it did, we couldn't
+                * access p_ent here to see whether it's successful or not.
+                * Thus, after gaining the answer perform the cleanup here.
+                */
+               rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+               if (rc)
+                       goto spq_post_fail2;
+
+               /* return to pool */
+               qed_spq_return_entry(p_hwfn, p_ent);
+       }
+       return rc;
+
+spq_post_fail2:
+       spin_lock_bh(&p_spq->lock);
+       list_del(&p_ent->list);
+       qed_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+       /* return to the free pool */
+       if (b_ret_ent)
+               __qed_spq_return_entry(p_hwfn, p_ent);
+       spin_unlock_bh(&p_spq->lock);
+
+       return rc;
+}
+
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+                      __le16 echo,
+                      u8 fw_return_code,
+                      union event_ring_data *p_data)
+{
+       struct qed_spq          *p_spq;
+       struct qed_spq_entry    *p_ent = NULL;
+       struct qed_spq_entry    *tmp;
+       struct qed_spq_entry    *found = NULL;
+       int                     rc;
+
+       if (!p_hwfn)
+               return -EINVAL;
+
+       p_spq = p_hwfn->p_spq;
+       if (!p_spq)
+               return -EINVAL;
+
+       spin_lock_bh(&p_spq->lock);
+       list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
+                                list) {
+               if (p_ent->elem.hdr.echo == echo) {
+                       list_del(&p_ent->list);
+
+                       qed_chain_return_produced(&p_spq->chain);
+                       p_spq->comp_count++;
+                       found = p_ent;
+                       break;
+               }
+       }
+
+       /* Release lock before callback, as callback may post
+        * an additional ramrod.
+        */
+       spin_unlock_bh(&p_spq->lock);
+
+       if (!found) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to find an entry this EQE completes\n");
+               return -EEXIST;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+                  p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+       if (found->comp_cb.function)
+               found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+                                       fw_return_code);
+
+       if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
+               /* EBLOCK is responsible for freeing its own entry */
+               qed_spq_return_entry(p_hwfn, found);
+
+       /* Attempt to post pending requests */
+       spin_lock_bh(&p_spq->lock);
+       rc = qed_spq_pend_post(p_hwfn);
+       spin_unlock_bh(&p_spq->lock);
+
+       return rc;
+}
+
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_consq *p_consq;
+
+       /* Allocate ConsQ struct */
+       p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
+       if (!p_consq) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+               return NULL;
+       }
+
+       /* Allocate and initialize ConsQ chain */
+       if (qed_chain_alloc(p_hwfn->cdev,
+                           QED_CHAIN_USE_TO_PRODUCE,
+                           QED_CHAIN_MODE_PBL,
+                           QED_CHAIN_PAGE_SIZE / 0x80,
+                           0x80,
+                           &p_consq->chain)) {
+               DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+               goto consq_allocate_fail;
+       }
+
+       return p_consq;
+
+consq_allocate_fail:
+       qed_consq_free(p_hwfn, p_consq);
+       return NULL;
+}
+
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+                    struct qed_consq *p_consq)
+{
+       qed_chain_reset(&p_consq->chain);
+}
+
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+                   struct qed_consq *p_consq)
+{
+       if (!p_consq)
+               return;
+       qed_chain_free(p_hwfn->cdev, &p_consq->chain);
+       kfree(p_consq);
+}
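
In QED_SPQ_MODE_EBLOCK, qed_spq_post() itself waits (via qed_spq_block())
and recycles the entry, so a caller only examines the firmware return code.
A minimal sketch, assuming an entry already initialized through
qed_sp_init_request() with EBLOCK mode (the example_* name is hypothetical,
and treating any nonzero FW code as failure is a policy choice of this
sketch):

static int example_post_eblock(struct qed_hwfn *p_hwfn,
			       struct qed_spq_entry *p_ent)
{
	u8 fw_ret = 0;
	int rc;

	rc = qed_spq_post(p_hwfn, p_ent, &fw_ret);
	if (rc)
		return rc;	/* entry already cleaned up on failure paths */

	/* fw_ret now holds the firmware return code from the EQ element */
	return fw_ret ? -EINVAL : 0;
}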
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
new file mode 100644 (file)
index 0000000..06ff90d
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_QEDE) := qede.o
+
+qede-y := qede_main.o qede_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
new file mode 100644 (file)
index 0000000..ea00d5f
--- /dev/null
@@ -0,0 +1,285 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDE_H_
+#define _QEDE_H_
+#include <linux/compiler.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_eth_if.h>
+
+#define QEDE_MAJOR_VERSION             8
+#define QEDE_MINOR_VERSION             4
+#define QEDE_REVISION_VERSION          0
+#define QEDE_ENGINEERING_VERSION       0
+#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
+               __stringify(QEDE_MINOR_VERSION) "."             \
+               __stringify(QEDE_REVISION_VERSION) "."          \
+               __stringify(QEDE_ENGINEERING_VERSION)
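+/* With the values above, DRV_MODULE_VERSION expands to "8.4.0.0" */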
+
+#define QEDE_ETH_INTERFACE_VERSION     300
+
+#define DRV_MODULE_SYM         qede
+
+struct qede_stats {
+       u64 no_buff_discards;
+       u64 rx_ucast_bytes;
+       u64 rx_mcast_bytes;
+       u64 rx_bcast_bytes;
+       u64 rx_ucast_pkts;
+       u64 rx_mcast_pkts;
+       u64 rx_bcast_pkts;
+       u64 mftag_filter_discards;
+       u64 mac_filter_discards;
+       u64 tx_ucast_bytes;
+       u64 tx_mcast_bytes;
+       u64 tx_bcast_bytes;
+       u64 tx_ucast_pkts;
+       u64 tx_mcast_pkts;
+       u64 tx_bcast_pkts;
+       u64 tx_err_drop_pkts;
+       u64 coalesced_pkts;
+       u64 coalesced_events;
+       u64 coalesced_aborts_num;
+       u64 non_coalesced_pkts;
+       u64 coalesced_bytes;
+
+       /* port */
+       u64 rx_64_byte_packets;
+       u64 rx_127_byte_packets;
+       u64 rx_255_byte_packets;
+       u64 rx_511_byte_packets;
+       u64 rx_1023_byte_packets;
+       u64 rx_1518_byte_packets;
+       u64 rx_1522_byte_packets;
+       u64 rx_2047_byte_packets;
+       u64 rx_4095_byte_packets;
+       u64 rx_9216_byte_packets;
+       u64 rx_16383_byte_packets;
+       u64 rx_crc_errors;
+       u64 rx_mac_crtl_frames;
+       u64 rx_pause_frames;
+       u64 rx_pfc_frames;
+       u64 rx_align_errors;
+       u64 rx_carrier_errors;
+       u64 rx_oversize_packets;
+       u64 rx_jabbers;
+       u64 rx_undersize_packets;
+       u64 rx_fragments;
+       u64 tx_64_byte_packets;
+       u64 tx_65_to_127_byte_packets;
+       u64 tx_128_to_255_byte_packets;
+       u64 tx_256_to_511_byte_packets;
+       u64 tx_512_to_1023_byte_packets;
+       u64 tx_1024_to_1518_byte_packets;
+       u64 tx_1519_to_2047_byte_packets;
+       u64 tx_2048_to_4095_byte_packets;
+       u64 tx_4096_to_9216_byte_packets;
+       u64 tx_9217_to_16383_byte_packets;
+       u64 tx_pause_frames;
+       u64 tx_pfc_frames;
+       u64 tx_lpi_entry_count;
+       u64 tx_total_collisions;
+       u64 brb_truncates;
+       u64 brb_discards;
+       u64 tx_mac_ctrl_frames;
+};
+
+struct qede_dev {
+       struct qed_dev                  *cdev;
+       struct net_device               *ndev;
+       struct pci_dev                  *pdev;
+
+       u32                             dp_module;
+       u8                              dp_level;
+
+       const struct qed_eth_ops        *ops;
+
+       struct qed_dev_eth_info dev_info;
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
+                                (edev)->dev_info.num_tc)
+
+       struct qede_fastpath            *fp_array;
+       u16                             num_rss;
+       u8                              num_tc;
+#define QEDE_RSS_CNT(edev)             ((edev)->num_rss)
+#define QEDE_TSS_CNT(edev)             ((edev)->num_rss *      \
+                                        (edev)->num_tc)
+#define QEDE_TSS_IDX(edev, txqidx)     ((txqidx) % (edev)->num_rss)
+#define QEDE_TC_IDX(edev, txqidx)      ((txqidx) / (edev)->num_rss)
+#define QEDE_TX_QUEUE(edev, txqidx)    \
+       (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+                                                       (edev), (txqidx))])
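+/* Worked example of the queue-index math above: with num_rss == 4 and
+ * num_tc == 2 there are QEDE_TSS_CNT == 8 Tx queues, and txqidx 5 maps to
+ * TSS index 5 % 4 == 1 and TC index 5 / 4 == 1, i.e. &fp_array[1].txqs[1].
+ */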
+
+       struct qed_int_info             int_info;
+       unsigned char                   primary_mac[ETH_ALEN];
+
+       /* Smaller private variant of the RTNL lock */
+       struct mutex                    qede_lock;
+       u32                             state; /* Protected by qede_lock */
+       u16                             rx_buf_size;
+       /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVERHEAD                   (ETH_HLEN + 8 + 8)
+       /* Max supported alignment is 256 (8 shift)
+        * minimal alignment shift 6 is optimal for 57xxx HW performance
+        */
+#define QEDE_RX_ALIGN_SHIFT            max(6, min(8, L1_CACHE_SHIFT))
+       /* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+        * at the end of skb->data, to avoid wasting a full cache line.
+        * This reduces memory use (skb->truesize).
+        */
+#define QEDE_FW_RX_ALIGN_END                                   \
+       max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,                  \
+             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
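+/* E.g., with L1_CACHE_SHIFT == 6, QEDE_RX_ALIGN_SHIFT is max(6, min(8, 6))
+ * == 6, so the alignment term is 64 bytes; on most configurations
+ * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is larger and wins the
+ * max_t() above.
+ */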
+
+       struct qede_stats               stats;
+       struct qed_update_vport_rss_params      rss_params;
+       u16                     q_num_rx_buffers; /* Must be a power of two */
+       u16                     q_num_tx_buffers; /* Must be a power of two */
+
+       struct delayed_work             sp_task;
+       unsigned long                   sp_flags;
+};
+
+enum QEDE_STATE {
+       QEDE_STATE_CLOSED,
+       QEDE_STATE_OPEN,
+};
+
+#define HILO_U64(hi, lo)               ((((u64)(hi)) << 32) + (lo))
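+/* E.g., HILO_U64(0x1, 0x2) == 0x100000002ULL. */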
+
+#define        MAX_NUM_TC      8
+#define        MAX_NUM_PRI     8
+
+/* The driver supports the new build_skb() API:
+ * the RX ring buffer holds a pointer to the kmalloc()'d data only,
+ * and skbs are built only after the frame has been DMA-ed.
+ */
+struct sw_rx_data {
+       u8 *data;
+
+       DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct qede_rx_queue {
+       __le16                  *hw_cons_ptr;
+       struct sw_rx_data       *sw_rx_ring;
+       u16                     sw_rx_cons;
+       u16                     sw_rx_prod;
+       struct qed_chain        rx_bd_ring;
+       struct qed_chain        rx_comp_ring;
+       void __iomem            *hw_rxq_prod_addr;
+
+       int                     rx_buf_size;
+
+       u16                     num_rx_buffers;
+       u16                     rxq_id;
+
+       u64                     rx_hw_errors;
+       u64                     rx_alloc_errors;
+};
+
+union db_prod {
+       struct eth_db_data data;
+       u32             raw;
+};
+
+struct sw_tx_bd {
+       struct sk_buff *skb;
+       u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define QEDE_TSO_SPLIT_BD              BIT(0)
+};
+
+struct qede_tx_queue {
+       int                     index; /* Queue index */
+       __le16                  *hw_cons_ptr;
+       struct sw_tx_bd         *sw_tx_ring;
+       u16                     sw_tx_cons;
+       u16                     sw_tx_prod;
+       struct qed_chain        tx_pbl;
+       void __iomem            *doorbell_addr;
+       union db_prod           tx_db;
+
+       u16                     num_tx_buffers;
+};
+
+#define BD_UNMAP_ADDR(bd)              HILO_U64(le32_to_cpu((bd)->addr.hi), \
+                                                le32_to_cpu((bd)->addr.lo))
+#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len)                          \
+       do {                                                            \
+               (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr));      \
+               (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr));      \
+               (bd)->nbytes = cpu_to_le16(len);                        \
+       } while (0)
+#define BD_UNMAP_LEN(bd)               (le16_to_cpu((bd)->nbytes))
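+/* Illustrative round-trip (a sketch, assuming bd has the addr.hi/addr.lo/
+ * nbytes layout used above and mapping is a dma_addr_t):
+ *
+ *	BD_SET_UNMAP_ADDR_LEN(bd, mapping, len);
+ *	BD_UNMAP_ADDR(bd) then rebuilds mapping from the little-endian
+ *	hi/lo halves, and BD_UNMAP_LEN(bd) returns len.
+ */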
+
+struct qede_fastpath {
+       struct qede_dev *edev;
+       u8                      rss_id;
+       struct napi_struct      napi;
+       struct qed_sb_info      *sb_info;
+       struct qede_rx_queue    *rxq;
+       struct qede_tx_queue    *txqs;
+
+#define VEC_NAME_SIZE  (sizeof(((struct net_device *)0)->name) + 8)
+       char    name[VEC_NAME_SIZE];
+};
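+
+/* VEC_NAME_SIZE leaves IFNAMSIZ bytes for the netdev name plus 8 bytes of
+ * headroom, presumably for a per-vector suffix such as "-fp-<id>" (the
+ * exact format is set where the IRQs are requested, not shown here).
+ */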
+
+/* Debug print definitions */
+#define DP_NAME(edev) ((edev)->ndev->name)
+
+#define XMIT_PLAIN             0
+#define XMIT_L4_CSUM           BIT(0)
+#define XMIT_LSO               BIT(1)
+#define XMIT_ENC               BIT(2)
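+/* E.g., qede_xmit_type() (in qede_main.c) returns XMIT_L4_CSUM | XMIT_LSO
+ * for a GSO skb with CHECKSUM_PARTIAL.
+ */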
+
+#define QEDE_CSUM_ERROR                        BIT(0)
+#define QEDE_CSUM_UNNECESSARY          BIT(1)
+
+#define QEDE_SP_RX_MODE                1
+
+union qede_reload_args {
+       u16 mtu;
+};
+
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
+void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_reload(struct qede_dev *edev,
+                void (*func)(struct qede_dev *edev,
+                             union qede_reload_args *args),
+                union qede_reload_args *args);
+int qede_change_mtu(struct net_device *dev, int new_mtu);
+void qede_fill_by_demand_stats(struct qede_dev *edev);
+
+#define RX_RING_SIZE_POW       13
+#define RX_RING_SIZE           BIT(RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX         (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN         128
+#define NUM_RX_BDS_DEF         NUM_RX_BDS_MAX
+
+#define TX_RING_SIZE_POW       13
+#define TX_RING_SIZE           BIT(TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX         (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN         128
+#define NUM_TX_BDS_DEF         NUM_TX_BDS_MAX
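+
+/* RX_RING_SIZE and TX_RING_SIZE are powers of two, so the *_BDS_MAX values
+ * double as index masks: e.g. (sw_tx_prod & NUM_TX_BDS_MAX) wraps a
+ * free-running 16-bit producer into the 0..8191 range of an 8192-entry
+ * ring.
+ */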
+
+#define        for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+
+#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
new file mode 100644 (file)
index 0000000..3a36247
--- /dev/null
@@ -0,0 +1,385 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/capability.h>
+#include "qede.h"
+
+#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, pf_only) \
+        {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
+#define QEDE_PF_STAT(stat_name)                _QEDE_STAT(stat_name, true)
+#define QEDE_STAT(stat_name)           _QEDE_STAT(stat_name, false)
+
+#define QEDE_RQSTAT_OFFSET(stat_name) \
+        (offsetof(struct qede_rx_queue, stat_name))
+#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_RQSTAT(stat_name) \
+        {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+static const struct {
+       u64 offset;
+       char string[ETH_GSTRING_LEN];
+} qede_rqstats_arr[] = {
+       QEDE_RQSTAT(rx_hw_errors),
+       QEDE_RQSTAT(rx_alloc_errors),
+};
+
+#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
+#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
+       (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
+                   qede_rqstats_arr[(sindex)].offset)))
+static const struct {
+       u64 offset;
+       char string[ETH_GSTRING_LEN];
+       bool pf_only;
+} qede_stats_arr[] = {
+       QEDE_STAT(rx_ucast_bytes),
+       QEDE_STAT(rx_mcast_bytes),
+       QEDE_STAT(rx_bcast_bytes),
+       QEDE_STAT(rx_ucast_pkts),
+       QEDE_STAT(rx_mcast_pkts),
+       QEDE_STAT(rx_bcast_pkts),
+
+       QEDE_STAT(tx_ucast_bytes),
+       QEDE_STAT(tx_mcast_bytes),
+       QEDE_STAT(tx_bcast_bytes),
+       QEDE_STAT(tx_ucast_pkts),
+       QEDE_STAT(tx_mcast_pkts),
+       QEDE_STAT(tx_bcast_pkts),
+
+       QEDE_PF_STAT(rx_64_byte_packets),
+       QEDE_PF_STAT(rx_127_byte_packets),
+       QEDE_PF_STAT(rx_255_byte_packets),
+       QEDE_PF_STAT(rx_511_byte_packets),
+       QEDE_PF_STAT(rx_1023_byte_packets),
+       QEDE_PF_STAT(rx_1518_byte_packets),
+       QEDE_PF_STAT(rx_1522_byte_packets),
+       QEDE_PF_STAT(rx_2047_byte_packets),
+       QEDE_PF_STAT(rx_4095_byte_packets),
+       QEDE_PF_STAT(rx_9216_byte_packets),
+       QEDE_PF_STAT(rx_16383_byte_packets),
+       QEDE_PF_STAT(tx_64_byte_packets),
+       QEDE_PF_STAT(tx_65_to_127_byte_packets),
+       QEDE_PF_STAT(tx_128_to_255_byte_packets),
+       QEDE_PF_STAT(tx_256_to_511_byte_packets),
+       QEDE_PF_STAT(tx_512_to_1023_byte_packets),
+       QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
+       QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
+       QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
+       QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
+       QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
+
+       QEDE_PF_STAT(rx_mac_crtl_frames),
+       QEDE_PF_STAT(tx_mac_ctrl_frames),
+       QEDE_PF_STAT(rx_pause_frames),
+       QEDE_PF_STAT(tx_pause_frames),
+       QEDE_PF_STAT(rx_pfc_frames),
+       QEDE_PF_STAT(tx_pfc_frames),
+
+       QEDE_PF_STAT(rx_crc_errors),
+       QEDE_PF_STAT(rx_align_errors),
+       QEDE_PF_STAT(rx_carrier_errors),
+       QEDE_PF_STAT(rx_oversize_packets),
+       QEDE_PF_STAT(rx_jabbers),
+       QEDE_PF_STAT(rx_undersize_packets),
+       QEDE_PF_STAT(rx_fragments),
+       QEDE_PF_STAT(tx_lpi_entry_count),
+       QEDE_PF_STAT(tx_total_collisions),
+       QEDE_PF_STAT(brb_truncates),
+       QEDE_PF_STAT(brb_discards),
+       QEDE_STAT(no_buff_discards),
+       QEDE_PF_STAT(mftag_filter_discards),
+       QEDE_PF_STAT(mac_filter_discards),
+       QEDE_STAT(tx_err_drop_pkts),
+
+       QEDE_STAT(coalesced_pkts),
+       QEDE_STAT(coalesced_events),
+       QEDE_STAT(coalesced_aborts_num),
+       QEDE_STAT(non_coalesced_pkts),
+       QEDE_STAT(coalesced_bytes),
+};
+
+#define QEDE_STATS_DATA(dev, index) \
+       (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
+                       + qede_stats_arr[(index)].offset)))
+
+#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
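+
+/* E.g., for the QEDE_STAT(rx_ucast_bytes) entry, QEDE_STATS_DATA() adds
+ * offsetof(struct qede_dev, stats) + offsetof(struct qede_stats,
+ * rx_ucast_bytes) to the device pointer and reads a u64 there, so the one
+ * table above drives both the string names and the value extraction.
+ */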
+
+static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
+{
+       int i, j, k;
+
+       for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+               strcpy(buf + j * ETH_GSTRING_LEN,
+                      qede_stats_arr[i].string);
+               j++;
+       }
+
+       for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
+               strcpy(buf + j * ETH_GSTRING_LEN,
+                      qede_rqstats_arr[k].string);
+}
+
+static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               qede_get_strings_stats(edev, buf);
+               break;
+       default:
+               DP_VERBOSE(edev, QED_MSG_DEBUG,
+                          "Unsupported stringset 0x%08x\n", stringset);
+       }
+}
+
+static void qede_get_ethtool_stats(struct net_device *dev,
+                                  struct ethtool_stats *stats, u64 *buf)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       int sidx, cnt = 0;
+       int qid;
+
+       qede_fill_by_demand_stats(edev);
+
+       mutex_lock(&edev->qede_lock);
+
+       for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+               buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+
+       for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
+               buf[cnt] = 0;
+               for (qid = 0; qid < edev->num_rss; qid++)
+                       buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
+               cnt++;
+       }
+
+       mutex_unlock(&edev->qede_lock);
+}
+
+static int qede_get_sset_count(struct net_device *dev, int stringset)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       int num_stats = QEDE_NUM_STATS;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               return num_stats + QEDE_NUM_RQSTATS;
+
+       default:
+               DP_VERBOSE(edev, QED_MSG_DEBUG,
+                          "Unsupported stringset 0x%08x\n", stringset);
+               return -EINVAL;
+       }
+}
+
+static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_output current_link;
+
+       memset(&current_link, 0, sizeof(current_link));
+       edev->ops->common->get_link(edev->cdev, &current_link);
+
+       cmd->supported = current_link.supported_caps;
+       cmd->advertising = current_link.advertised_caps;
+       if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
+               ethtool_cmd_speed_set(cmd, current_link.speed);
+               cmd->duplex = current_link.duplex;
+       } else {
+               cmd->duplex = DUPLEX_UNKNOWN;
+               ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+       }
+       cmd->port = current_link.port;
+       cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+                                               AUTONEG_DISABLE;
+       cmd->lp_advertising = current_link.lp_caps;
+
+       return 0;
+}
+
+static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_output current_link;
+       struct qed_link_params params;
+       u32 speed;
+
+       if (edev->dev_info.common.is_mf) {
+               DP_INFO(edev,
+                       "Link parameters can not be changed in MF mode\n");
+               return -EOPNOTSUPP;
+       }
+
+       memset(&current_link, 0, sizeof(current_link));
+       memset(&params, 0, sizeof(params));
+       edev->ops->common->get_link(edev->cdev, &current_link);
+
+       speed = ethtool_cmd_speed(cmd);
+       params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
+       params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+       if (cmd->autoneg == AUTONEG_ENABLE) {
+               params.autoneg = true;
+               params.forced_speed = 0;
+               params.adv_speeds = cmd->advertising;
+       } else { /* forced speed */
+               params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
+               params.autoneg = false;
+               params.forced_speed = speed;
+               switch (speed) {
+               case SPEED_10000:
+                       if (!(current_link.supported_caps &
+                           SUPPORTED_10000baseKR_Full)) {
+                               DP_INFO(edev, "10G speed not supported\n");
+                               return -EINVAL;
+                       }
+                       params.adv_speeds = SUPPORTED_10000baseKR_Full;
+                       break;
+               case SPEED_40000:
+                       if (!(current_link.supported_caps &
+                           SUPPORTED_40000baseLR4_Full)) {
+                               DP_INFO(edev, "40G speed not supported\n");
+                               return -EINVAL;
+                       }
+                       params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+                       break;
+               default:
+                       DP_INFO(edev, "Unsupported speed %u\n", speed);
+                       return -EINVAL;
+               }
+       }
+
+       params.link_up = true;
+       edev->ops->common->set_link(edev->cdev, &params);
+
+       return 0;
+}
+
+static void qede_get_drvinfo(struct net_device *ndev,
+                            struct ethtool_drvinfo *info)
+{
+       char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       strlcpy(info->driver, "qede", sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+
+       snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+                edev->dev_info.common.fw_major,
+                edev->dev_info.common.fw_minor,
+                edev->dev_info.common.fw_rev,
+                edev->dev_info.common.fw_eng);
+
+       snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+                (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
+                (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
+                (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
+                edev->dev_info.common.mfw_rev & 0xFF);
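+       /* E.g., an mfw_rev of 0x08040200 renders as "8.4.2.0". */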
+
+       if ((strlen(storm) + strlen(mfw) + strlen("mfw storm  ")) <
+           sizeof(info->fw_version)) {
+               snprintf(info->fw_version, sizeof(info->fw_version),
+                        "mfw %s storm %s", mfw, storm);
+       } else {
+               snprintf(info->fw_version, sizeof(info->fw_version),
+                        "%s %s", mfw, storm);
+       }
+
+       strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+}
+
+static u32 qede_get_msglevel(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
+              edev->dp_module;
+}
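+
+/* The msglevel word returned above packs dp_module in the low bits and
+ * dp_level above QED_LOG_LEVEL_SHIFT; qede_set_msglevel() below performs
+ * the reverse split via qede_config_debug().
+ */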
+
+static void qede_set_msglevel(struct net_device *ndev, u32 level)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       u32 dp_module = 0;
+       u8 dp_level = 0;
+
+       qede_config_debug(level, &dp_module, &dp_level);
+
+       edev->dp_level = dp_level;
+       edev->dp_module = dp_module;
+       edev->ops->common->update_msglvl(edev->cdev,
+                                        dp_module, dp_level);
+}
+
+static u32 qede_get_link(struct net_device *dev)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_output current_link;
+
+       memset(&current_link, 0, sizeof(current_link));
+       edev->ops->common->get_link(edev->cdev, &current_link);
+
+       return current_link.link_up;
+}
+
+static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+{
+       edev->ndev->mtu = args->mtu;
+}
+
+/* Netdevice NDOs */
+#define ETH_MAX_JUMBO_PACKET_SIZE      9600
+#define ETH_MIN_PACKET_SIZE            60
+int qede_change_mtu(struct net_device *ndev, int new_mtu)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       union qede_reload_args args;
+
+       if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+           ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+               DP_ERR(edev, "Can't support requested MTU size\n");
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                  "Configuring MTU size of %d\n", new_mtu);
+
+       /* Set the mtu field and re-start the interface if needed */
+       args.mtu = new_mtu;
+
+       if (netif_running(edev->ndev))
+               qede_reload(edev, &qede_update_mtu, &args);
+
+       qede_update_mtu(edev, &args);
+
+       return 0;
+}
+
+static const struct ethtool_ops qede_ethtool_ops = {
+       .get_settings = qede_get_settings,
+       .set_settings = qede_set_settings,
+       .get_drvinfo = qede_get_drvinfo,
+       .get_msglevel = qede_get_msglevel,
+       .set_msglevel = qede_set_msglevel,
+       .get_link = qede_get_link,
+       .get_strings = qede_get_strings,
+       .get_ethtool_stats = qede_get_ethtool_stats,
+       .get_sset_count = qede_get_sset_count,
+
+};
+
+void qede_set_ethtool_ops(struct net_device *dev)
+{
+       dev->ethtool_ops = &qede_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
new file mode 100644 (file)
index 0000000..f4657a2
--- /dev/null
@@ -0,0 +1,2584 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/io.h>
+#include <linux/netdev_features.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/vxlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/pkt_sched.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/random.h>
+#include <net/ip6_checksum.h>
+#include <linux/bitops.h>
+
+#include "qede.h"
+
+static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
+                             DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static uint debug;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static const struct qed_eth_ops *qed_ops;
+
+#define CHIP_NUM_57980S_40             0x1634
+#define CHIP_NUM_57980S_10             0x1635
+#define CHIP_NUM_57980S_MF             0x1636
+#define CHIP_NUM_57980S_100            0x1644
+#define CHIP_NUM_57980S_50             0x1654
+#define CHIP_NUM_57980S_25             0x1656
+
+#ifndef PCI_DEVICE_ID_NX2_57980E
+#define PCI_DEVICE_ID_57980S_40                CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_10                CHIP_NUM_57980S_10
+#define PCI_DEVICE_ID_57980S_MF                CHIP_NUM_57980S_MF
+#define PCI_DEVICE_ID_57980S_100       CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_57980S_50                CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_57980S_25                CHIP_NUM_57980S_25
+#endif
+
+static const struct pci_device_id qede_pci_tbl[] = {
+       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
+       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
+       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
+       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
+       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
+       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+       { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+
+#define TX_TIMEOUT             (5 * HZ)
+
+static void qede_remove(struct pci_dev *pdev);
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+                               struct qede_rx_queue *rxq);
+static void qede_link_update(void *dev, struct qed_link_output *link);
+
+static struct pci_driver qede_pci_driver = {
+       .name = "qede",
+       .id_table = qede_pci_tbl,
+       .probe = qede_probe,
+       .remove = qede_remove,
+};
+
+static struct qed_eth_cb_ops qede_ll_ops = {
+       {
+               .link_update = qede_link_update,
+       },
+};
+
+static int qede_netdev_event(struct notifier_block *this, unsigned long event,
+                            void *ptr)
+{
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       struct ethtool_drvinfo drvinfo;
+       struct qede_dev *edev;
+
+       /* Currently only support name change */
+       if (event != NETDEV_CHANGENAME)
+               goto done;
+
+       /* Check whether this is a qede device */
+       if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
+               goto done;
+
+       memset(&drvinfo, 0, sizeof(drvinfo));
+       ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
+       if (strcmp(drvinfo.driver, "qede"))
+               goto done;
+       edev = netdev_priv(ndev);
+
+       /* Notify qed of the name change */
+       if (!edev->ops || !edev->ops->common)
+               goto done;
+       edev->ops->common->set_id(edev->cdev, edev->ndev->name,
+                                 "qede");
+
+done:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block qede_netdev_notifier = {
+       .notifier_call = qede_netdev_event,
+};
+
+static int __init qede_init(void)
+{
+       int ret;
+       u32 qed_ver;
+
+       pr_notice("qede_init: %s\n", version);
+
+       qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
+       if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
+               pr_notice("Version mismatch [%08x != %08x]\n",
+                         qed_ver,
+                         QEDE_ETH_INTERFACE_VERSION);
+               return -EINVAL;
+       }
+
+       qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+       if (!qed_ops) {
+               pr_notice("Failed to get qed eth operations\n");
+               return -EINVAL;
+       }
+
+       /* The notifier must be registered before the pci driver, since we
+        * might otherwise miss an interface rename right after pci probe
+        * and netdev registration.
+        */
+       ret = register_netdevice_notifier(&qede_netdev_notifier);
+       if (ret) {
+               pr_notice("Failed to register netdevice_notifier\n");
+               qed_put_eth_ops();
+               return -EINVAL;
+       }
+
+       ret = pci_register_driver(&qede_pci_driver);
+       if (ret) {
+               pr_notice("Failed to register driver\n");
+               unregister_netdevice_notifier(&qede_netdev_notifier);
+               qed_put_eth_ops();
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void __exit qede_cleanup(void)
+{
+       pr_notice("qede_cleanup called\n");
+
+       unregister_netdevice_notifier(&qede_netdev_notifier);
+       pci_unregister_driver(&qede_pci_driver);
+       qed_put_eth_ops();
+}
+
+module_init(qede_init);
+module_exit(qede_cleanup);
+
+/* -------------------------------------------------------------------------
+ * START OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+/* Unmap the data and free skb */
+static int qede_free_tx_pkt(struct qede_dev *edev,
+                           struct qede_tx_queue *txq,
+                           int *len)
+{
+       u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+       struct eth_tx_1st_bd *first_bd;
+       struct eth_tx_bd *tx_data_bd;
+       int bds_consumed = 0;
+       int nbds;
+       bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+       int i, split_bd_len = 0;
+
+       if (unlikely(!skb)) {
+               DP_ERR(edev,
+                      "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+                      idx, txq->sw_tx_cons, txq->sw_tx_prod);
+               return -1;
+       }
+
+       *len = skb->len;
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+       bds_consumed++;
+
+       nbds = first_bd->data.nbds;
+
+       if (data_split) {
+               struct eth_tx_bd *split = (struct eth_tx_bd *)
+                       qed_chain_consume(&txq->tx_pbl);
+               split_bd_len = BD_UNMAP_LEN(split);
+               bds_consumed++;
+       }
+       dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+                      BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+       /* Unmap the data of the skb frags */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                       qed_chain_consume(&txq->tx_pbl);
+               dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+                              BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+       }
+
+       while (bds_consumed++ < nbds)
+               qed_chain_consume(&txq->tx_pbl);
+
+       /* Free skb */
+       dev_kfree_skb_any(skb);
+       txq->sw_tx_ring[idx].skb = NULL;
+       txq->sw_tx_ring[idx].flags = 0;
+
+       return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_dev *edev,
+                                   struct qede_tx_queue *txq,
+                                   struct eth_tx_1st_bd *first_bd,
+                                   int nbd,
+                                   bool data_split)
+{
+       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+       struct eth_tx_bd *tx_data_bd;
+       int i, split_bd_len = 0;
+
+       /* Return prod to its position before this skb was handled */
+       qed_chain_set_prod(&txq->tx_pbl,
+                          le16_to_cpu(txq->tx_db.data.bd_prod),
+                          first_bd);
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+       if (data_split) {
+               struct eth_tx_bd *split = (struct eth_tx_bd *)
+                                         qed_chain_produce(&txq->tx_pbl);
+               split_bd_len = BD_UNMAP_LEN(split);
+               nbd--;
+       }
+
+       dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+                      BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+       /* Unmap the data of the skb frags */
+       for (i = 0; i < nbd; i++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               if (tx_data_bd->nbytes)
+                       dma_unmap_page(&edev->pdev->dev,
+                                      BD_UNMAP_ADDR(tx_data_bd),
+                                      BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+       }
+
+       /* Return again prod to its position before this skb was handled */
+       qed_chain_set_prod(&txq->tx_pbl,
+                          le16_to_cpu(txq->tx_db.data.bd_prod),
+                          first_bd);
+
+       /* Free skb */
+       dev_kfree_skb_any(skb);
+       txq->sw_tx_ring[idx].skb = NULL;
+       txq->sw_tx_ring[idx].flags = 0;
+}
+
+static u32 qede_xmit_type(struct qede_dev *edev,
+                         struct sk_buff *skb,
+                         int *ipv6_ext)
+{
+       u32 rc = XMIT_L4_CSUM;
+       __be16 l3_proto;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return XMIT_PLAIN;
+
+       l3_proto = vlan_get_protocol(skb);
+       if (l3_proto == htons(ETH_P_IPV6) &&
+           (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+               *ipv6_ext = 1;
+
+       if (skb_is_gso(skb))
+               rc |= XMIT_LSO;
+
+       return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+                                        struct eth_tx_2nd_bd *second_bd,
+                                        struct eth_tx_3rd_bd *third_bd)
+{
+       u8 l4_proto;
+       u16 bd2_bits = 0, bd2_bits2 = 0;
+
+       bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+       bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+                    ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+                   << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+       bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+                     ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+       if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+               l4_proto = ipv6_hdr(skb)->nexthdr;
+       else
+               l4_proto = ip_hdr(skb)->protocol;
+
+       if (l4_proto == IPPROTO_UDP)
+               bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+       if (third_bd) {
+               third_bd->data.bitfields |=
+                       ((tcp_hdrlen(skb) / 4) &
+                        ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+                       ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
+       }
+
+       second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+       second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_dev *edev,
+                         skb_frag_t *frag,
+                         struct eth_tx_bd *bd)
+{
+       dma_addr_t mapping;
+
+       /* Map skb non-linear frag data for DMA */
+       mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+                                  skb_frag_size(frag),
+                                  DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+               DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+               return -ENOMEM;
+       }
+
+       /* Setup the data pointer of the frag data */
+       BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+       return 0;
+}
+
+/* Main transmit function */
+static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+                                  struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct netdev_queue *netdev_txq;
+       struct qede_tx_queue *txq;
+       struct eth_tx_1st_bd *first_bd;
+       struct eth_tx_2nd_bd *second_bd = NULL;
+       struct eth_tx_3rd_bd *third_bd = NULL;
+       struct eth_tx_bd *tx_data_bd = NULL;
+       u16 txq_index;
+       u8 nbd = 0;
+       dma_addr_t mapping;
+       int rc, frag_idx = 0, ipv6_ext = 0;
+       u8 xmit_type;
+       u16 idx;
+       u16 hlen;
+       bool data_split = false;
+
+       /* Get tx-queue context and netdev index */
+       txq_index = skb_get_queue_mapping(skb);
+       WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+       txq = QEDE_TX_QUEUE(edev, txq_index);
+       netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+       /* Current code doesn't need SKB linearization, since the max
+        * number of skb frags already fits within the FW HSI limit
+        * (checked by the BUILD_BUG_ON below).
+        */
+       BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);
+
+       WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
+                              (MAX_SKB_FRAGS + 1));
+
+       xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+
+       /* Fill the entry in the SW ring and the BDs in the FW ring */
+       idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       txq->sw_tx_ring[idx].skb = skb;
+       first_bd = (struct eth_tx_1st_bd *)
+                  qed_chain_produce(&txq->tx_pbl);
+       memset(first_bd, 0, sizeof(*first_bd));
+       first_bd->data.bd_flags.bitfields =
+               1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+       /* Map skb linear data for DMA and set in the first BD */
+       mapping = dma_map_single(&edev->pdev->dev, skb->data,
+                                skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+               DP_NOTICE(edev, "SKB mapping failed\n");
+               qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+               return NETDEV_TX_OK;
+       }
+       nbd++;
+       BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+       /* In case there is IPv6 with extension headers or LSO we need 2nd and
+        * 3rd BDs.
+        */
+       if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+               second_bd = (struct eth_tx_2nd_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               memset(second_bd, 0, sizeof(*second_bd));
+
+               nbd++;
+               third_bd = (struct eth_tx_3rd_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               memset(third_bd, 0, sizeof(*third_bd));
+
+               nbd++;
+               /* We need to fill in additional data in second_bd... */
+               tx_data_bd = (struct eth_tx_bd *)second_bd;
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+               first_bd->data.bd_flags.bitfields |=
+                       1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+       }
+
+       /* Fill the parsing flags & params according to the requested offload */
+       if (xmit_type & XMIT_L4_CSUM) {
+               /* We don't re-calculate IP checksum as it is already done by
+                * the upper stack
+                */
+               first_bd->data.bd_flags.bitfields |=
+                       1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+               /* If the packet is IPv6 with extension header, indicate that
+                * to FW and pass few params, since the device cracker doesn't
+                * support parsing IPv6 with extension header/s.
+                */
+               if (unlikely(ipv6_ext))
+                       qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+       }
+
+       if (xmit_type & XMIT_LSO) {
+               first_bd->data.bd_flags.bitfields |=
+                       (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+               third_bd->data.lso_mss =
+                       cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+               first_bd->data.bd_flags.bitfields |=
+               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+               hlen = skb_transport_header(skb) +
+                      tcp_hdrlen(skb) - skb->data;
+
+               /* @@@TBD - if this is not removed, need to check */
+               third_bd->data.bitfields |=
+                       (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+               /* Make life easier for FW guys who can't deal with header and
+                * data on same BD. If we need to split, use the second bd...
+                */
+               if (unlikely(skb_headlen(skb) > hlen)) {
+                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                                  "TSO split header size is %d (%x:%x)\n",
+                                  first_bd->nbytes, first_bd->addr.hi,
+                                  first_bd->addr.lo);
+
+                       mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+                                          le32_to_cpu(first_bd->addr.lo)) +
+                                          hlen;
+
+                       BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+                                             le16_to_cpu(first_bd->nbytes) -
+                                             hlen);
+
+                       /* this marks the BD as one that has no
+                        * individual mapping
+                        */
+                       txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+                       first_bd->nbytes = cpu_to_le16(hlen);
+
+                       tx_data_bd = (struct eth_tx_bd *)third_bd;
+                       data_split = true;
+               }
+       }
+
+       /* Handle fragmented skb */
+       /* Special handling for frags placed in the 2nd and 3rd BDs */
+       while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+               rc = map_frag_to_bd(edev,
+                                   &skb_shinfo(skb)->frags[frag_idx],
+                                   tx_data_bd);
+               if (rc) {
+                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+                                               data_split);
+                       return NETDEV_TX_OK;
+               }
+
+               if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+                       tx_data_bd = (struct eth_tx_bd *)third_bd;
+               else
+                       tx_data_bd = NULL;
+
+               frag_idx++;
+       }
+
+       /* map last frags into 4th, 5th .... */
+       for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                            qed_chain_produce(&txq->tx_pbl);
+
+               memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+               rc = map_frag_to_bd(edev,
+                                   &skb_shinfo(skb)->frags[frag_idx],
+                                   tx_data_bd);
+               if (rc) {
+                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+                                               data_split);
+                       return NETDEV_TX_OK;
+               }
+       }
+
+       /* update the first BD with the actual num BDs */
+       first_bd->data.nbds = nbd;
+
+       netdev_tx_sent_queue(netdev_txq, skb->len);
+
+       skb_tx_timestamp(skb);
+
+       /* Advance packet producer only before sending the packet since mapping
+        * of pages may fail.
+        */
+       txq->sw_tx_prod++;
+
+       /* 'next page' entries are counted in the producer value */
+       txq->tx_db.data.bd_prod =
+               cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+       /* wmb makes sure that the BDs data is updated before updating the
+        * producer, otherwise FW may read old data from the BDs.
+        */
+       wmb();
+       barrier();
+       writel(txq->tx_db.raw, txq->doorbell_addr);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write arrives to the device before
+        * the queue lock is released and another start_xmit is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+
+       if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+                     < (MAX_SKB_FRAGS + 1))) {
+               netif_tx_stop_queue(netdev_txq);
+               DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                          "Stop queue was called\n");
+               /* paired memory barrier is in qede_tx_int(), we have to keep
+                * ordering of set_bit() in netif_tx_stop_queue() and read of
+                * fp->bd_tx_cons
+                */
+               smp_mb();
+
+               if (qed_chain_get_elem_left(&txq->tx_pbl)
+                    >= (MAX_SKB_FRAGS + 1) &&
+                   (edev->state == QEDE_STATE_OPEN)) {
+                       netif_tx_wake_queue(netdev_txq);
+                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                                  "Wake queue was called\n");
+               }
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+       u16 hw_bd_cons;
+
+       /* Tell compiler that consumer and producer can change */
+       barrier();
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
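+       /* A chain consumer exactly one ahead of the HW consumer likely
+        * corresponds to the PBL's "next page" element rather than a real
+        * completion, hence the early return.
+        */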
+       if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+               return 0;
+
+       return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static int qede_tx_int(struct qede_dev *edev,
+                      struct qede_tx_queue *txq)
+{
+       struct netdev_queue *netdev_txq;
+       u16 hw_bd_cons;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       int rc;
+
+       netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       barrier();
+
+       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+               int len = 0;
+
+               rc = qede_free_tx_pkt(edev, txq, &len);
+               if (rc) {
+                       DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+                                 hw_bd_cons,
+                                 qed_chain_get_cons_idx(&txq->tx_pbl));
+                       break;
+               }
+
+               bytes_compl += len;
+               pkts_compl++;
+               txq->sw_tx_cons++;
+       }
+
+       netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+       /* Need to make the tx_bd_cons update visible to start_xmit()
+        * before checking for netif_tx_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that
+        * start_xmit() will miss it and cause the queue to be stopped
+        * forever.
+        * On the other hand we need an rmb() here to ensure the proper
+        * ordering of bit testing in the following
+        * netif_tx_queue_stopped(txq) call.
+        */
+       smp_mb();
+
+       if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+               /* Taking tx_lock is needed to prevent re-enabling the queue
+                * while it's empty. This could have happened if rx_action() gets
+                * suspended in qede_tx_int() after the condition before
+                * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+                *
+                * stops the queue->sees fresh tx_bd_cons->releases the queue->
+                * sends some packets consuming the whole queue again->
+                * stops the queue
+                */
+
+               __netif_tx_lock(netdev_txq, smp_processor_id());
+
+               if ((netif_tx_queue_stopped(netdev_txq)) &&
+                   (edev->state == QEDE_STATE_OPEN) &&
+                   (qed_chain_get_elem_left(&txq->tx_pbl)
+                     >= (MAX_SKB_FRAGS + 1))) {
+                       netif_tx_wake_queue(netdev_txq);
+                       DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+                                  "Wake queue was called\n");
+               }
+
+               __netif_tx_unlock(netdev_txq);
+       }
+
+       return 0;
+}
+
+static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+       u16 hw_comp_cons, sw_comp_cons;
+
+       /* Tell compiler that status block fields can change */
+       barrier();
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       return hw_comp_cons != sw_comp_cons;
+}
+
+static bool qede_has_tx_work(struct qede_fastpath *fp)
+{
+       u8 tc;
+
+       for (tc = 0; tc < fp->edev->num_tc; tc++)
+               if (qede_txq_has_work(&fp->txqs[tc]))
+                       return true;
+       return false;
+}
+
+/* This function copies the Rx buffer from the CONS position to the PROD
+ * position, since we failed to allocate a new Rx buffer.
+ */
+static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+{
+       struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+       struct sw_rx_data *sw_rx_data_cons =
+               &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+       struct sw_rx_data *sw_rx_data_prod =
+               &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+       dma_unmap_addr_set(sw_rx_data_prod, mapping,
+                          dma_unmap_addr(sw_rx_data_cons, mapping));
+
+       sw_rx_data_prod->data = sw_rx_data_cons->data;
+       memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+
+       rxq->sw_rx_cons++;
+       rxq->sw_rx_prod++;
+}
+
+static inline void qede_update_rx_prod(struct qede_dev *edev,
+                                      struct qede_rx_queue *rxq)
+{
+       u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+       u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+       struct eth_rx_prod_data rx_prods = {0};
+
+       /* Update producers */
+       rx_prods.bd_prod = cpu_to_le16(bd_prod);
+       rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+       /* Make sure that the BD and SGE data is updated before updating the
+        * producers since FW might read the BD/SGE right after the producer
+        * is updated.
+        */
+       wmb();
+
+       internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+                       (u32 *)&rx_prods);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write arrives to the device before
+        * the napi lock is released and another qede_poll is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+}
+
+static u32 qede_get_rxhash(struct qede_dev *edev,
+                          u8 bitfields,
+                          __le32 rss_hash,
+                          enum pkt_hash_types *rxhash_type)
+{
+       enum rss_hash_type htype;
+
+       htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+
+       if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
+               *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+                               (htype == RSS_HASH_TYPE_IPV6)) ?
+                               PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+               return le32_to_cpu(rss_hash);
+       }
+       *rxhash_type = PKT_HASH_TYPE_NONE;
+       return 0;
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+       skb_checksum_none_assert(skb);
+
+       if (csum_flag & QEDE_CSUM_UNNECESSARY)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+                                   struct qede_fastpath *fp,
+                                   struct sk_buff *skb,
+                                   u16 vlan_tag)
+{
+       if (vlan_tag)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      vlan_tag);
+
+       napi_gro_receive(&fp->napi, skb);
+}
+
+static u8 qede_check_csum(u16 flag)
+{
+       u16 csum_flag = 0;
+       u8 csum = 0;
+
+       if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+            PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+               csum = QEDE_CSUM_UNNECESSARY;
+       }
+
+       csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+       if (csum_flag & flag)
+               return QEDE_CSUM_ERROR;
+
+       return csum;
+}
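+
+/* E.g., a parsing-flags word with only the "L4 checksum was calculated"
+ * bit set yields QEDE_CSUM_UNNECESSARY; an IP header error bit, or an L4
+ * checksum error bit when the calculated bit is set, overrides the result
+ * to QEDE_CSUM_ERROR.
+ */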
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+       struct qede_dev *edev = fp->edev;
+       struct qede_rx_queue *rxq = fp->rxq;
+
+       u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
+       int rx_pkt = 0;
+       u8 csum_flag;
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       /* Memory barrier to prevent the CPU from doing speculative reads of CQE
+        * / BD in the while-loop before reading hw_comp_cons. If the CQE is
+        * read before it is written by FW, then FW writes CQE and SB, and then
+        * the CPU reads the hw_comp_cons, it will use an old CQE.
+        */
+       rmb();
+
+       /* Loop to complete all indicated BDs */
+       while (sw_comp_cons != hw_comp_cons) {
+               struct eth_fast_path_rx_reg_cqe *fp_cqe;
+               enum pkt_hash_types rxhash_type;
+               enum eth_rx_cqe_type cqe_type;
+               struct sw_rx_data *sw_rx_data;
+               union eth_rx_cqe *cqe;
+               struct sk_buff *skb;
+               u16 len, pad;
+               u32 rx_hash;
+               u8 *data;
+
+               /* Get the CQE from the completion ring */
+               cqe = (union eth_rx_cqe *)
+                       qed_chain_consume(&rxq->rx_comp_ring);
+               cqe_type = cqe->fast_path_regular.type;
+
+               if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+                       edev->ops->eth_cqe_completion(
+                                       edev->cdev, fp->rss_id,
+                                       (struct eth_slow_path_rx_cqe *)cqe);
+                       goto next_cqe;
+               }
+
+               /* Get the data from the SW ring */
+               sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+               sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+               data = sw_rx_data->data;
+
+               fp_cqe = &cqe->fast_path_regular;
+               len = le16_to_cpu(fp_cqe->pkt_len);
+               pad = fp_cqe->placement_offset;
+
+               /* For every Rx BD consumed, we allocate a new BD so the BD ring
+                * is always with a fixed size. If allocation fails, we take the
+                * consumed BD and return it to the ring in the PROD position.
+                * The packet that was received on that BD will be dropped (and
+                * not passed to the upper stack).
+                */
+               if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
+                       dma_unmap_single(&edev->pdev->dev,
+                                        dma_unmap_addr(sw_rx_data, mapping),
+                                        rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+                       /* If this is an error packet then drop it */
+                       parse_flag =
+                       le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
+                       csum_flag = qede_check_csum(parse_flag);
+                       if (csum_flag == QEDE_CSUM_ERROR) {
+                               DP_NOTICE(edev,
+                                         "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+                                         sw_comp_cons, parse_flag);
+                               rxq->rx_hw_errors++;
+                               kfree(data);
+                               goto next_rx;
+                       }
+
+                       skb = build_skb(data, 0);
+
+                       if (unlikely(!skb)) {
+                               DP_NOTICE(edev,
+                                         "Build_skb failed, dropping incoming packet\n");
+                               kfree(data);
+                               rxq->rx_alloc_errors++;
+                               goto next_rx;
+                       }
+
+                       skb_reserve(skb, pad);
+
+               } else {
+                       DP_NOTICE(edev,
+                                 "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
+                       qede_reuse_rx_data(rxq);
+                       rxq->rx_alloc_errors++;
+                       goto next_cqe;
+               }
+
+               sw_rx_data->data = NULL;
+
+               skb_put(skb, len);
+
+               skb->protocol = eth_type_trans(skb, edev->ndev);
+
+               rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
+                                         fp_cqe->rss_hash,
+                                         &rxhash_type);
+
+               skb_set_hash(skb, rx_hash, rxhash_type);
+
+               qede_set_skb_csum(skb, csum_flag);
+
+               skb_record_rx_queue(skb, fp->rss_id);
+
+               qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+               qed_chain_consume(&rxq->rx_bd_ring);
+
+next_rx:
+               rxq->sw_rx_cons++;
+               rx_pkt++;
+
+next_cqe: /* don't consume bd rx buffer */
+               qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+               sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+               /* TODO (TPA): revisit how to handle the budget for TPA -
+                * perhaps only increase rx_pkt on aggregation "end".
+                */
+               if (rx_pkt == budget)
+                       break;
+       } /* repeat while sw_comp_cons != hw_comp_cons... */
+
+       /* Update producers */
+       qede_update_rx_prod(edev, rxq);
+
+       return rx_pkt;
+}
+
+static int qede_poll(struct napi_struct *napi, int budget)
+{
+       int work_done = 0;
+       struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+                                                napi);
+       struct qede_dev *edev = fp->edev;
+
+       while (1) {
+               u8 tc;
+
+               for (tc = 0; tc < edev->num_tc; tc++)
+                       if (qede_txq_has_work(&fp->txqs[tc]))
+                               qede_tx_int(edev, &fp->txqs[tc]);
+
+               if (qede_has_rx_work(fp->rxq)) {
+                       work_done += qede_rx_int(fp, budget - work_done);
+
+                       /* must not complete if we consumed full budget */
+                       if (work_done >= budget)
+                               break;
+               }
+
+               /* Fall out from the NAPI loop if needed */
+               if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
+                       qed_sb_update_sb_idx(fp->sb_info);
+                       /* *_has_*_work() reads the status block, so we need
+                        * to ensure that the status block indices have
+                        * actually been read (qed_sb_update_sb_idx above)
+                        * before the re-check below. Otherwise we could ack
+                        * a "newer" status block value to HW: if a DMA
+                        * landed right after qede_has_rx_work() and there
+                        * were no rmb(), the read in qed_sb_update_sb_idx()
+                        * could be postponed to just before *_ack_sb(), and
+                        * then no interrupt would arrive until the next
+                        * status block update, even though work is still
+                        * unhandled.
+                        */
+                       rmb();
+
+                       if (!(qede_has_rx_work(fp->rxq) ||
+                             qede_has_tx_work(fp))) {
+                               napi_complete(napi);
+                               /* Update and reenable interrupts */
+                               qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+                                          1 /*update*/);
+                               break;
+                       }
+               }
+       }
+
+       return work_done;
+}
+
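+/* Fast-path MSI-X interrupt: mask the status block (IGU_INT_DISABLE without
+ * updating the consumer) and defer all work to NAPI. qede_poll() re-enables
+ * the line via qed_sb_ack(IGU_INT_ENABLE, ...) once no Rx/Tx work remains.
+ */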
+static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+       struct qede_fastpath *fp = fp_cookie;
+
+       qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+       napi_schedule_irqoff(&fp->napi);
+       return IRQ_HANDLED;
+}
+
+/* -------------------------------------------------------------------------
+ * END OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_open(struct net_device *ndev);
+static int qede_close(struct net_device *ndev);
+static int qede_set_mac_addr(struct net_device *ndev, void *p);
+static void qede_set_rx_mode(struct net_device *ndev);
+static void qede_config_rx_mode(struct net_device *ndev);
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+                                enum qed_filter_xcast_params_type opcode,
+                                unsigned char mac[ETH_ALEN])
+{
+       struct qed_filter_params filter_cmd;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_UCAST;
+       filter_cmd.filter.ucast.type = opcode;
+       filter_cmd.filter.ucast.mac_valid = 1;
+       ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+void qede_fill_by_demand_stats(struct qede_dev *edev)
+{
+       struct qed_eth_stats stats;
+
+       edev->ops->get_vport_stats(edev->cdev, &stats);
+       edev->stats.no_buff_discards = stats.no_buff_discards;
+       edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
+       edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
+       edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
+       edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
+       edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
+       edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
+       edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
+       edev->stats.mac_filter_discards = stats.mac_filter_discards;
+
+       edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
+       edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
+       edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
+       edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
+       edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
+       edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
+       edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
+       edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
+       edev->stats.coalesced_events = stats.tpa_coalesced_events;
+       edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
+       edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
+       edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
+
+       edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
+       edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
+       edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
+       edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
+       edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
+       edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
+       edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
+       edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
+       edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
+       edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
+       edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+       edev->stats.rx_crc_errors = stats.rx_crc_errors;
+       edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
+       edev->stats.rx_pause_frames = stats.rx_pause_frames;
+       edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
+       edev->stats.rx_align_errors = stats.rx_align_errors;
+       edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
+       edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
+       edev->stats.rx_jabbers = stats.rx_jabbers;
+       edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
+       edev->stats.rx_fragments = stats.rx_fragments;
+       edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
+       edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
+       edev->stats.tx_128_to_255_byte_packets =
+                               stats.tx_128_to_255_byte_packets;
+       edev->stats.tx_256_to_511_byte_packets =
+                               stats.tx_256_to_511_byte_packets;
+       edev->stats.tx_512_to_1023_byte_packets =
+                               stats.tx_512_to_1023_byte_packets;
+       edev->stats.tx_1024_to_1518_byte_packets =
+                               stats.tx_1024_to_1518_byte_packets;
+       edev->stats.tx_1519_to_2047_byte_packets =
+                               stats.tx_1519_to_2047_byte_packets;
+       edev->stats.tx_2048_to_4095_byte_packets =
+                               stats.tx_2048_to_4095_byte_packets;
+       edev->stats.tx_4096_to_9216_byte_packets =
+                               stats.tx_4096_to_9216_byte_packets;
+       edev->stats.tx_9217_to_16383_byte_packets =
+                               stats.tx_9217_to_16383_byte_packets;
+       edev->stats.tx_pause_frames = stats.tx_pause_frames;
+       edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
+       edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
+       edev->stats.tx_total_collisions = stats.tx_total_collisions;
+       edev->stats.brb_truncates = stats.brb_truncates;
+       edev->stats.brb_discards = stats.brb_discards;
+       edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+}
+
+static struct rtnl_link_stats64 *qede_get_stats64(
+                           struct net_device *dev,
+                           struct rtnl_link_stats64 *stats)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       qede_fill_by_demand_stats(edev);
+
+       stats->rx_packets = edev->stats.rx_ucast_pkts +
+                           edev->stats.rx_mcast_pkts +
+                           edev->stats.rx_bcast_pkts;
+       stats->tx_packets = edev->stats.tx_ucast_pkts +
+                           edev->stats.tx_mcast_pkts +
+                           edev->stats.tx_bcast_pkts;
+
+       stats->rx_bytes = edev->stats.rx_ucast_bytes +
+                         edev->stats.rx_mcast_bytes +
+                         edev->stats.rx_bcast_bytes;
+
+       stats->tx_bytes = edev->stats.tx_ucast_bytes +
+                         edev->stats.tx_mcast_bytes +
+                         edev->stats.tx_bcast_bytes;
+
+       stats->tx_errors = edev->stats.tx_err_drop_pkts;
+       stats->multicast = edev->stats.rx_mcast_pkts +
+                          edev->stats.rx_bcast_pkts;
+
+       stats->rx_fifo_errors = edev->stats.no_buff_discards;
+
+       stats->collisions = edev->stats.tx_total_collisions;
+       stats->rx_crc_errors = edev->stats.rx_crc_errors;
+       stats->rx_frame_errors = edev->stats.rx_align_errors;
+
+       return stats;
+}
+
+static const struct net_device_ops qede_netdev_ops = {
+       .ndo_open = qede_open,
+       .ndo_stop = qede_close,
+       .ndo_start_xmit = qede_start_xmit,
+       .ndo_set_rx_mode = qede_set_rx_mode,
+       .ndo_set_mac_address = qede_set_mac_addr,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_change_mtu = qede_change_mtu,
+       .ndo_get_stats64 = qede_get_stats64,
+};
+
+/* -------------------------------------------------------------------------
+ * START OF PROBE / REMOVE
+ * -------------------------------------------------------------------------
+ */
+
+static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
+                                           struct pci_dev *pdev,
+                                           struct qed_dev_eth_info *info,
+                                           u32 dp_module,
+                                           u8 dp_level)
+{
+       struct net_device *ndev;
+       struct qede_dev *edev;
+
+       ndev = alloc_etherdev_mqs(sizeof(*edev),
+                                 info->num_queues,
+                                 info->num_queues);
+       if (!ndev) {
+               pr_err("etherdev allocation failed\n");
+               return NULL;
+       }
+
+       edev = netdev_priv(ndev);
+       edev->ndev = ndev;
+       edev->cdev = cdev;
+       edev->pdev = pdev;
+       edev->dp_module = dp_module;
+       edev->dp_level = dp_level;
+       edev->ops = qed_ops;
+       edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+       edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+       DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       memset(&edev->stats, 0, sizeof(edev->stats));
+       memcpy(&edev->dev_info, info, sizeof(*info));
+
+       edev->num_tc = edev->dev_info.num_tc;
+
+       return edev;
+}
+
+static void qede_init_ndev(struct qede_dev *edev)
+{
+       struct net_device *ndev = edev->ndev;
+       struct pci_dev *pdev = edev->pdev;
+       u32 hw_features;
+
+       pci_set_drvdata(pdev, ndev);
+
+       ndev->mem_start = edev->dev_info.common.pci_mem_start;
+       ndev->base_addr = ndev->mem_start;
+       ndev->mem_end = edev->dev_info.common.pci_mem_end;
+       ndev->irq = edev->dev_info.common.pci_irq;
+
+       ndev->watchdog_timeo = TX_TIMEOUT;
+
+       ndev->netdev_ops = &qede_netdev_ops;
+
+       qede_set_ethtool_ops(ndev);
+
+       /* user-changeable features */
+       hw_features = NETIF_F_GRO | NETIF_F_SG |
+                     NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                     NETIF_F_TSO | NETIF_F_TSO6;
+
+       ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+                             NETIF_F_HIGHDMA;
+       ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+                        NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
+                        NETIF_F_HW_VLAN_CTAG_TX;
+
+       ndev->hw_features = hw_features;
+
+       /* Set network device HW mac */
+       ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+}
+
+/* This function converts a 32b param into two params: level and module.
+ * Input 32b decoding:
+ * b31 - enable all NOTICE prints. NOTICE prints are for deviations from the
+ * 'happy' flow, e.g. memory allocation failed.
+ * b30 - enable all INFO prints. INFO prints are for major steps in the flow
+ * and provide important parameters.
+ * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
+ * module. VERBOSE prints are for tracking a specific flow at low level.
+ *
+ * Note that the resulting level is that of the most verbose logs requested.
+ */
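+/* Illustrative example (assuming QED_LOG_VERBOSE_MASK == 0x3fffffff,
+ * QED_LOG_INFO_MASK == BIT(30) and QED_LOG_NOTICE_MASK == BIT(31)):
+ *   debug = 0x40000000 -> QED_LEVEL_INFO, module bitmap left as 0;
+ *   debug = 0x00000005 -> QED_LEVEL_VERBOSE for modules 0 and 2.
+ */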
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
+{
+       *p_dp_level = QED_LEVEL_NOTICE;
+       *p_dp_module = 0;
+
+       if (debug & QED_LOG_VERBOSE_MASK) {
+               *p_dp_level = QED_LEVEL_VERBOSE;
+               *p_dp_module = (debug & 0x3FFFFFFF);
+       } else if (debug & QED_LOG_INFO_MASK) {
+               *p_dp_level = QED_LEVEL_INFO;
+       } else if (debug & QED_LOG_NOTICE_MASK) {
+               *p_dp_level = QED_LEVEL_NOTICE;
+       }
+}
+
+static void qede_free_fp_array(struct qede_dev *edev)
+{
+       if (edev->fp_array) {
+               struct qede_fastpath *fp;
+               int i;
+
+               for_each_rss(i) {
+                       fp = &edev->fp_array[i];
+
+                       kfree(fp->sb_info);
+                       kfree(fp->rxq);
+                       kfree(fp->txqs);
+               }
+               kfree(edev->fp_array);
+       }
+       edev->num_rss = 0;
+}
+
+static int qede_alloc_fp_array(struct qede_dev *edev)
+{
+       struct qede_fastpath *fp;
+       int i;
+
+       edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+                                sizeof(*edev->fp_array), GFP_KERNEL);
+       if (!edev->fp_array) {
+               DP_NOTICE(edev, "fp array allocation failed\n");
+               goto err;
+       }
+
+       for_each_rss(i) {
+               fp = &edev->fp_array[i];
+
+               fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+               if (!fp->sb_info) {
+                       DP_NOTICE(edev, "sb info struct allocation failed\n");
+                       goto err;
+               }
+
+               fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+               if (!fp->rxq) {
+                       DP_NOTICE(edev, "RXQ struct allocation failed\n");
+                       goto err;
+               }
+
+               fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
+               if (!fp->txqs) {
+                       DP_NOTICE(edev, "TXQ array allocation failed\n");
+                       goto err;
+               }
+       }
+
+       return 0;
+err:
+       qede_free_fp_array(edev);
+       return -ENOMEM;
+}
+
+static void qede_sp_task(struct work_struct *work)
+{
+       struct qede_dev *edev = container_of(work, struct qede_dev,
+                                            sp_task.work);
+       mutex_lock(&edev->qede_lock);
+
+       if (edev->state == QEDE_STATE_OPEN) {
+               if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+                       qede_config_rx_mode(edev->ndev);
+       }
+
+       mutex_unlock(&edev->qede_lock);
+}
+
+static void qede_update_pf_params(struct qed_dev *cdev)
+{
+       struct qed_pf_params pf_params;
+
+       /* 16 rx + 16 tx */
+       memset(&pf_params, 0, sizeof(struct qed_pf_params));
+       pf_params.eth_pf_params.num_cons = 32;
+       qed_ops->common->update_pf_params(cdev, &pf_params);
+}
+
+enum qede_probe_mode {
+       QEDE_PROBE_NORMAL,
+};
+
+static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+                       enum qede_probe_mode mode)
+{
+       struct qed_slowpath_params params;
+       struct qed_dev_eth_info dev_info;
+       struct qede_dev *edev;
+       struct qed_dev *cdev;
+       int rc;
+
+       if (unlikely(dp_level & QED_LEVEL_INFO))
+               pr_notice("Starting qede probe\n");
+
+       cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
+                                     dp_module, dp_level);
+       if (!cdev) {
+               rc = -ENODEV;
+               goto err0;
+       }
+
+       qede_update_pf_params(cdev);
+
+       /* Start the Slowpath-process */
+       memset(&params, 0, sizeof(struct qed_slowpath_params));
+       params.int_mode = QED_INT_MODE_MSIX;
+       params.drv_major = QEDE_MAJOR_VERSION;
+       params.drv_minor = QEDE_MINOR_VERSION;
+       params.drv_rev = QEDE_REVISION_VERSION;
+       params.drv_eng = QEDE_ENGINEERING_VERSION;
+       strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+       rc = qed_ops->common->slowpath_start(cdev, &params);
+       if (rc) {
+               pr_notice("Cannot start slowpath\n");
+               goto err1;
+       }
+
+       /* Learn information crucial for qede to progress */
+       rc = qed_ops->fill_dev_info(cdev, &dev_info);
+       if (rc)
+               goto err2;
+
+       edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+                                  dp_level);
+       if (!edev) {
+               rc = -ENOMEM;
+               goto err2;
+       }
+
+       qede_init_ndev(edev);
+
+       rc = register_netdev(edev->ndev);
+       if (rc) {
+               DP_NOTICE(edev, "Cannot register net-device\n");
+               goto err3;
+       }
+
+       edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+
+       edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+
+       INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+       mutex_init(&edev->qede_lock);
+
+       DP_INFO(edev, "Ending successfully qede probe\n");
+
+       return 0;
+
+err3:
+       free_netdev(edev->ndev);
+err2:
+       qed_ops->common->slowpath_stop(cdev);
+err1:
+       qed_ops->common->remove(cdev);
+err0:
+       return rc;
+}
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       u32 dp_module = 0;
+       u8 dp_level = 0;
+
+       qede_config_debug(debug, &dp_module, &dp_level);
+
+       return __qede_probe(pdev, dp_module, dp_level,
+                           QEDE_PROBE_NORMAL);
+}
+
+enum qede_remove_mode {
+       QEDE_REMOVE_NORMAL,
+};
+
+static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct qed_dev *cdev = edev->cdev;
+
+       DP_INFO(edev, "Starting qede_remove\n");
+
+       cancel_delayed_work_sync(&edev->sp_task);
+       unregister_netdev(ndev);
+
+       edev->ops->common->set_power_state(cdev, PCI_D0);
+
+       pci_set_drvdata(pdev, NULL);
+
+       free_netdev(ndev);
+
+       /* Use global ops since we've freed edev */
+       qed_ops->common->slowpath_stop(cdev);
+       qed_ops->common->remove(cdev);
+
+       pr_notice("Ending successfully qede_remove\n");
+}
+
+static void qede_remove(struct pci_dev *pdev)
+{
+       __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
+/* -------------------------------------------------------------------------
+ * START OF LOAD / UNLOAD
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_set_num_queues(struct qede_dev *edev)
+{
+       int rc;
+       u16 rss_num;
+
+       /* Set up queues according to available resources */
+       rss_num = netif_get_num_default_rss_queues() *
+                 edev->dev_info.common.num_hwfns;
+
+       rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
+
+       rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
+       if (rc > 0) {
+               /* Managed to request interrupts for our queues */
+               edev->num_rss = rc;
+               DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
+                       QEDE_RSS_CNT(edev), rss_num);
+               rc = 0;
+       }
+       return rc;
+}
+
+static void qede_free_mem_sb(struct qede_dev *edev,
+                            struct qed_sb_info *sb_info)
+{
+       if (sb_info->sb_virt)
+               dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
+                                 (void *)sb_info->sb_virt, sb_info->sb_phys);
+}
+
+/* This function allocates fast-path status block memory */
+static int qede_alloc_mem_sb(struct qede_dev *edev,
+                            struct qed_sb_info *sb_info,
+                            u16 sb_id)
+{
+       struct status_block *sb_virt;
+       dma_addr_t sb_phys;
+       int rc;
+
+       sb_virt = dma_alloc_coherent(&edev->pdev->dev,
+                                    sizeof(*sb_virt),
+                                    &sb_phys, GFP_KERNEL);
+       if (!sb_virt) {
+               DP_ERR(edev, "Status block allocation failed\n");
+               return -ENOMEM;
+       }
+
+       rc = edev->ops->common->sb_init(edev->cdev, sb_info,
+                                       sb_virt, sb_phys, sb_id,
+                                       QED_SB_TYPE_L2_QUEUE);
+       if (rc) {
+               DP_ERR(edev, "Status block initialization failed\n");
+               dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
+                                 sb_virt, sb_phys);
+               return rc;
+       }
+
+       return 0;
+}
+
+static void qede_free_rx_buffers(struct qede_dev *edev,
+                                struct qede_rx_queue *rxq)
+{
+       u16 i;
+
+       for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
+               struct sw_rx_data *rx_buf;
+               u8 *data;
+
+               rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
+               data = rx_buf->data;
+
+               dma_unmap_single(&edev->pdev->dev,
+                                dma_unmap_addr(rx_buf, mapping),
+                                rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+               rx_buf->data = NULL;
+               kfree(data);
+       }
+}
+
+static void qede_free_mem_rxq(struct qede_dev *edev,
+                             struct qede_rx_queue *rxq)
+{
+       /* Free rx buffers */
+       qede_free_rx_buffers(edev, rxq);
+
+       /* Free the parallel SW ring */
+       kfree(rxq->sw_rx_ring);
+
+       /* Free the real Rx rings used by FW */
+       edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
+       edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
+}
+
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+                               struct qede_rx_queue *rxq)
+{
+       struct sw_rx_data *sw_rx_data;
+       struct eth_rx_bd *rx_bd;
+       dma_addr_t mapping;
+       u16 rx_buf_size;
+       u8 *data;
+
+       rx_buf_size = rxq->rx_buf_size;
+
+       data = kmalloc(rx_buf_size, GFP_ATOMIC);
+       if (unlikely(!data)) {
+               DP_NOTICE(edev, "Failed to allocate Rx data\n");
+               return -ENOMEM;
+       }
+
+       mapping = dma_map_single(&edev->pdev->dev, data,
+                                rx_buf_size, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+               kfree(data);
+               DP_NOTICE(edev, "Failed to map Rx buffer\n");
+               return -ENOMEM;
+       }
+
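+       /* The indexing assumes NUM_RX_BDS_MAX is a power-of-two minus one,
+        * so the AND implements cheap ring wrap-around.
+        */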
+       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+       sw_rx_data->data = data;
+
+       dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+
+       /* Advance PROD and get BD pointer */
+       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+       WARN_ON(!rx_bd);
+       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+       rxq->sw_rx_prod++;
+
+       return 0;
+}
+
+/* This function allocates all memory needed per Rx queue */
+static int qede_alloc_mem_rxq(struct qede_dev *edev,
+                             struct qede_rx_queue *rxq)
+{
+       int i, rc, size, num_allocated;
+
+       rxq->num_rx_buffers = edev->q_num_rx_buffers;
+
+       rxq->rx_buf_size = NET_IP_ALIGN +
+                          ETH_OVERHEAD +
+                          edev->ndev->mtu +
+                          QEDE_FW_RX_ALIGN_END;
+
+       /* Allocate the parallel driver ring for Rx buffers */
+       size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+       rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
+       if (!rxq->sw_rx_ring) {
+               DP_ERR(edev, "Rx buffers ring allocation failed\n");
+               goto err;
+       }
+
+       /* Allocate FW Rx ring */
+       rc = edev->ops->common->chain_alloc(edev->cdev,
+                                           QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                                           QED_CHAIN_MODE_NEXT_PTR,
+                                           NUM_RX_BDS_MAX,
+                                           sizeof(struct eth_rx_bd),
+                                           &rxq->rx_bd_ring);
+
+       if (rc)
+               goto err;
+
+       /* Allocate FW completion ring */
+       rc = edev->ops->common->chain_alloc(edev->cdev,
+                                           QED_CHAIN_USE_TO_CONSUME,
+                                           QED_CHAIN_MODE_PBL,
+                                           NUM_RX_BDS_MAX,
+                                           sizeof(union eth_rx_cqe),
+                                           &rxq->rx_comp_ring);
+       if (rc)
+               goto err;
+
+       /* Allocate buffers for the Rx ring */
+       for (i = 0; i < rxq->num_rx_buffers; i++) {
+               rc = qede_alloc_rx_buffer(edev, rxq);
+               if (rc)
+                       break;
+       }
+       num_allocated = i;
+       if (!num_allocated) {
+               DP_ERR(edev, "Rx buffers allocation failed\n");
+               goto err;
+       } else if (num_allocated < rxq->num_rx_buffers) {
+               DP_NOTICE(edev,
+                         "Allocated less buffers than desired (%d allocated)\n",
+                         num_allocated);
+       }
+
+       return 0;
+
+err:
+       qede_free_mem_rxq(edev, rxq);
+       return -ENOMEM;
+}
+
+static void qede_free_mem_txq(struct qede_dev *edev,
+                             struct qede_tx_queue *txq)
+{
+       /* Free the parallel SW ring */
+       kfree(txq->sw_tx_ring);
+
+       /* Free the real Tx ring (PBL) used by FW */
+       edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
+}
+
+/* This function allocates all memory needed per Tx queue */
+static int qede_alloc_mem_txq(struct qede_dev *edev,
+                             struct qede_tx_queue *txq)
+{
+       int size, rc;
+       union eth_tx_bd_types *p_virt;
+
+       txq->num_tx_buffers = edev->q_num_tx_buffers;
+
+       /* Allocate the parallel driver ring for Tx buffers */
+       size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+       txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
+       if (!txq->sw_tx_ring) {
+               DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
+               goto err;
+       }
+
+       rc = edev->ops->common->chain_alloc(edev->cdev,
+                                           QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                                           QED_CHAIN_MODE_PBL,
+                                           NUM_TX_BDS_MAX,
+                                           sizeof(*p_virt),
+                                           &txq->tx_pbl);
+       if (rc)
+               goto err;
+
+       return 0;
+
+err:
+       qede_free_mem_txq(edev, txq);
+       return -ENOMEM;
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *edev,
+                            struct qede_fastpath *fp)
+{
+       int tc;
+
+       qede_free_mem_sb(edev, fp->sb_info);
+
+       qede_free_mem_rxq(edev, fp->rxq);
+
+       for (tc = 0; tc < edev->num_tc; tc++)
+               qede_free_mem_txq(edev, &fp->txqs[tc]);
+}
+
+/* This function allocates all memory needed for a single fp (i.e. an entity
+ * that contains a status block, one Rx queue and multiple per-TC Tx queues).
+ */
+static int qede_alloc_mem_fp(struct qede_dev *edev,
+                            struct qede_fastpath *fp)
+{
+       int rc, tc;
+
+       rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
+       if (rc)
+               goto err;
+
+       rc = qede_alloc_mem_rxq(edev, fp->rxq);
+       if (rc)
+               goto err;
+
+       for (tc = 0; tc < edev->num_tc; tc++) {
+               rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+               if (rc)
+                       goto err;
+       }
+
+       return 0;
+
+err:
+       qede_free_mem_fp(edev, fp);
+       return -ENOMEM;
+}
+
+static void qede_free_mem_load(struct qede_dev *edev)
+{
+       int i;
+
+       for_each_rss(i) {
+               struct qede_fastpath *fp = &edev->fp_array[i];
+
+               qede_free_mem_fp(edev, fp);
+       }
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *edev)
+{
+       int rc = 0, rss_id;
+
+       for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
+               struct qede_fastpath *fp = &edev->fp_array[rss_id];
+
+               rc = qede_alloc_mem_fp(edev, fp);
+               if (rc)
+                       break;
+       }
+
+       if (rss_id != QEDE_RSS_CNT(edev)) {
+               /* Failed allocating memory for all the queues */
+               if (!rss_id) {
+                       DP_ERR(edev,
+                              "Failed to allocate memory for the leading queue\n");
+                       rc = -ENOMEM;
+               } else {
+                       DP_NOTICE(edev,
+                                 "Failed to allocate memory for all RSS queues: desired %d, allocated %d; continuing with fewer queues\n",
+                                 QEDE_RSS_CNT(edev), rss_id);
+                       /* Partial allocation is usable - clear rc and run
+                        * with the queues we did get.
+                        */
+                       rc = 0;
+               }
+               edev->num_rss = rss_id;
+       }
+
+       return rc;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ structures */
+static void qede_init_fp(struct qede_dev *edev)
+{
+       int rss_id, txq_index, tc;
+       struct qede_fastpath *fp;
+
+       for_each_rss(rss_id) {
+               fp = &edev->fp_array[rss_id];
+
+               fp->edev = edev;
+               fp->rss_id = rss_id;
+
+               memset((void *)&fp->napi, 0, sizeof(fp->napi));
+
+               memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+
+               memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+               fp->rxq->rxq_id = rss_id;
+
+               memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
+               for (tc = 0; tc < edev->num_tc; tc++) {
+                       txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
+                       fp->txqs[tc].index = txq_index;
+               }
+
+               snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+                        edev->ndev->name, rss_id);
+       }
+}
+
+static int qede_set_real_num_queues(struct qede_dev *edev)
+{
+       int rc = 0;
+
+       rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+       if (rc) {
+               DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
+               return rc;
+       }
+       rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+       if (rc) {
+               DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
+               return rc;
+       }
+
+       return 0;
+}
+
+static void qede_napi_disable_remove(struct qede_dev *edev)
+{
+       int i;
+
+       for_each_rss(i) {
+               napi_disable(&edev->fp_array[i].napi);
+
+               netif_napi_del(&edev->fp_array[i].napi);
+       }
+}
+
+static void qede_napi_add_enable(struct qede_dev *edev)
+{
+       int i;
+
+       /* Add NAPI objects */
+       for_each_rss(i) {
+               netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
+                              qede_poll, NAPI_POLL_WEIGHT);
+               napi_enable(&edev->fp_array[i].napi);
+       }
+}
+
+static void qede_sync_free_irqs(struct qede_dev *edev)
+{
+       int i;
+
+       for (i = 0; i < edev->int_info.used_cnt; i++) {
+               if (edev->int_info.msix_cnt) {
+                       synchronize_irq(edev->int_info.msix[i].vector);
+                       free_irq(edev->int_info.msix[i].vector,
+                                &edev->fp_array[i]);
+               } else {
+                       edev->ops->common->simd_handler_clean(edev->cdev, i);
+               }
+       }
+
+       edev->int_info.used_cnt = 0;
+}
+
+static int qede_req_msix_irqs(struct qede_dev *edev)
+{
+       int i, rc;
+
+       /* Sanity check - the number of MSI-X vectors must cover all RSS queues */
+       if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+               DP_ERR(edev,
+                      "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
+                      QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+               rc = request_irq(edev->int_info.msix[i].vector,
+                                qede_msix_fp_int, 0, edev->fp_array[i].name,
+                                &edev->fp_array[i]);
+               if (rc) {
+                       DP_ERR(edev, "Request fp %d irq failed\n", i);
+                       qede_sync_free_irqs(edev);
+                       return rc;
+               }
+               DP_VERBOSE(edev, NETIF_MSG_INTR,
+                          "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
+                          edev->fp_array[i].name, i,
+                          &edev->fp_array[i]);
+               edev->int_info.used_cnt++;
+       }
+
+       return 0;
+}
+
+static void qede_simd_fp_handler(void *cookie)
+{
+       struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
+
+       napi_schedule_irqoff(&fp->napi);
+}
+
+static int qede_setup_irqs(struct qede_dev *edev)
+{
+       int i, rc = 0;
+
+       /* Learn Interrupt configuration */
+       rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
+       if (rc)
+               return rc;
+
+       if (edev->int_info.msix_cnt) {
+               rc = qede_req_msix_irqs(edev);
+               if (rc)
+                       return rc;
+               edev->ndev->irq = edev->int_info.msix[0].vector;
+       } else {
+               const struct qed_common_ops *ops;
+
+               /* qed should learn the RSS ids and callbacks */
+               ops = edev->ops->common;
+               for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+                       ops->simd_handler_config(edev->cdev,
+                                                &edev->fp_array[i], i,
+                                                qede_simd_fp_handler);
+               edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+       }
+       return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *edev,
+                         struct qede_tx_queue *txq,
+                         bool allow_drain)
+{
+       int rc, cnt = 1000;
+
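+       /* Up to 1000 polls of usleep_range(1000, 2000) below bound the wait
+        * at roughly 1-2 seconds before the queue is declared stuck.
+        */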
+       while (txq->sw_tx_cons != txq->sw_tx_prod) {
+               if (!cnt) {
+                       if (allow_drain) {
+                               DP_NOTICE(edev,
+                                         "Tx queue[%d] is stuck, requesting MCP to drain\n",
+                                         txq->index);
+                               rc = edev->ops->common->drain(edev->cdev);
+                               if (rc)
+                                       return rc;
+                               return qede_drain_txq(edev, txq, false);
+                       }
+                       DP_NOTICE(edev,
+                                 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
+                                 txq->index, txq->sw_tx_prod,
+                                 txq->sw_tx_cons);
+                       return -ENODEV;
+               }
+               cnt--;
+               usleep_range(1000, 2000);
+               barrier();
+       }
+
+       /* FW finished processing, wait for HW to transmit all tx packets */
+       usleep_range(1000, 2000);
+
+       return 0;
+}
+
+static int qede_stop_queues(struct qede_dev *edev)
+{
+       struct qed_update_vport_params vport_update_params;
+       struct qed_dev *cdev = edev->cdev;
+       int rc, tc, i;
+
+       /* Disable the vport */
+       memset(&vport_update_params, 0, sizeof(vport_update_params));
+       vport_update_params.vport_id = 0;
+       vport_update_params.update_vport_active_flg = 1;
+       vport_update_params.vport_active_flg = 0;
+       vport_update_params.update_rss_flg = 0;
+
+       rc = edev->ops->vport_update(cdev, &vport_update_params);
+       if (rc) {
+               DP_ERR(edev, "Failed to update vport\n");
+               return rc;
+       }
+
+       /* Flush Tx queues. If needed, request drain from MCP */
+       for_each_rss(i) {
+               struct qede_fastpath *fp = &edev->fp_array[i];
+
+               for (tc = 0; tc < edev->num_tc; tc++) {
+                       struct qede_tx_queue *txq = &fp->txqs[tc];
+
+                       rc = qede_drain_txq(edev, txq, true);
+                       if (rc)
+                               return rc;
+               }
+       }
+
+       /* Stop all queues in reverse order */
+       for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+               struct qed_stop_rxq_params rx_params;
+
+               /* Stop the Tx queue(s) */
+               for (tc = 0; tc < edev->num_tc; tc++) {
+                       struct qed_stop_txq_params tx_params;
+
+                       tx_params.rss_id = i;
+                       tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
+                       rc = edev->ops->q_tx_stop(cdev, &tx_params);
+                       if (rc) {
+                               DP_ERR(edev, "Failed to stop TXQ #%d\n",
+                                      tx_params.tx_queue_id);
+                               return rc;
+                       }
+               }
+
+               /* Stop the Rx queue */
+               memset(&rx_params, 0, sizeof(rx_params));
+               rx_params.rss_id = i;
+               rx_params.rx_queue_id = i;
+
+               rc = edev->ops->q_rx_stop(cdev, &rx_params);
+               if (rc) {
+                       DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+                       return rc;
+               }
+       }
+
+       /* Stop the vport */
+       rc = edev->ops->vport_stop(cdev, 0);
+       if (rc)
+               DP_ERR(edev, "Failed to stop VPORT\n");
+
+       return rc;
+}
+
+static int qede_start_queues(struct qede_dev *edev)
+{
+       int rc, tc, i;
+       int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
+       struct qed_dev *cdev = edev->cdev;
+       struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
+       struct qed_update_vport_params vport_update_params;
+       struct qed_queue_start_common_params q_params;
+
+       if (!edev->num_rss) {
+               DP_ERR(edev,
+                      "Cannot update V-VPORT as active as there are no Rx queues\n");
+               return -EINVAL;
+       }
+
+       rc = edev->ops->vport_start(cdev, vport_id,
+                                   edev->ndev->mtu,
+                                   drop_ttl0_flg,
+                                   vlan_removal_en);
+
+       if (rc) {
+               DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+               return rc;
+       }
+
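+       /* The 0xe (14) below presumably adds the Ethernet header length to
+        * the L3 MTU when logging the frame size.
+        */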
+       DP_VERBOSE(edev, NETIF_MSG_IFUP,
+                  "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
+                  vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
+
+       for_each_rss(i) {
+               struct qede_fastpath *fp = &edev->fp_array[i];
+               dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
+
+               memset(&q_params, 0, sizeof(q_params));
+               q_params.rss_id = i;
+               q_params.queue_id = i;
+               q_params.vport_id = 0;
+               q_params.sb = fp->sb_info->igu_sb_id;
+               q_params.sb_idx = RX_PI;
+
+               rc = edev->ops->q_rx_start(cdev, &q_params,
+                                          fp->rxq->rx_buf_size,
+                                          fp->rxq->rx_bd_ring.p_phys_addr,
+                                          phys_table,
+                                          fp->rxq->rx_comp_ring.page_cnt,
+                                          &fp->rxq->hw_rxq_prod_addr);
+               if (rc) {
+                       DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
+                       return rc;
+               }
+
+               fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+               qede_update_rx_prod(edev, fp->rxq);
+
+               for (tc = 0; tc < edev->num_tc; tc++) {
+                       struct qede_tx_queue *txq = &fp->txqs[tc];
+                       int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+                       memset(&q_params, 0, sizeof(q_params));
+                       q_params.rss_id = i;
+                       q_params.queue_id = txq_index;
+                       q_params.vport_id = 0;
+                       q_params.sb = fp->sb_info->igu_sb_id;
+                       q_params.sb_idx = TX_PI(tc);
+
+                       rc = edev->ops->q_tx_start(cdev, &q_params,
+                                                  txq->tx_pbl.pbl.p_phys_table,
+                                                  txq->tx_pbl.page_cnt,
+                                                  &txq->doorbell_addr);
+                       if (rc) {
+                               DP_ERR(edev, "Start TXQ #%d failed %d\n",
+                                      txq_index, rc);
+                               return rc;
+                       }
+
+                       txq->hw_cons_ptr =
+                               &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
+                       SET_FIELD(txq->tx_db.data.params,
+                                 ETH_DB_DATA_DEST, DB_DEST_XCM);
+                       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+                                 DB_AGG_CMD_SET);
+                       SET_FIELD(txq->tx_db.data.params,
+                                 ETH_DB_DATA_AGG_VAL_SEL,
+                                 DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+                       txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+               }
+       }
+
+       /* Prepare and send the vport enable */
+       memset(&vport_update_params, 0, sizeof(vport_update_params));
+       vport_update_params.vport_id = vport_id;
+       vport_update_params.update_vport_active_flg = 1;
+       vport_update_params.vport_active_flg = 1;
+
+       /* Fill struct with RSS params */
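+       /* ethtool_rxfh_indir_default(i, n) returns i % n, so the 128-entry
+        * indirection table spreads round-robin across the RSS queues.
+        */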
+       if (QEDE_RSS_CNT(edev) > 1) {
+               vport_update_params.update_rss_flg = 1;
+               for (i = 0; i < 128; i++)
+                       rss_params->rss_ind_table[i] =
+                       ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
+               netdev_rss_key_fill(rss_params->rss_key,
+                                   sizeof(rss_params->rss_key));
+       } else {
+               memset(rss_params, 0, sizeof(*rss_params));
+       }
+       memcpy(&vport_update_params.rss_params, rss_params,
+              sizeof(*rss_params));
+
+       rc = edev->ops->vport_update(cdev, &vport_update_params);
+       if (rc) {
+               DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+                                enum qed_filter_xcast_params_type opcode,
+                                unsigned char *mac, int num_macs)
+{
+       struct qed_filter_params filter_cmd;
+       int i;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_MCAST;
+       filter_cmd.filter.mcast.type = opcode;
+       filter_cmd.filter.mcast.num = num_macs;
+
+       for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+               ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+enum qede_unload_mode {
+       QEDE_UNLOAD_NORMAL,
+};
+
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+{
+       struct qed_link_params link_params;
+       int rc;
+
+       DP_INFO(edev, "Starting qede unload\n");
+
+       mutex_lock(&edev->qede_lock);
+       edev->state = QEDE_STATE_CLOSED;
+
+       /* Close OS Tx */
+       netif_tx_disable(edev->ndev);
+       netif_carrier_off(edev->ndev);
+
+       /* Reset the link */
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = false;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+       rc = qede_stop_queues(edev);
+       if (rc) {
+               qede_sync_free_irqs(edev);
+               goto out;
+       }
+
+       DP_INFO(edev, "Stopped Queues\n");
+
+       edev->ops->fastpath_stop(edev->cdev);
+
+       /* Release the interrupts */
+       qede_sync_free_irqs(edev);
+       edev->ops->common->set_fp_int(edev->cdev, 0);
+
+       qede_napi_disable_remove(edev);
+
+       qede_free_mem_load(edev);
+       qede_free_fp_array(edev);
+
+out:
+       mutex_unlock(&edev->qede_lock);
+       DP_INFO(edev, "Ending qede unload\n");
+}
+
+enum qede_load_mode {
+       QEDE_LOAD_NORMAL,
+};
+
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+{
+       struct qed_link_params link_params;
+       struct qed_link_output link_output;
+       int rc;
+
+       DP_INFO(edev, "Starting qede load\n");
+
+       rc = qede_set_num_queues(edev);
+       if (rc)
+               goto err0;
+
+       rc = qede_alloc_fp_array(edev);
+       if (rc)
+               goto err0;
+
+       qede_init_fp(edev);
+
+       rc = qede_alloc_mem_load(edev);
+       if (rc)
+               goto err1;
+       DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+               QEDE_RSS_CNT(edev), edev->num_tc);
+
+       rc = qede_set_real_num_queues(edev);
+       if (rc)
+               goto err2;
+
+       qede_napi_add_enable(edev);
+       DP_INFO(edev, "Napi added and enabled\n");
+
+       rc = qede_setup_irqs(edev);
+       if (rc)
+               goto err3;
+       DP_INFO(edev, "Setup IRQs succeeded\n");
+
+       rc = qede_start_queues(edev);
+       if (rc)
+               goto err4;
+       DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+       /* Add primary mac and set Rx filters */
+       ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
+
+       mutex_lock(&edev->qede_lock);
+       edev->state = QEDE_STATE_OPEN;
+       mutex_unlock(&edev->qede_lock);
+
+       /* Ask for link-up using current configuration */
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = true;
+       edev->ops->common->set_link(edev->cdev, &link_params);
+
+       /* Query whether link is already-up */
+       memset(&link_output, 0, sizeof(link_output));
+       edev->ops->common->get_link(edev->cdev, &link_output);
+       qede_link_update(edev, &link_output);
+
+       DP_INFO(edev, "Ending successfully qede load\n");
+
+       return 0;
+
+err4:
+       qede_sync_free_irqs(edev);
+       memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
+err3:
+       qede_napi_disable_remove(edev);
+err2:
+       qede_free_mem_load(edev);
+err1:
+       edev->ops->common->set_fp_int(edev->cdev, 0);
+       qede_free_fp_array(edev);
+       edev->num_rss = 0;
+err0:
+       return rc;
+}
+
+void qede_reload(struct qede_dev *edev,
+                void (*func)(struct qede_dev *, union qede_reload_args *),
+                union qede_reload_args *args)
+{
+       qede_unload(edev, QEDE_UNLOAD_NORMAL);
+       /* Call function handler to update parameters
+        * needed for function load.
+        */
+       if (func)
+               func(edev, args);
+
+       qede_load(edev, QEDE_LOAD_NORMAL);
+
+       mutex_lock(&edev->qede_lock);
+       qede_config_rx_mode(edev->ndev);
+       mutex_unlock(&edev->qede_lock);
+}
+
+/* called with rtnl_lock */
+static int qede_open(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       netif_carrier_off(ndev);
+
+       edev->ops->common->set_power_state(edev->cdev, PCI_D0);
+
+       return qede_load(edev, QEDE_LOAD_NORMAL);
+}
+
+static int qede_close(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       qede_unload(edev, QEDE_UNLOAD_NORMAL);
+
+       return 0;
+}
+
+static void qede_link_update(void *dev, struct qed_link_output *link)
+{
+       struct qede_dev *edev = dev;
+
+       if (!netif_running(edev->ndev)) {
+               DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+               return;
+       }
+
+       if (link->link_up) {
+               DP_NOTICE(edev, "Link is up\n");
+               netif_tx_start_all_queues(edev->ndev);
+               netif_carrier_on(edev->ndev);
+       } else {
+               DP_NOTICE(edev, "Link is down\n");
+               netif_tx_disable(edev->ndev);
+               netif_carrier_off(edev->ndev);
+       }
+}
+
+static int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct sockaddr *addr = p;
+       int rc;
+
+       ASSERT_RTNL(); /* @@@TBD To be removed */
+
+       DP_INFO(edev, "Set_mac_addr called\n");
+
+       if (!is_valid_ether_addr(addr->sa_data)) {
+               DP_NOTICE(edev, "The MAC address is not valid\n");
+               return -EADDRNOTAVAIL;
+       }
+
+       ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+       if (!netif_running(ndev)) {
+               DP_NOTICE(edev, "The device is currently down\n");
+               return 0;
+       }
+
+       /* Remove the previous primary mac */
+       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                  edev->primary_mac);
+       if (rc)
+               return rc;
+
+       /* Add MAC filter according to the new unicast HW MAC address */
+       ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+       return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                     edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+                              enum qed_filter_rx_mode_type *accept_flags)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       unsigned char *mc_macs, *temp;
+       struct netdev_hw_addr *ha;
+       int rc = 0, mc_count;
+       size_t size;
+
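+       /* The checks below cap the device at 64 multicast filters; with
+        * IFF_ALLMULTI or more addresses than that, fall back to
+        * multicast-promiscuous mode instead.
+        */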
+       size = 64 * ETH_ALEN;
+
+       mc_macs = kzalloc(size, GFP_KERNEL);
+       if (!mc_macs) {
+               DP_NOTICE(edev,
+                         "Failed to allocate memory for multicast MACs\n");
+               rc = -ENOMEM;
+               goto exit;
+       }
+
+       temp = mc_macs;
+
+       /* Remove all previously configured MAC filters */
+       rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                  mc_macs, 1);
+       if (rc)
+               goto exit;
+
+       netif_addr_lock_bh(ndev);
+
+       mc_count = netdev_mc_count(ndev);
+       if (mc_count < 64) {
+               netdev_for_each_mc_addr(ha, ndev) {
+                       ether_addr_copy(temp, ha->addr);
+                       temp += ETH_ALEN;
+               }
+       }
+
+       netif_addr_unlock_bh(ndev);
+
+       /* Check for all multicast @@@TBD resource allocation */
+       if ((ndev->flags & IFF_ALLMULTI) ||
+           (mc_count > 64)) {
+               if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+                       *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+       } else {
+               /* Add all multicast MAC filters */
+               rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                          mc_macs, mc_count);
+       }
+
+exit:
+       kfree(mc_macs);
+       return rc;
+}
+
+static void qede_set_rx_mode(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       DP_INFO(edev, "qede_set_rx_mode called\n");
+
+       if (edev->state != QEDE_STATE_OPEN) {
+               DP_INFO(edev,
+                       "qede_set_rx_mode called while interface is down\n");
+       } else {
+               set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+               schedule_delayed_work(&edev->sp_task, 0);
+       }
+}
+
+/* Must be called with qede_lock held */
+static void qede_config_rx_mode(struct net_device *ndev)
+{
+       enum qed_filter_rx_mode_type accept_flags =
+                       QED_FILTER_RX_MODE_TYPE_REGULAR;
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct qed_filter_params rx_mode;
+       unsigned char *uc_macs, *temp;
+       struct netdev_hw_addr *ha;
+       int rc, uc_count;
+       size_t size;
+
+       netif_addr_lock_bh(ndev);
+
+       uc_count = netdev_uc_count(ndev);
+       size = uc_count * ETH_ALEN;
+
+       uc_macs = kzalloc(size, GFP_ATOMIC);
+       if (!uc_macs) {
+               DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+               netif_addr_unlock_bh(ndev);
+               return;
+       }
+
+       temp = uc_macs;
+       netdev_for_each_uc_addr(ha, ndev) {
+               ether_addr_copy(temp, ha->addr);
+               temp += ETH_ALEN;
+       }
+
+       netif_addr_unlock_bh(ndev);
+
+       /* Configure the struct for the Rx mode */
+       memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+       rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+       /* Remove all previous unicast secondary macs and multicast macs
+        * (configure / leave the primary mac)
+        */
+       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+                                  edev->primary_mac);
+       if (rc)
+               goto out;
+
+       /* Check for promiscuous */
+       if ((ndev->flags & IFF_PROMISC) ||
+           (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+               accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+       } else {
+               /* Add MAC filters according to the unicast secondary macs */
+               int i;
+
+               temp = uc_macs;
+               for (i = 0; i < uc_count; i++) {
+                       rc = qede_set_ucast_rx_mac(edev,
+                                                  QED_FILTER_XCAST_TYPE_ADD,
+                                                  temp);
+                       if (rc)
+                               goto out;
+
+                       temp += ETH_ALEN;
+               }
+
+               rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+               if (rc)
+                       goto out;
+       }
+
+       rx_mode.filter.accept_flags = accept_flags;
+       edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+       kfree(uc_macs);
+}
index 4847713211cafa2258b9511cda89f4232a623ebe..b09a6b80d10719c967994ed54618c50e25fdec0f 100644 (file)
@@ -1736,8 +1736,6 @@ static void ql_get_drvinfo(struct net_device *ndev,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->regdump_len = 0;
-       drvinfo->eedump_len = 0;
 }
 
 static u32 ql_get_msglevel(struct net_device *ndev)
index d6696cfa11d256546d623a0fc331ae6934798a78..46bbea8e023c0194e408ec9e269456c9456136e2 100644 (file)
@@ -1092,7 +1092,7 @@ struct qlcnic_filter_hash {
 struct qlcnic_mailbox {
        struct workqueue_struct *work_q;
        struct qlcnic_adapter   *adapter;
-       struct qlcnic_mbx_ops   *ops;
+       const struct qlcnic_mbx_ops *ops;
        struct work_struct      work;
        struct completion       completion;
        struct list_head        cmd_q;
index 9f0bdd993955cab628d676d079ceb0d6bd37e805..37a731be7d399f6ae14d5c0e1f889599f9fd2d5d 100644 (file)
@@ -4048,7 +4048,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
        struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
                                                  work);
        struct qlcnic_adapter *adapter = mbx->adapter;
-       struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+       const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
        struct device *dev = &adapter->pdev->dev;
        atomic_t *rsp_status = &mbx->rsp_status;
        struct list_head *head = &mbx->cmd_q;
@@ -4098,7 +4098,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
        }
 }
 
-static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+static const struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
        .enqueue_cmd    = qlcnic_83xx_enqueue_mbx_cmd,
        .dequeue_cmd    = qlcnic_83xx_dequeue_mbx_cmd,
        .decode_resp    = qlcnic_83xx_decode_mbx_rsp,
index c3c514e332b5e5dfe67943e1155e8ff24e043c95..5dade1fd08b8656be02cfb9a79ea20176989a29f 100644 (file)
@@ -415,13 +415,6 @@ static void ql_get_drvinfo(struct net_device *ndev,
                 (qdev->fw_rev_id & 0x000000ff));
        strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = 0;
-       drvinfo->testinfo_len = 0;
-       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
-               drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
-       else
-               drvinfo->regdump_len = sizeof(struct ql_reg_dump);
-       drvinfo->eedump_len = 0;
 }
 
 static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
index 78bb4ceb1cdd364f3d044f746ada0ff0aef954e3..ef668d300800c61b18dc97af10afe2f78d20f870 100644 (file)
@@ -2388,7 +2388,6 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
-       info->regdump_len = tp->regs_len;
 }
 
 static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index eafa907965ecb248e418ecbb1cc01bb23d701b2c..32a80d2df7ffc161e86feab52ee850c2660afcf9 100644 (file)
@@ -3672,7 +3672,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
            rocker_port->stp_state == BR_STATE_FORWARDING)
                return 0;
 
-       flags |= ROCKER_OP_FLAG_REMOVE;
+       flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
 
        spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
 
@@ -4374,7 +4374,7 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
 }
 
 static int rocker_port_attr_set(struct net_device *dev,
-                               struct switchdev_attr *attr,
+                               const struct switchdev_attr *attr,
                                struct switchdev_trans *trans)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
@@ -4382,8 +4382,7 @@ static int rocker_port_attr_set(struct net_device *dev,
 
        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
-               err = rocker_port_stp_update(rocker_port, trans,
-                                            ROCKER_OP_FLAG_NOWAIT,
+               err = rocker_port_stp_update(rocker_port, trans, 0,
                                             attr->u.stp_state);
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
@@ -4469,7 +4468,7 @@ static int rocker_port_obj_add(struct net_device *dev,
                fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
                err = rocker_port_fib_ipv4(rocker_port, trans,
                                           htonl(fib4->dst), fib4->dst_len,
-                                          fib4->fi, fib4->tb_id, 0);
+                                          &fib4->fi, fib4->tb_id, 0);
                break;
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                err = rocker_port_fdb_add(rocker_port, trans,
@@ -4517,7 +4516,7 @@ static int rocker_port_fdb_del(struct rocker_port *rocker_port,
                               const struct switchdev_obj_port_fdb *fdb)
 {
        __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
-       int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+       int flags = ROCKER_OP_FLAG_REMOVE;
 
        if (!rocker_port_is_bridged(rocker_port))
                return -EINVAL;
@@ -4541,7 +4540,7 @@ static int rocker_port_obj_del(struct net_device *dev,
                fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
                err = rocker_port_fib_ipv4(rocker_port, NULL,
                                           htonl(fib4->dst), fib4->dst_len,
-                                          fib4->fi, fib4->tb_id,
+                                          &fib4->fi, fib4->tb_id,
                                           ROCKER_OP_FLAG_REMOVE);
                break;
        case SWITCHDEV_OBJ_ID_PORT_FDB:
@@ -4571,7 +4570,7 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
        hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
                if (found->key.rocker_port != rocker_port)
                        continue;
-               fdb->addr = found->key.addr;
+               ether_addr_copy(fdb->addr, found->key.addr);
                fdb->ndm_state = NUD_REACHABLE;
                fdb->vid = rocker_port_vlan_to_vid(rocker_port,
                                                   found->key.vlan_id);
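
The ether_addr_copy() change above follows the switchdev FDB object now
carrying the MAC inline as a six-byte array instead of a pointer, so plain
assignment no longer works and the bytes must be copied. Userspace sketch
of the distinction (types are illustrative):

    #include <string.h>

    #define ETH_ALEN 6

    struct fdb_old { const unsigned char *addr; };    /* pointer: '=' sufficed */
    struct fdb_new { unsigned char addr[ETH_ALEN]; }; /* array: copy the bytes */

    static void fdb_set_addr(struct fdb_new *fdb, const unsigned char *mac)
    {
            /* Functionally what ether_addr_copy() does. */
            memcpy(fdb->addr, mac, ETH_ALEN);
    }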
index 974637d3ae2526427e82997203fdd811215e3692..6e11ee6173ce566e97a61ad99e929cfc49895d84 100644 (file)
@@ -2062,7 +2062,7 @@ static void efx_init_napi_channel(struct efx_channel *channel)
        netif_napi_add(channel->napi_dev, &channel->napi_str,
                       efx_poll, napi_weight);
        napi_hash_add(&channel->napi_str);
-       efx_channel_init_lock(channel);
+       efx_channel_busy_poll_init(channel);
 }
 
 static void efx_init_napi(struct efx_nic *efx)
@@ -2125,7 +2125,7 @@ static int efx_busy_poll(struct napi_struct *napi)
        if (!netif_running(efx->net_dev))
                return LL_FLUSH_FAILED;
 
-       if (!efx_channel_lock_poll(channel))
+       if (!efx_channel_try_lock_poll(channel))
                return LL_FLUSH_BUSY;
 
        old_rx_packets = channel->rx_queue.rx_packets;
index ad56231743a648e1bacdd31778ed009729a98a20..229e68c896346b5c6610a2d7ab9d8dbdaa3f79e3 100644 (file)
@@ -431,21 +431,8 @@ struct efx_channel {
        struct net_device *napi_dev;
        struct napi_struct napi_str;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-       unsigned int state;
-       spinlock_t state_lock;
-#define EFX_CHANNEL_STATE_IDLE         0
-#define EFX_CHANNEL_STATE_NAPI         (1 << 0)  /* NAPI owns this channel */
-#define EFX_CHANNEL_STATE_POLL         (1 << 1)  /* poll owns this channel */
-#define EFX_CHANNEL_STATE_DISABLED     (1 << 2)  /* channel is disabled */
-#define EFX_CHANNEL_STATE_NAPI_YIELD   (1 << 3)  /* NAPI yielded this channel */
-#define EFX_CHANNEL_STATE_POLL_YIELD   (1 << 4)  /* poll yielded this channel */
-#define EFX_CHANNEL_OWNED \
-       (EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
-#define EFX_CHANNEL_LOCKED \
-       (EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
-#define EFX_CHANNEL_USER_PEND \
-       (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+       unsigned long busy_poll_state;
+#endif
        struct efx_special_buffer eventq;
        unsigned int eventq_mask;
        unsigned int eventq_read_ptr;
@@ -480,98 +467,94 @@ struct efx_channel {
 };
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+enum efx_channel_busy_poll_state {
+       EFX_CHANNEL_STATE_IDLE = 0,
+       EFX_CHANNEL_STATE_NAPI = BIT(0),
+       EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
+       EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),
+       EFX_CHANNEL_STATE_POLL_BIT = 2,
+       EFX_CHANNEL_STATE_POLL = BIT(2),
+       EFX_CHANNEL_STATE_DISABLE_BIT = 3,
+};
+
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
 {
-       spin_lock_init(&channel->state_lock);
+       WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
 }
 
 /* Called from the device poll routine to get ownership of a channel. */
 static inline bool efx_channel_lock_napi(struct efx_channel *channel)
 {
-       bool rc = true;
-
-       spin_lock_bh(&channel->state_lock);
-       if (channel->state & EFX_CHANNEL_LOCKED) {
-               WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
-               channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
-               rc = false;
-       } else {
-               /* we don't care if someone yielded */
-               channel->state = EFX_CHANNEL_STATE_NAPI;
+       unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
+
+       while (1) {
+               switch (old) {
+               case EFX_CHANNEL_STATE_POLL:
+                       /* Ensure efx_channel_try_lock_poll() won't starve us */
+                       set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
+                               &channel->busy_poll_state);
+                       /* fallthrough */
+               case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
+                       return false;
+               default:
+                       break;
+               }
+               prev = cmpxchg(&channel->busy_poll_state, old,
+                              EFX_CHANNEL_STATE_NAPI);
+               if (unlikely(prev != old)) {
+                       /* This is likely to mean we've just entered polling
+                        * state. Go back round to set the REQ bit.
+                        */
+                       old = prev;
+                       continue;
+               }
+               return true;
        }
-       spin_unlock_bh(&channel->state_lock);
-       return rc;
 }
 
 static inline void efx_channel_unlock_napi(struct efx_channel *channel)
 {
-       spin_lock_bh(&channel->state_lock);
-       WARN_ON(channel->state &
-               (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
-
-       channel->state &= EFX_CHANNEL_STATE_DISABLED;
-       spin_unlock_bh(&channel->state_lock);
+       /* Make sure write has completed from efx_channel_lock_napi() */
+       smp_wmb();
+       WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
 }
 
 /* Called from efx_busy_poll(). */
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
 {
-       bool rc = true;
-
-       spin_lock_bh(&channel->state_lock);
-       if ((channel->state & EFX_CHANNEL_LOCKED)) {
-               channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
-               rc = false;
-       } else {
-               /* preserve yield marks */
-               channel->state |= EFX_CHANNEL_STATE_POLL;
-       }
-       spin_unlock_bh(&channel->state_lock);
-       return rc;
+       return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
+                       EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
 }
 
-/* Returns true if NAPI tried to get the channel while it was locked. */
 static inline void efx_channel_unlock_poll(struct efx_channel *channel)
 {
-       spin_lock_bh(&channel->state_lock);
-       WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
-
-       /* will reset state to idle, unless channel is disabled */
-       channel->state &= EFX_CHANNEL_STATE_DISABLED;
-       spin_unlock_bh(&channel->state_lock);
+       clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
 }
 
-/* True if a socket is polling, even if it did not get the lock. */
 static inline bool efx_channel_busy_polling(struct efx_channel *channel)
 {
-       WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
-       return channel->state & EFX_CHANNEL_USER_PEND;
+       return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
 }
 
 static inline void efx_channel_enable(struct efx_channel *channel)
 {
-       spin_lock_bh(&channel->state_lock);
-       channel->state = EFX_CHANNEL_STATE_IDLE;
-       spin_unlock_bh(&channel->state_lock);
+       clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
+                        &channel->busy_poll_state);
 }
 
-/* False if the channel is currently owned. */
+/* Stop further polling or napi access.
+ * Returns false if the channel is currently busy polling.
+ */
 static inline bool efx_channel_disable(struct efx_channel *channel)
 {
-       bool rc = true;
-
-       spin_lock_bh(&channel->state_lock);
-       if (channel->state & EFX_CHANNEL_OWNED)
-               rc = false;
-       channel->state |= EFX_CHANNEL_STATE_DISABLED;
-       spin_unlock_bh(&channel->state_lock);
-
-       return rc;
+       set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
+       /* Implicit barrier in efx_channel_busy_polling() */
+       return !efx_channel_busy_polling(channel);
 }
 
 #else /* CONFIG_NET_RX_BUSY_POLL */
 
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
 {
 }
 
@@ -584,7 +567,7 @@ static inline void efx_channel_unlock_napi(struct efx_channel *channel)
 {
 }
 
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
 {
        return false;
 }
index 6ce973187225aec2668888009bfbc543975e8492..062bce9acde6f555794376cc25e24981033e0868 100644 (file)
@@ -4529,9 +4529,6 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
        strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
-       info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
-               cp->casreg_len : CAS_MAX_REGS;
-       info->n_stats = CAS_NUM_STAT_KEYS;
 }
 
 static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index a9cac8413e49e8062875d93af33b8ea4d41061b6..14c9d1baa85cebb9c531ae3cf74d87d8e1cb06fb 100644 (file)
@@ -2182,11 +2182,6 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
                sizeof(drvinfo->bus_info));
-
-       drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
-       drvinfo->testinfo_len = 0;
-       drvinfo->regdump_len = 0;
-       drvinfo->eedump_len = 0;
 }
 
 /*
index cba3d9fcb46535946c2c1b67ef660f0cca9d803f..77d26fe286c0916491b13b80ea8353d8a1af1ace 100644 (file)
@@ -899,7 +899,6 @@ static void cpmac_get_drvinfo(struct net_device *dev,
        strlcpy(info->driver, "cpmac", sizeof(info->driver));
        strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
        snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
-       info->regdump_len = 0;
 }
 
 static const struct ethtool_ops cpmac_ethtool_ops = {
index 75584cc36339fa7d30eb474866dcef1c5068f639..040fbc1e55080a4d025df2a2fae888da6151008c 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/gpio.h>
 #include <linux/of.h>
+#include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/of_device.h>
 #include <linux/if_vlan.h>
@@ -366,6 +367,7 @@ struct cpsw_priv {
        spinlock_t                      lock;
        struct platform_device          *pdev;
        struct net_device               *ndev;
+       struct device_node              *phy_node;
        struct napi_struct              napi_rx;
        struct napi_struct              napi_tx;
        struct device                   *dev;
@@ -1146,7 +1148,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                                   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
-       slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+       if (priv->phy_node)
+               slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+                                &cpsw_adjust_link, 0, slave->data->phy_if);
+       else
+               slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
        if (IS_ERR(slave->phy)) {
                dev_err(priv->dev, "phy %s not found on slave %d\n",
@@ -1784,7 +1790,6 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
        strlcpy(info->driver, "cpsw", sizeof(info->driver));
        strlcpy(info->version, "1.0", sizeof(info->version));
        strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
-       info->regdump_len = cpsw_get_regs_len(ndev);
 }
 
 static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -1935,11 +1940,12 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
        slave->port_vlan = data->dual_emac_res_vlan;
 }
 
-static int cpsw_probe_dt(struct cpsw_platform_data *data,
+static int cpsw_probe_dt(struct cpsw_priv *priv,
                         struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
        struct device_node *slave_node;
+       struct cpsw_platform_data *data = &priv->data;
        int i = 0, ret;
        u32 prop;
 
@@ -2030,6 +2036,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                if (strcmp(slave_node->name, "slave"))
                        continue;
 
+               priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
                parp = of_get_property(slave_node, "phy_id", &lenp);
                if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
                        dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
@@ -2045,7 +2052,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                }
                snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
                         PHY_ID_FMT, mdio->name, phyid);
-
                slave_data->phy_if = of_get_phy_mode(slave_node);
                if (slave_data->phy_if < 0) {
                        dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
@@ -2246,7 +2252,7 @@ static int cpsw_probe(struct platform_device *pdev)
        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);
 
-       if (cpsw_probe_dt(&priv->data, pdev)) {
+       if (cpsw_probe_dt(priv, pdev)) {
                dev_err(&pdev->dev, "cpsw: platform data missing\n");
                ret = -ENODEV;
                goto clean_runtime_disable_ret;
@@ -2584,17 +2590,7 @@ static struct platform_driver cpsw_driver = {
        .remove = cpsw_remove,
 };
 
-static int __init cpsw_init(void)
-{
-       return platform_driver_register(&cpsw_driver);
-}
-late_initcall(cpsw_init);
-
-static void __exit cpsw_exit(void)
-{
-       platform_driver_unregister(&cpsw_driver);
-}
-module_exit(cpsw_exit);
+module_platform_driver(cpsw_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
index 691ec936e88d53601e3ed257278708ae823670c8..a274cd49afe954442bbd3bf647be71a92d0087ee 100644 (file)
@@ -791,7 +791,6 @@ static void tlan_get_drvinfo(struct net_device *dev,
                        sizeof(info->bus_info));
        else
                strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
-       info->eedump_len = TLAN_EEPROM_SIZE;
 }
 
 static int tlan_get_eeprom_len(struct net_device *dev)
index a83263743665411eacb0ca845e23f52db612866c..2b7550c43f7800fe36e54fafacabfcd921c408f4 100644 (file)
@@ -2134,10 +2134,11 @@ static int rhine_rx(struct net_device *dev, int limit)
                        }
 
                        skb_put(skb, pkt_len);
-                       skb->protocol = eth_type_trans(skb, dev);
 
                        rhine_rx_vlan_tag(skb, desc, data_size);
 
+                       skb->protocol = eth_type_trans(skb, dev);
+
                        netif_receive_skb(skb);
 
                        u64_stats_update_begin(&rp->rx_stats.syncp);
index d95f9aae95e78ecc08b8a91bb67c267d10f81461..4684644703ccee1d4ad2c68439d48ac618b5c3b0 100644 (file)
@@ -1135,7 +1135,6 @@ static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
 {
        strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
        strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
-       ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
 }
 
 /**
index 0119dd199276b620c46f20f7c239ca30299600a1..9c218e140c41ad7a546a6534e38796c34298895b 100644 (file)
@@ -105,8 +105,6 @@ static void fjes_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
        snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
                 "platform:%s", plat_dev->name);
-       drvinfo->regdump_len = 0;
-       drvinfo->eedump_len = 0;
 }
 
 static int fjes_get_settings(struct net_device *netdev,
index 8f5c02eed47de09883b43b8b587717993064ef0b..445071c163cb3b45412af65e58353a7eae26842a 100644 (file)
@@ -594,14 +594,12 @@ static struct rtable *geneve_get_rt(struct sk_buff *skb,
        rt = ip_route_output_key(geneve->net, fl4);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
-               dev->stats.tx_carrier_errors++;
-               return rt;
+               return ERR_PTR(-ENETUNREACH);
        }
        if (rt->dst.dev == dev) { /* is this necessary? */
                netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
-               dev->stats.collisions++;
                ip_rt_put(rt);
-               return ERR_PTR(-EINVAL);
+               return ERR_PTR(-ELOOP);
        }
        return rt;
 }
@@ -627,12 +625,12 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ip_tunnel_info *info = NULL;
        struct rtable *rt = NULL;
        const struct iphdr *iip; /* interior IP header */
+       int err = -EINVAL;
        struct flowi4 fl4;
        __u8 tos, ttl;
        __be16 sport;
        bool udp_csum;
        __be16 df;
-       int err;
 
        if (geneve->collect_md) {
                info = skb_tunnel_info(skb);
@@ -647,7 +645,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
        rt = geneve_get_rt(skb, dev, &fl4, info);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
-               dev->stats.tx_carrier_errors++;
+               err = PTR_ERR(rt);
                goto tx_error;
        }
 
@@ -699,10 +697,37 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
 tx_error:
        dev_kfree_skb(skb);
 err:
-       dev->stats.tx_errors++;
+       if (err == -ELOOP)
+               dev->stats.collisions++;
+       else if (err == -ENETUNREACH)
+               dev->stats.tx_carrier_errors++;
+       else
+               dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
+static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       struct geneve_dev *geneve = netdev_priv(dev);
+       struct rtable *rt;
+       struct flowi4 fl4;
+
+       if (ip_tunnel_info_af(info) != AF_INET)
+               return -EINVAL;
+
+       rt = geneve_get_rt(skb, dev, &fl4, info);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+
+       ip_rt_put(rt);
+       info->key.u.ipv4.src = fl4.saddr;
+       info->key.tp_src = udp_flow_src_port(geneve->net, skb,
+                                            1, USHRT_MAX, true);
+       info->key.tp_dst = geneve->dst_port;
+       return 0;
+}
+
 static const struct net_device_ops geneve_netdev_ops = {
        .ndo_init               = geneve_init,
        .ndo_uninit             = geneve_uninit,
@@ -713,6 +738,7 @@ static const struct net_device_ops geneve_netdev_ops = {
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_fill_metadata_dst  = geneve_fill_metadata_dst,
 };
 
 static void geneve_get_drvinfo(struct net_device *dev,
@@ -870,14 +896,14 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
        __be16 dst_port = htons(GENEVE_UDP_PORT);
        __u8 ttl = 0, tos = 0;
        bool metadata = false;
-       __be32 rem_addr;
-       __u32 vni;
+       __be32 rem_addr = 0;
+       __u32 vni = 0;
 
-       if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
-               return -EINVAL;
+       if (data[IFLA_GENEVE_ID])
+               vni = nla_get_u32(data[IFLA_GENEVE_ID]);
 
-       vni = nla_get_u32(data[IFLA_GENEVE_ID]);
-       rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+       if (data[IFLA_GENEVE_REMOTE])
+               rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
 
        if (data[IFLA_GENEVE_TTL])
                ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
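
Note how geneve_get_rt() now reports why a lookup failed through distinct
ERR_PTR codes, so the single error exit in geneve_xmit() can bump the
matching counter. Userspace sketch of that errno-to-counter funnel
(struct name illustrative):

    #include <errno.h>

    struct tx_stats { unsigned long collisions, tx_carrier_errors, tx_errors; };

    static void count_tx_error(struct tx_stats *stats, int err)
    {
            if (err == -ELOOP)              /* circular route */
                    stats->collisions++;
            else if (err == -ENETUNREACH)   /* no route to destination */
                    stats->tx_carrier_errors++;
            else
                    stats->tx_errors++;
    }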
index 24f8dbcf854f08a5c274e0894431b1ef45703784..d50887e3df6de3b41767ccbb3fb04a67c9d459af 100644 (file)
@@ -348,7 +348,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
        struct rtable *rt;
        int err, ret = NET_XMIT_DROP;
        struct flowi4 fl4 = {
-               .flowi4_oif = dev_get_iflink(dev),
+               .flowi4_oif = dev->ifindex,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
                .daddr = ip4h->daddr,
@@ -386,7 +386,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
        struct dst_entry *dst;
        int err, ret = NET_XMIT_DROP;
        struct flowi6 fl6 = {
-               .flowi6_iif = skb->dev->ifindex,
+               .flowi6_iif = dev->ifindex,
                .daddr = ip6h->daddr,
                .saddr = ip6h->saddr,
                .flowi6_flags = FLOWI_FLAG_ANYSRC,
index 248478c6f6e49522681a3eeb8a80fd8eaefd32fc..197c93937c2d577e56cf7fab8dcef07313bf75f4 100644 (file)
@@ -137,7 +137,7 @@ static const struct proto_ops macvtap_socket_ops;
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
                      NETIF_F_TSO6 | NETIF_F_UFO)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
 
 static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
 {
index 9d097ae54fb2aaa7cab5040dcb1008920079f49f..60994a83a0d68ca2e4c229fe16140b6017a9dd55 100644 (file)
@@ -141,6 +141,11 @@ config MICREL_PHY
        ---help---
          Supports the KSZ9021, VSC8201, KS8001 PHYs.
 
+config DP83848_PHY
+       tristate "Driver for Texas Instruments DP83848 PHY"
+       ---help---
+         Supports the DP83848 PHY.
+
 config DP83867_PHY
        tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
        ---help---
@@ -187,8 +192,6 @@ config MDIO_OCTEON
          busses. It is required by the Octeon and ThunderX ethernet device
          drivers.
 
-         If in doubt, say Y.
-
 config MDIO_SUN4I
        tristate "Allwinner sun4i MDIO interface support"
        depends on ARCH_SUNXI
index 7655d47ad8d8198dd0f141d64e7e2e0f20f6f58f..f31a4e25cf151a58efd594daeb2ec09724c39279 100644 (file)
@@ -26,6 +26,7 @@ obj-$(CONFIG_MDIO_BITBANG)    += mdio-bitbang.o
 obj-$(CONFIG_MDIO_GPIO)                += mdio-gpio.o
 obj-$(CONFIG_NATIONAL_PHY)     += national.o
 obj-$(CONFIG_DP83640_PHY)      += dp83640.o
+obj-$(CONFIG_DP83848_PHY)      += dp83848.o
 obj-$(CONFIG_DP83867_PHY)      += dp83867.o
 obj-$(CONFIG_STE10XP)          += ste10Xp.o
 obj-$(CONFIG_MICREL_PHY)       += micrel.o
index d6111affbcb6cafd282aaae2c76a6f8fc7c704f3..f1936b7a7af69f1802568b76063e498eb5e54a2b 100644 (file)
@@ -171,20 +171,7 @@ static struct phy_driver aquantia_driver[] = {
 },
 };
 
-static int __init aquantia_init(void)
-{
-       return phy_drivers_register(aquantia_driver,
-                                   ARRAY_SIZE(aquantia_driver));
-}
-
-static void __exit aquantia_exit(void)
-{
-       return phy_drivers_unregister(aquantia_driver,
-                                     ARRAY_SIZE(aquantia_driver));
-}
-
-module_init(aquantia_init);
-module_exit(aquantia_exit);
+module_phy_driver(aquantia_driver);
 
 static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
        { PHY_ID_AQ1202, 0xfffffff0 },
index dd79ea6ba02315973deb26337bbecbc8dd42e5b6..ddb377e53633acca027232c3ed8b90eb772d49ac 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/brcmphy.h>
 #include <linux/export.h>
 #include <linux/mdio.h>
+#include <linux/module.h>
 #include <linux/phy.h>
 
 #define MII_BCM_CHANNEL_WIDTH     0x2000
@@ -206,3 +207,7 @@ int bcm_phy_enable_eee(struct phy_device *phydev)
        return 0;
 }
 EXPORT_SYMBOL_GPL(bcm_phy_enable_eee);
+
+MODULE_DESCRIPTION("Broadcom PHY Library");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
new file mode 100644 (file)
index 0000000..5ce9bef
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Driver for the Texas Instruments DP83848 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define DP83848_PHY_ID                 0x20005c90
+
+/* Registers */
+#define DP83848_MICR                   0x11
+#define DP83848_MISR                   0x12
+
+/* MICR Register Fields */
+#define DP83848_MICR_INT_OE            BIT(0) /* Interrupt Output Enable */
+#define DP83848_MICR_INTEN             BIT(1) /* Interrupt Enable */
+
+/* MISR Register Fields */
+#define DP83848_MISR_RHF_INT_EN                BIT(0) /* Receive Error Counter */
+#define DP83848_MISR_FHF_INT_EN                BIT(1) /* False Carrier Counter */
+#define DP83848_MISR_ANC_INT_EN                BIT(2) /* Auto-negotiation complete */
+#define DP83848_MISR_DUP_INT_EN                BIT(3) /* Duplex Status */
+#define DP83848_MISR_SPD_INT_EN                BIT(4) /* Speed status */
+#define DP83848_MISR_LINK_INT_EN       BIT(5) /* Link status */
+#define DP83848_MISR_ED_INT_EN         BIT(6) /* Energy detect */
+#define DP83848_MISR_LQM_INT_EN                BIT(7) /* Link Quality Monitor */
+
+static int dp83848_ack_interrupt(struct phy_device *phydev)
+{
+       int err = phy_read(phydev, DP83848_MISR);
+
+       return err < 0 ? err : 0;
+}
+
+static int dp83848_config_intr(struct phy_device *phydev)
+{
+       int err;
+
+       if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+               err = phy_write(phydev, DP83848_MICR,
+                               DP83848_MICR_INT_OE |
+                               DP83848_MICR_INTEN);
+               if (err < 0)
+                       return err;
+
+               return phy_write(phydev, DP83848_MISR,
+                                DP83848_MISR_ANC_INT_EN |
+                                DP83848_MISR_DUP_INT_EN |
+                                DP83848_MISR_SPD_INT_EN |
+                                DP83848_MISR_LINK_INT_EN);
+       }
+
+       return phy_write(phydev, DP83848_MICR, 0x0);
+}
+
+static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+       { DP83848_PHY_ID, 0xfffffff0 },
+       { }
+};
+MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+
+static struct phy_driver dp83848_driver[] = {
+       {
+               .phy_id         = DP83848_PHY_ID,
+               .phy_id_mask    = 0xfffffff0,
+               .name           = "TI DP83848",
+               .features       = PHY_BASIC_FEATURES,
+               .flags          = PHY_HAS_INTERRUPT,
+
+               .soft_reset     = genphy_soft_reset,
+               .config_init    = genphy_config_init,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
+               .config_aneg    = genphy_config_aneg,
+               .read_status    = genphy_read_status,
+
+               /* IRQ related */
+               .ack_interrupt  = dp83848_ack_interrupt,
+               .config_intr    = dp83848_config_intr,
+
+               .driver         = { .owner = THIS_MODULE, },
+       },
+};
+module_phy_driver(dp83848_driver);
+
+MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com");
+MODULE_LICENSE("GPL");
index 3bc9f03349f3a47d1c724cb50bd05e345f025f3e..95f51d7267b3da85d74506ab4ac867ea25eca570 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
-#include <linux/mdio-gpio.h>
+#include <linux/platform_data/mdio-gpio.h>
 
 #include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
index 12f44c53cc8ebca7cba92119c26b01b249e31846..88cb4592b6fbbc6cc1ed6812b0fad2d17885a17e 100644 (file)
@@ -371,6 +371,33 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
 }
 EXPORT_SYMBOL(mdiobus_scan);
 
+/**
+ * mdiobus_read_nested - Nested version of the mdiobus_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to read
+ *
+ * In case of nested MDIO bus access, avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
+{
+       int retval;
+
+       BUG_ON(in_interrupt());
+
+       mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+       retval = bus->read(bus, addr, regnum);
+       mutex_unlock(&bus->mdio_lock);
+
+       return retval;
+}
+EXPORT_SYMBOL(mdiobus_read_nested);
+
 /**
  * mdiobus_read - Convenience function for reading a given MII mgmt register
  * @bus: the mii_bus struct
@@ -395,6 +422,34 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
 }
 EXPORT_SYMBOL(mdiobus_read);
 
+/**
+ * mdiobus_write_nested - Nested version of the mdiobus_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * In case of nested MDIO bus access, avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
+{
+       int err;
+
+       BUG_ON(in_interrupt());
+
+       mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+       err = bus->write(bus, addr, regnum, val);
+       mutex_unlock(&bus->mdio_lock);
+
+       return err;
+}
+EXPORT_SYMBOL(mdiobus_write_nested);
+
 /**
  * mdiobus_write - Convenience function for writing a given MII mgmt register
  * @bus: the mii_bus struct
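
The intended users of the new _nested accessors are drivers, typically
MDIO muxes, that must reach a parent bus while the core already holds the
child bus's mdio_lock; SINGLE_DEPTH_NESTING tells lockdep the two locks
are distinct instances of the same class. A hedged kernel-style sketch of
such a caller ('sketch_mux' is hypothetical):

    #include <linux/phy.h>

    struct sketch_mux {
            struct mii_bus *parent_bus;
    };

    static int sketch_mux_read(struct mii_bus *child, int addr, u32 regnum)
    {
            struct sketch_mux *mux = child->priv;

            /* The mdiobus core holds child->mdio_lock here; the nested
             * variant takes the parent's lock without tripping lockdep.
             */
            return mdiobus_read_nested(mux->parent_bus, addr, regnum);
    }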
index 499185eaf413ba08b1447fbebf1f60080c643329..cf6312fafea545fbc3efb96e8ff6c63b35c7420e 100644 (file)
@@ -514,6 +514,27 @@ static int ksz8873mll_read_status(struct phy_device *phydev)
        return 0;
 }
 
+static int ksz9031_read_status(struct phy_device *phydev)
+{
+       int err;
+       int regval;
+
+       err = genphy_read_status(phydev);
+       if (err)
+               return err;
+
+       /* Make sure the PHY is not broken. Read idle error count,
+        * and reset the PHY if it is maxed out.
+        */
+       regval = phy_read(phydev, MII_STAT1000);
+       if ((regval & 0xFF) == 0xFF) {
+               phy_init_hw(phydev);
+               phydev->link = 0;
+       }
+
+       return 0;
+}
+
 static int ksz8873mll_config_aneg(struct phy_device *phydev)
 {
        return 0;
@@ -772,7 +793,7 @@ static struct phy_driver ksphy_driver[] = {
        .driver_data    = &ksz9021_type,
        .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
+       .read_status    = ksz9031_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
        .suspend        = genphy_suspend,
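
The low byte of MII_STAT1000 (register 10) is the 1000BASE-T idle-error
counter; the workaround above treats a saturated counter as a wedged PHY
and re-runs phy_init_hw(). The check itself, as a userspace one-liner:

    #include <stdbool.h>

    static bool idle_errors_maxed(int stat1000)
    {
            /* Bits 7:0 saturate at 0xff; treat that as "PHY is broken". */
            return (stat1000 & 0xff) == 0xff;
    }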
index 70b08958763a129fff47ad00a1db130c1334f254..dc2da87709185870f808ef626e8446893c658db6 100644 (file)
@@ -43,16 +43,25 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
 
 static int smsc_phy_config_init(struct phy_device *phydev)
 {
+       int __maybe_unused len;
+       struct device *dev __maybe_unused = &phydev->dev;
+       struct device_node *of_node __maybe_unused = dev->of_node;
        int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+       int enable_energy = 1;
 
        if (rc < 0)
                return rc;
 
-       /* Enable energy detect mode for this SMSC Transceivers */
-       rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
-                      rc | MII_LAN83C185_EDPWRDOWN);
-       if (rc < 0)
-               return rc;
+       if (of_find_property(of_node, "smsc,disable-energy-detect", &len))
+               enable_energy = 0;
+
+       if (enable_energy) {
+               /* Enable energy detect mode for this SMSC transceiver */
+               rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+                              rc | MII_LAN83C185_EDPWRDOWN);
+               if (rc < 0)
+                       return rc;
+       }
 
        return smsc_phy_ack_interrupt(phydev);
 }
index 91e1bec6079fafc20665a3d26949250da2817a55..07463fcca2126fd8838bf0fefee228dd11fd31d6 100644 (file)
@@ -112,20 +112,7 @@ static struct phy_driver teranetics_driver[] = {
 },
 };
 
-static int __init teranetics_init(void)
-{
-       return phy_drivers_register(teranetics_driver,
-                                   ARRAY_SIZE(teranetics_driver));
-}
-
-static void __exit teranetics_exit(void)
-{
-       return phy_drivers_unregister(teranetics_driver,
-                                     ARRAY_SIZE(teranetics_driver));
-}
-
-module_init(teranetics_init);
-module_exit(teranetics_exit);
+module_phy_driver(teranetics_driver);
 
 static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
        { PHY_ID_TN2020, 0xffffffff },
index 3837ae344f63b9d69a5dd958d0e7b7dc202ff316..5e0b43283bce2c4f5251e5c5db982ca679526d07 100644 (file)
@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
                        if (po->pppoe_dev == dev &&
                            sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
                                pppox_unbind_sock(sk);
-                               sk->sk_state = PPPOX_ZOMBIE;
                                sk->sk_state_change(sk);
                                po->pppoe_dev = NULL;
                                dev_put(dev);
@@ -590,7 +589,7 @@ static int pppoe_release(struct socket *sock)
 
        po = pppox_sk(sk);
 
-       if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+       if (po->pppoe_dev) {
                dev_put(po->pppoe_dev);
                po->pppoe_dev = NULL;
        }
index 3a8a36c8ded16b4e237c1687143fa4ff2cbff57b..7f83504dfa69bba2c8db612d1d25196fa5b72d91 100644 (file)
@@ -166,6 +166,7 @@ config USB_NET_AX8817X
            * Aten UC210T
            * ASIX AX88172
            * Billionton Systems, USB2AR
+           * Billionton Systems, GUSB2AM-1G-B
            * Buffalo LUA-U2-KTX
            * Corega FEther USB2-TX
            * D-Link DUB-E100
index a186b0a12d5025159b3d1c87df47055db0047ac3..bd9acff1eb7bf277a434345659517683cc0fba45 100644 (file)
@@ -588,7 +588,6 @@ void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
        usbnet_get_drvinfo(net, info);
        strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
        strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
-       info->eedump_len = AX_EEPROM_LEN;
 }
 
 int asix_set_mac_address(struct net_device *net, void *p)
index 1173a24feda38c3af236c84acaf8982f39c0e0b1..5cabefc2349438f26cddde0022d52acf6e967116 100644 (file)
@@ -958,6 +958,10 @@ static const struct usb_device_id  products [] = {
        // Billionton Systems, USB2AR
        USB_DEVICE (0x08dd, 0x90ff),
        .driver_info =  (unsigned long) &ax8817x_info,
+}, {
+       // Billionton Systems, GUSB2AM-1G-B
+       USB_DEVICE(0x08dd, 0x0114),
+       .driver_info =  (unsigned long) &ax88178_info,
 }, {
        // ATEN UC210T
        USB_DEVICE (0x0557, 0x2009),
index 6e9c344c7a201bc18fcfe1f3f1e67d01a4250d01..0b4bdd39106b0a73e954070a42ac87be22ac1821 100644 (file)
@@ -258,7 +258,6 @@ static void dm9601_get_drvinfo(struct net_device *net,
 {
        /* Inherit standard device info */
        usbnet_get_drvinfo(net, info);
-       info->eedump_len = DM_EEPROM_LEN;
 }
 
 static u32 dm9601_get_link(struct net_device *net)
index 82d844a8ebd093e79c26ada9fc728fec8b10576e..4f345bd4e6e29558daf29c3d472d2c0768c3202f 100644 (file)
@@ -445,7 +445,6 @@ static int mcs7830_get_regs_len(struct net_device *net)
 static void mcs7830_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *drvinfo)
 {
        usbnet_get_drvinfo(net, drvinfo);
-       drvinfo->regdump_len = mcs7830_get_regs_len(net);
 }
 
 static void mcs7830_get_regs(struct net_device *net, struct ethtool_regs *regs, void *data)
index 4752e69de00e1f0ab366f35638564de9c14c8fa9..75ae756e93cf6fdfce1216b6f09cdf6217a39b17 100644 (file)
@@ -711,6 +711,10 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9056, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
+       {QMI_FIXED_INTF(0x1199, 0x9070, 8)},    /* Sierra Wireless MC74xx/EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9070, 10)},   /* Sierra Wireless MC74xx/EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9071, 8)},    /* Sierra Wireless MC74xx/EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9071, 10)},   /* Sierra Wireless MC74xx/EM74xx */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
index 953de13267df19d6fcefe60c2008f6222ca5775c..a50df0d8fb9abbd548ad6646e4a066a1211363c5 100644 (file)
@@ -470,14 +470,10 @@ static int sr_get_eeprom(struct net_device *net,
 static void sr_get_drvinfo(struct net_device *net,
                                 struct ethtool_drvinfo *info)
 {
-       struct usbnet *dev = netdev_priv(net);
-       struct sr_data *data = (struct sr_data *)&dev->data;
-
        /* Inherit standard device info */
        usbnet_get_drvinfo(net, info);
        strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
        strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
-       info->eedump_len = data->eeprom_len;
 }
 
 static u32 sr_get_link(struct net_device *net)
index a681569ae0b5be395d8589acf818cbf37bde5479..9ba11d7377539e7fc2e0e0337f4234f5a0801946 100644 (file)
@@ -214,10 +214,6 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
-       drvinfo->testinfo_len = 0;
-       drvinfo->eedump_len   = 0;
-       drvinfo->regdump_len  = vmxnet3_get_regs_len(netdev);
 }
 
 
index 191579aeab1695957e272d54adf630eb57f7ae14..92fa3e1ea65cca564907a43a78a859b6063f7f65 100644 (file)
@@ -30,6 +30,7 @@
 #include <net/arp.h>
 #include <net/ip.h>
 #include <net/ip_fib.h>
+#include <net/ip6_fib.h>
 #include <net/ip6_route.h>
 #include <net/rtnetlink.h>
 #include <net/route.h>
@@ -57,6 +58,7 @@ struct slave_queue {
 struct net_vrf {
        struct slave_queue      queue;
        struct rtable           *rth;
+       struct rt6_info         *rt6;
        u32                     tb_id;
 };
 
@@ -104,12 +106,56 @@ static struct dst_ops vrf_dst_ops = {
        .default_advmss = vrf_default_advmss,
 };
 
+/* Neighbour handling is done on the actual ingress device; we do not
+ * want to flip skb->dev for ndisc packets. This check misses chained
+ * next headers (e.g., NEXTHDR_HOP), but it is a start.
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+static bool check_ipv6_frame(const struct sk_buff *skb)
+{
+       const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
+       size_t hlen = sizeof(*ipv6h);
+       bool rc = true;
+
+       if (skb->len < hlen)
+               goto out;
+
+       if (ipv6h->nexthdr == NEXTHDR_ICMP) {
+               const struct icmp6hdr *icmph;
+
+               if (skb->len < hlen + sizeof(*icmph))
+                       goto out;
+
+               icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
+               switch (icmph->icmp6_type) {
+               case NDISC_ROUTER_SOLICITATION:
+               case NDISC_ROUTER_ADVERTISEMENT:
+               case NDISC_NEIGHBOUR_SOLICITATION:
+               case NDISC_NEIGHBOUR_ADVERTISEMENT:
+               case NDISC_REDIRECT:
+                       rc = false;
+                       break;
+               }
+       }
+
+out:
+       return rc;
+}
+#else
+static bool check_ipv6_frame(const struct sk_buff *skb)
+{
+       return false;
+}
+#endif
+
 static bool is_ip_rx_frame(struct sk_buff *skb)
 {
        switch (skb->protocol) {
        case htons(ETH_P_IP):
-       case htons(ETH_P_IPV6):
                return true;
+       case htons(ETH_P_IPV6):
+               return check_ipv6_frame(skb);
        }
        return false;
 }
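
check_ipv6_frame() above keeps neighbour-discovery traffic on the real
ingress device so address resolution still works, and diverts everything
else to the VRF. Userspace sketch of the classification (ICMPv6 type
values 133-137 per RFC 4861):

    #include <stdbool.h>

    enum {
            ND_RTR_SOL = 133, ND_RTR_ADV = 134,
            ND_NBR_SOL = 135, ND_NBR_ADV = 136, ND_REDIRECT = 137,
    };

    static bool divert_ipv6_to_vrf(bool is_icmpv6, int icmp6_type)
    {
            if (is_icmpv6) {
                    switch (icmp6_type) {
                    case ND_RTR_SOL: case ND_RTR_ADV:
                    case ND_NBR_SOL: case ND_NBR_ADV: case ND_REDIRECT:
                            return false;   /* NDISC stays on the real device */
                    }
            }
            return true;                    /* everything else -> VRF */
    }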
@@ -169,12 +215,53 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
        return stats;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
+                                          struct net_device *dev)
+{
+       const struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct net *net = dev_net(skb->dev);
+       struct flowi6 fl6 = {
+               /* needed to match OIF rule */
+               .flowi6_oif = dev->ifindex,
+               .flowi6_iif = LOOPBACK_IFINDEX,
+               .daddr = iph->daddr,
+               .saddr = iph->saddr,
+               .flowlabel = ip6_flowinfo(iph),
+               .flowi6_mark = skb->mark,
+               .flowi6_proto = iph->nexthdr,
+               .flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
+       };
+       int ret = NET_XMIT_DROP;
+       struct dst_entry *dst;
+       struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
+
+       dst = ip6_route_output(net, NULL, &fl6);
+       if (dst == dst_null)
+               goto err;
+
+       skb_dst_drop(skb);
+       skb_dst_set(skb, dst);
+
+       ret = ip6_local_out(net, skb->sk, skb);
+       if (unlikely(net_xmit_eval(ret)))
+               dev->stats.tx_errors++;
+       else
+               ret = NET_XMIT_SUCCESS;
+
+       return ret;
+err:
+       vrf_tx_error(dev, skb);
+       return NET_XMIT_DROP;
+}
+#else
 static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
                                           struct net_device *dev)
 {
        vrf_tx_error(dev, skb);
        return NET_XMIT_DROP;
 }
+#endif
 
 static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
                            struct net_device *vrf_dev)
@@ -269,6 +356,157 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
        return ret;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
+{
+       return dst;
+}
+
+static struct dst_ops vrf_dst_ops6 = {
+       .family         = AF_INET6,
+       .local_out      = ip6_local_out,
+       .check          = vrf_ip6_check,
+       .mtu            = vrf_v4_mtu,
+       .destroy        = vrf_dst_destroy,
+       .default_advmss = vrf_default_advmss,
+};
+
+static int init_dst_ops6_kmem_cachep(void)
+{
+       vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
+                                                    sizeof(struct rt6_info),
+                                                    0,
+                                                    SLAB_HWCACHE_ALIGN,
+                                                    NULL);
+
+       if (!vrf_dst_ops6.kmem_cachep)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void free_dst_ops6_kmem_cachep(void)
+{
+       kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
+}
+
+static int vrf_input6(struct sk_buff *skb)
+{
+       skb->dev->stats.rx_errors++;
+       kfree_skb(skb);
+       return 0;
+}
+
+/* modelled after ip6_finish_output2 */
+static int vrf_finish_output6(struct net *net, struct sock *sk,
+                             struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+       struct net_device *dev = dst->dev;
+       struct neighbour *neigh;
+       struct in6_addr *nexthop;
+       int ret;
+
+       skb->protocol = htons(ETH_P_IPV6);
+       skb->dev = dev;
+
+       rcu_read_lock_bh();
+       nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+       neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
+       if (unlikely(!neigh))
+               neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
+       if (!IS_ERR(neigh)) {
+               ret = dst_neigh_output(dst, neigh, skb);
+               rcu_read_unlock_bh();
+               return ret;
+       }
+       rcu_read_unlock_bh();
+
+       IP6_INC_STATS(dev_net(dst->dev),
+                     ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+       kfree_skb(skb);
+       return -EINVAL;
+}
+
+/* modelled after ip6_output */
+static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+                           net, sk, skb, NULL, skb_dst(skb)->dev,
+                           vrf_finish_output6,
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+}
+
+static void vrf_rt6_destroy(struct net_vrf *vrf)
+{
+       dst_destroy(&vrf->rt6->dst);
+       free_percpu(vrf->rt6->rt6i_pcpu);
+       vrf->rt6 = NULL;
+}
+
+static int vrf_rt6_create(struct net_device *dev)
+{
+       struct net_vrf *vrf = netdev_priv(dev);
+       struct dst_entry *dst;
+       struct rt6_info *rt6;
+       int cpu;
+       int rc = -ENOMEM;
+
+       rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
+                       DST_OBSOLETE_NONE,
+                       (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+       if (!rt6)
+               goto out;
+
+       dst = &rt6->dst;
+
+       rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
+       if (!rt6->rt6i_pcpu) {
+               dst_destroy(dst);
+               goto out;
+       }
+       for_each_possible_cpu(cpu) {
+               struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
+               *p = NULL;
+       }
+
+       memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
+
+       INIT_LIST_HEAD(&rt6->rt6i_siblings);
+       INIT_LIST_HEAD(&rt6->rt6i_uncached);
+
+       rt6->dst.input  = vrf_input6;
+       rt6->dst.output = vrf_output6;
+
+       rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
+
+       atomic_set(&rt6->dst.__refcnt, 2);
+
+       vrf->rt6 = rt6;
+       rc = 0;
+out:
+       return rc;
+}
+#else
+static int init_dst_ops6_kmem_cachep(void)
+{
+       return 0;
+}
+
+static void free_dst_ops6_kmem_cachep(void)
+{
+}
+
+static void vrf_rt6_destroy(struct net_vrf *vrf)
+{
+}
+
+static int vrf_rt6_create(struct net_device *dev)
+{
+       return 0;
+}
+#endif
+
 /* modelled after ip_finish_output2 */
 static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
@@ -490,6 +728,7 @@ static void vrf_dev_uninit(struct net_device *dev)
        struct slave *slave, *next;
 
        vrf_rtable_destroy(vrf);
+       vrf_rt6_destroy(vrf);
 
        list_for_each_entry_safe(slave, next, head, list)
                vrf_del_slave(dev, slave->dev);
@@ -513,10 +752,15 @@ static int vrf_dev_init(struct net_device *dev)
        if (!vrf->rth)
                goto out_stats;
 
+       if (vrf_rt6_create(dev) != 0)
+               goto out_rth;
+
        dev->flags = IFF_MASTER | IFF_NOARP;
 
        return 0;
 
+out_rth:
+       vrf_rtable_destroy(vrf);
 out_stats:
        free_percpu(dev->dstats);
        dev->dstats = NULL;
@@ -586,10 +830,30 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
        fl4->flowi4_scope = scope;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
+                                        const struct flowi6 *fl6)
+{
+       struct rt6_info *rt = NULL;
+
+       if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
+               struct net_vrf *vrf = netdev_priv(dev);
+
+               rt = vrf->rt6;
+               atomic_inc(&rt->dst.__refcnt);
+       }
+
+       return (struct dst_entry *)rt;
+}
+#endif
+
 static const struct l3mdev_ops vrf_l3mdev_ops = {
        .l3mdev_fib_table       = vrf_fib_table,
        .l3mdev_get_rtable      = vrf_get_rtable,
        .l3mdev_get_saddr       = vrf_get_saddr,
+#if IS_ENABLED(CONFIG_IPV6)
+       .l3mdev_get_rt6_dst     = vrf_get_rt6_dst,
+#endif
 };
 
 static void vrf_get_drvinfo(struct net_device *dev,
@@ -731,6 +995,10 @@ static int __init vrf_init_module(void)
        if (!vrf_dst_ops.kmem_cachep)
                return -ENOMEM;
 
+       rc = init_dst_ops6_kmem_cachep();
+       if (rc != 0)
+               goto error2;
+
        register_netdevice_notifier(&vrf_notifier_block);
 
        rc = rtnl_link_register(&vrf_link_ops);
@@ -741,6 +1009,8 @@ static int __init vrf_init_module(void)
 
 error:
        unregister_netdevice_notifier(&vrf_notifier_block);
+       free_dst_ops6_kmem_cachep();
+error2:
        kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
        return rc;
 }
@@ -750,6 +1020,7 @@ static void __exit vrf_cleanup_module(void)
        rtnl_link_unregister(&vrf_link_ops);
        unregister_netdevice_notifier(&vrf_notifier_block);
        kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+       free_dst_ops6_kmem_cachep();
 }
 
 module_init(vrf_init_module);
index ce704df7681bda7d364b4244e3b0dab32a98da21..6369a5734d4c3e899e96ec74469b0af4b3bca865 100644 (file)
@@ -2360,6 +2360,46 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
+static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
+                               struct ip_tunnel_info *info,
+                               __be16 sport, __be16 dport)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct rtable *rt;
+       struct flowi4 fl4;
+
+       memset(&fl4, 0, sizeof(fl4));
+       fl4.flowi4_tos = RT_TOS(info->key.tos);
+       fl4.flowi4_mark = skb->mark;
+       fl4.flowi4_proto = IPPROTO_UDP;
+       fl4.daddr = info->key.u.ipv4.dst;
+
+       rt = ip_route_output_key(vxlan->net, &fl4);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+       ip_rt_put(rt);
+
+       info->key.u.ipv4.src = fl4.saddr;
+       info->key.tp_src = sport;
+       info->key.tp_dst = dport;
+       return 0;
+}
+
+static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       __be16 sport, dport;
+
+       sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+                                 vxlan->cfg.port_max, true);
+       dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
+
+       if (ip_tunnel_info_af(info) == AF_INET)
+               return egress_ipv4_tun_info(dev, skb, info, sport, dport);
+       return -EINVAL;
+}
+
 static const struct net_device_ops vxlan_netdev_ops = {
        .ndo_init               = vxlan_init,
        .ndo_uninit             = vxlan_uninit,
@@ -2374,6 +2414,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
        .ndo_fdb_add            = vxlan_fdb_add,
        .ndo_fdb_del            = vxlan_fdb_delete,
        .ndo_fdb_dump           = vxlan_fdb_dump,
+       .ndo_fill_metadata_dst  = vxlan_fill_metadata_dst,
 };
 
 /* Info for udev, that this is a virtual tunnel endpoint */
@@ -2794,11 +2835,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        struct vxlan_config conf;
        int err;
 
-       if (!data[IFLA_VXLAN_ID])
-               return -EINVAL;
-
        memset(&conf, 0, sizeof(conf));
-       conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+
+       if (data[IFLA_VXLAN_ID])
+               conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
 
        if (data[IFLA_VXLAN_GROUP]) {
                conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
index a63ab2e831054b372b551788e9106ebf29bc925c..f9f94229bf1ba5902fbea46b6948540b0cc10c03 100644 (file)
@@ -214,8 +214,6 @@ config USB_NET_RNDIS_WLAN
 
          If you choose to build a module, it'll be called rndis_wlan.
 
-source "drivers/net/wireless/rtl818x/Kconfig"
-
 config ADM8211
        tristate "ADMtek ADM8211 support"
        depends on MAC80211 && PCI
@@ -243,6 +241,8 @@ config ADM8211
 
          Thanks to Infineon-ADMtek for their support of this driver.
 
+source "drivers/net/wireless/realtek/rtl818x/Kconfig"
+
 config MAC80211_HWSIM
        tristate "Simulated radio testing tool for mac80211"
        depends on MAC80211
@@ -278,7 +278,8 @@ source "drivers/net/wireless/orinoco/Kconfig"
 source "drivers/net/wireless/p54/Kconfig"
 source "drivers/net/wireless/rt2x00/Kconfig"
 source "drivers/net/wireless/mediatek/Kconfig"
-source "drivers/net/wireless/rtlwifi/Kconfig"
+source "drivers/net/wireless/realtek/rtlwifi/Kconfig"
+source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig"
 source "drivers/net/wireless/ti/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
 source "drivers/net/wireless/mwifiex/Kconfig"
index 6b9e729dd8acbf0af1a545109b0b304d400e93d4..740fdd353c5ddc310a1b9c0e9f84fd69601a5831 100644 (file)
@@ -22,9 +22,7 @@ obj-$(CONFIG_HOSTAP)          += hostap/
 obj-$(CONFIG_B43)              += b43/
 obj-$(CONFIG_B43LEGACY)                += b43legacy/
 obj-$(CONFIG_ZD1211RW)         += zd1211rw/
-obj-$(CONFIG_RTL8180)          += rtl818x/
-obj-$(CONFIG_RTL8187)          += rtl818x/
-obj-$(CONFIG_RTLWIFI)          += rtlwifi/
+obj-$(CONFIG_WLAN)             += realtek/
 
 # 16-bit wireless PCMCIA client drivers
 obj-$(CONFIG_PCMCIA_RAYCS)     += ray_cs.o
index df7c7616533b08636374736911ca3f624983a47e..7d3231acfb24939bcee74cf9b6a38a3a26a82a67 100644 (file)
@@ -82,6 +82,16 @@ enum bmi_cmd_id {
 
 #define BMI_NVRAM_SEG_NAME_SZ 16
 
+#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK   0x7c00
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB    10
+
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_MASK    0x18000
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB     15
+
+#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
+
 struct bmi_cmd {
        __le32 id; /* enum bmi_cmd_id */
        union {
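
The new *_MASK/*_LSB pairs are consumed with ath10k's MS() mask-and-shift helper to unpack the board and chip ids from the 32-bit OTP result (see the core.c hunk further down). A self-contained sketch of the extraction; the result value is made up:

#include <stdint.h>
#include <stdio.h>

#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB  10
#define ATH10K_BMI_CHIP_ID_FROM_OTP_MASK  0x18000
#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB   15

/* mask-and-shift field extraction, in the style of ath10k's MS() macro */
#define MS(val, field) (((val) & field##_MASK) >> field##_LSB)

int main(void)
{
        uint32_t result = 0x0000cc00;   /* hypothetical OTP result word */

        printf("board_id %u chip_id %u\n",
               (unsigned)MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP),
               (unsigned)MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP));
        return 0;
}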
index cf28fbebaedcfc9b372d509c6a88fcbc2d808773..84220c376308809ed2e5800b8403844d024ba30c 100644 (file)
@@ -413,7 +413,7 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
        lockdep_assert_held(&ar_pci->ce_lock);
 
        if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
-               return -EIO;
+               return -ENOSPC;
 
        desc->addr = __cpu_to_le32(paddr);
        desc->nbytes = 0;
@@ -1076,9 +1076,7 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
 }
 
 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
-                        const struct ce_attr *attr,
-                        void (*send_cb)(struct ath10k_ce_pipe *),
-                        void (*recv_cb)(struct ath10k_ce_pipe *))
+                        const struct ce_attr *attr)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
@@ -1104,10 +1102,10 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
        ce_state->src_sz_max = attr->src_sz_max;
 
        if (attr->src_nentries)
-               ce_state->send_cb = send_cb;
+               ce_state->send_cb = attr->send_cb;
 
        if (attr->dest_nentries)
-               ce_state->recv_cb = recv_cb;
+               ce_state->recv_cb = attr->recv_cb;
 
        if (attr->src_nentries) {
                ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
index 5c903e15dd65e6ad7da14caffb1e3a4fd8fe4bfc..dbb94fdb274b0bc591098cd0a555285b102edb05 100644 (file)
@@ -209,9 +209,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
                        const struct ce_attr *attr);
 void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
-                        const struct ce_attr *attr,
-                        void (*send_cb)(struct ath10k_ce_pipe *),
-                        void (*recv_cb)(struct ath10k_ce_pipe *));
+                        const struct ce_attr *attr);
 void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
 
 /*==================CE Engine Shutdown=======================*/
@@ -277,6 +275,9 @@ struct ce_attr {
 
        /* #entries in destination ring - Must be a power of 2 */
        unsigned int dest_nentries;
+
+       void (*send_cb)(struct ath10k_ce_pipe *);
+       void (*recv_cb)(struct ath10k_ce_pipe *);
 };
 
 #define SR_BA_ADDRESS          0x0000
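
Moving send_cb/recv_cb into struct ce_attr lets ath10k_ce_alloc_pipe() keep a stable two-argument signature while each pipe still declares its own completion handlers. A small userspace sketch of this configuration-struct pattern (names illustrative):

#include <stdio.h>

struct pipe_attr {
        unsigned int src_nentries;
        unsigned int dest_nentries;
        void (*send_cb)(int pipe);      /* callbacks travel with the attrs */
        void (*recv_cb)(int pipe);
};

static void on_send(int pipe) { printf("send done on pipe %d\n", pipe); }

static void alloc_pipe(int id, const struct pipe_attr *attr)
{
        /* install handlers only where the matching ring exists */
        if (attr->src_nentries && attr->send_cb)
                attr->send_cb(id);
}

int main(void)
{
        static const struct pipe_attr attr = {
                .src_nentries = 16,
                .send_cb = on_send,
        };

        alloc_pipe(3, &attr);
        return 0;
}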
index 879625adc63a8eeb46c671cdcd8e77daf2cc1e79..13de3617d5abc3e76a5f7dc99eb96f9a500d9b42 100644 (file)
@@ -448,6 +448,56 @@ out:
        return ret;
 }
 
+static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
+{
+       u32 result, address;
+       u8 board_id, chip_id;
+       int ret;
+
+       address = ar->hw_params.patch_load_addr;
+
+       if (!ar->otp_data || !ar->otp_len) {
+               ath10k_warn(ar,
+                           "failed to retrieve board id because of invalid otp\n");
+               return -ENODATA;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                  "boot upload otp to 0x%x len %zd for board id\n",
+                  address, ar->otp_len);
+
+       ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+       if (ret) {
+               ath10k_err(ar, "could not write otp for board id check: %d\n",
+                          ret);
+               return ret;
+       }
+
+       ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID,
+                                &result);
+       if (ret) {
+               ath10k_err(ar, "could not execute otp for board id check: %d\n",
+                          ret);
+               return ret;
+       }
+
+       board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP);
+       chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP);
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                  "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
+                  result, board_id, chip_id);
+
+       if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
+               return -EOPNOTSUPP;
+
+       ar->id.bmi_ids_valid = true;
+       ar->id.bmi_board_id = board_id;
+       ar->id.bmi_chip_id = chip_id;
+
+       return 0;
+}
+
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
        u32 result, address = ar->hw_params.patch_load_addr;
@@ -486,8 +536,8 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
 
        if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
-                                  ar->fw_features))
-           && result != 0) {
+                                  ar->fw_features)) &&
+           result != 0) {
                ath10k_err(ar, "otp calibration failed: %d", result);
                return -EINVAL;
        }
@@ -510,7 +560,7 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
                data_len = ar->firmware_len;
                mode_name = "normal";
                ret = ath10k_swap_code_seg_configure(ar,
-                               ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+                                                    ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
                if (ret) {
                        ath10k_err(ar, "failed to configure fw code swap: %d\n",
                                   ret);
@@ -541,11 +591,18 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
        return ret;
 }
 
-static void ath10k_core_free_firmware_files(struct ath10k *ar)
+static void ath10k_core_free_board_files(struct ath10k *ar)
 {
        if (!IS_ERR(ar->board))
                release_firmware(ar->board);
 
+       ar->board = NULL;
+       ar->board_data = NULL;
+       ar->board_len = 0;
+}
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
        if (!IS_ERR(ar->otp))
                release_firmware(ar->otp);
 
@@ -557,10 +614,6 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
 
        ath10k_swap_code_seg_release(ar);
 
-       ar->board = NULL;
-       ar->board_data = NULL;
-       ar->board_len = 0;
-
        ar->otp = NULL;
        ar->otp_data = NULL;
        ar->otp_len = 0;
@@ -570,7 +623,6 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
        ar->firmware_len = 0;
 
        ar->cal_file = NULL;
-
 }
 
 static int ath10k_fetch_cal_file(struct ath10k *ar)
@@ -592,68 +644,251 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
        return 0;
 }
 
-static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
+static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
 {
-       char filename[100];
-
-       scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
-                 ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
+       if (!ar->hw_params.fw.board) {
+               ath10k_err(ar, "failed to find board file fw entry\n");
+               return -EINVAL;
+       }
 
-       ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
+       ar->board = ath10k_fetch_fw_file(ar,
+                                        ar->hw_params.fw.dir,
+                                        ar->hw_params.fw.board);
        if (IS_ERR(ar->board))
                return PTR_ERR(ar->board);
 
        ar->board_data = ar->board->data;
        ar->board_len = ar->board->size;
-       ar->spec_board_loaded = true;
 
        return 0;
 }
 
-static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
+static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
+                                        const void *buf, size_t buf_len,
+                                        const char *boardname)
 {
-       if (!ar->hw_params.fw.board) {
-               ath10k_err(ar, "failed to find board file fw entry\n");
-               return -EINVAL;
+       const struct ath10k_fw_ie *hdr;
+       bool name_match_found;
+       int ret, board_ie_id;
+       size_t board_ie_len;
+       const void *board_ie_data;
+
+       name_match_found = false;
+
+       /* go through ATH10K_BD_IE_BOARD_ elements */
+       while (buf_len > sizeof(struct ath10k_fw_ie)) {
+               hdr = buf;
+               board_ie_id = le32_to_cpu(hdr->id);
+               board_ie_len = le32_to_cpu(hdr->len);
+               board_ie_data = hdr->data;
+
+               buf_len -= sizeof(*hdr);
+               buf += sizeof(*hdr);
+
+               if (buf_len < ALIGN(board_ie_len, 4)) {
+                       ath10k_err(ar, "invalid ATH10K_BD_IE_BOARD length: %zu < %zu\n",
+                                  buf_len, ALIGN(board_ie_len, 4));
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               switch (board_ie_id) {
+               case ATH10K_BD_IE_BOARD_NAME:
+                       ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "board name", "",
+                                       board_ie_data, board_ie_len);
+
+                       if (board_ie_len != strlen(boardname))
+                               break;
+
+                       ret = memcmp(board_ie_data, boardname, strlen(boardname));
+                       if (ret)
+                               break;
+
+                       name_match_found = true;
+                       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                                  "boot found match for name '%s'",
+                                  boardname);
+                       break;
+               case ATH10K_BD_IE_BOARD_DATA:
+                       if (!name_match_found)
+                               /* no match found */
+                               break;
+
+                       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                                  "boot found board data for '%s'",
+                                  boardname);
+
+                       ar->board_data = board_ie_data;
+                       ar->board_len = board_ie_len;
+
+                       ret = 0;
+                       goto out;
+               default:
+                       ath10k_warn(ar, "unknown ATH10K_BD_IE_BOARD found: %d\n",
+                                   board_ie_id);
+                       break;
+               }
+
+               /* jump over the padding */
+               board_ie_len = ALIGN(board_ie_len, 4);
+
+               buf_len -= board_ie_len;
+               buf += board_ie_len;
        }
 
-       ar->board = ath10k_fetch_fw_file(ar,
-                                        ar->hw_params.fw.dir,
-                                        ar->hw_params.fw.board);
+       /* no match found */
+       ret = -ENOENT;
+
+out:
+       return ret;
+}
+
+static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
+                                             const char *boardname,
+                                             const char *filename)
+{
+       size_t len, magic_len, ie_len;
+       struct ath10k_fw_ie *hdr;
+       const u8 *data;
+       int ret, ie_id;
+
+       ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
        if (IS_ERR(ar->board))
                return PTR_ERR(ar->board);
 
-       ar->board_data = ar->board->data;
-       ar->board_len = ar->board->size;
-       ar->spec_board_loaded = false;
+       data = ar->board->data;
+       len = ar->board->size;
+
+       /* magic has extra null byte padded */
+       magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
+       if (len < magic_len) {
+               ath10k_err(ar, "failed to find magic value in %s/%s, file too short: %zu\n",
+                          ar->hw_params.fw.dir, filename, len);
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (memcmp(data, ATH10K_BOARD_MAGIC, magic_len)) {
+               ath10k_err(ar, "found invalid board magic\n");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       /* magic is padded to 4 bytes */
+       magic_len = ALIGN(magic_len, 4);
+       if (len < magic_len) {
+               ath10k_err(ar, "failed: %s/%s too small to contain board data, len: %zu\n",
+                          ar->hw_params.fw.dir, filename, len);
+               ret = -EINVAL;
+               goto err;
+       }
+
+       data += magic_len;
+       len -= magic_len;
+
+       while (len > sizeof(struct ath10k_fw_ie)) {
+               hdr = (struct ath10k_fw_ie *)data;
+               ie_id = le32_to_cpu(hdr->id);
+               ie_len = le32_to_cpu(hdr->len);
+
+               len -= sizeof(*hdr);
+               data = hdr->data;
+
+               if (len < ALIGN(ie_len, 4)) {
+                       ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+                                  ie_id, ie_len, len);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               switch (ie_id) {
+               case ATH10K_BD_IE_BOARD:
+                       ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+                                                           boardname);
+                       if (ret == -ENOENT)
+                               /* no match found, continue */
+                               break;
+                       else if (ret)
+                               /* there was an error, bail out */
+                               goto err;
+
+                       /* board data found */
+                       goto out;
+               }
+
+               /* jump over the padding */
+               ie_len = ALIGN(ie_len, 4);
+
+               len -= ie_len;
+               data += ie_len;
+       }
+
+out:
+       if (!ar->board_data || !ar->board_len) {
+               ath10k_err(ar,
+                          "failed to fetch board data for %s from %s/%s\n",
+                          boardname, ar->hw_params.fw.dir, filename);
+               ret = -ENODATA;
+               goto err;
+       }
+
+       return 0;
+
+err:
+       ath10k_core_free_board_files(ar);
+       return ret;
+}
+
+static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
+                                        size_t name_len)
+{
+       if (ar->id.bmi_ids_valid) {
+               scnprintf(name, name_len,
+                         "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
+                         ath10k_bus_str(ar->hif.bus),
+                         ar->id.bmi_chip_id,
+                         ar->id.bmi_board_id);
+               goto out;
+       }
+
+       scnprintf(name, name_len,
+                 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x",
+                 ath10k_bus_str(ar->hif.bus),
+                 ar->id.vendor, ar->id.device,
+                 ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+out:
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
 
        return 0;
 }
 
 static int ath10k_core_fetch_board_file(struct ath10k *ar)
 {
+       char boardname[100];
        int ret;
 
-       if (strlen(ar->spec_board_id) > 0) {
-               ret = ath10k_core_fetch_spec_board_file(ar);
-               if (ret) {
-                       ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
-                                   ret);
-                       goto generic;
-               }
-
-               ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
-                          ar->spec_board_id);
-               return 0;
+       ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname));
+       if (ret) {
+               ath10k_err(ar, "failed to create board name: %d", ret);
+               return ret;
        }
 
-generic:
-       ret = ath10k_core_fetch_generic_board_file(ar);
+       ar->bd_api = 2;
+       ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
+                                                ATH10K_BOARD_API2_FILE);
+       if (!ret)
+               goto success;
+
+       ar->bd_api = 1;
+       ret = ath10k_core_fetch_board_data_api_1(ar);
        if (ret) {
-               ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
+               ath10k_err(ar, "failed to fetch board data\n");
                return ret;
        }
 
+success:
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
        return 0;
 }
 
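
Both ath10k_core_parse_bd_ie_board() and ath10k_core_fetch_board_data_api_n() above walk the same on-disk layout: a stream of {id, len, data} elements with little-endian 32-bit headers and payloads padded to 4 bytes. A self-contained userspace sketch of that walk; the buffer contents are invented, and reading the header through a struct assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>

struct fw_ie {
        uint32_t id;    /* __le32 in the driver */
        uint32_t len;   /* __le32 in the driver */
        uint8_t data[];
};

#define ALIGN4(x) (((x) + 3) & ~3u)

static void walk(const uint8_t *buf, size_t len)
{
        while (len > sizeof(struct fw_ie)) {
                const struct fw_ie *hdr = (const void *)buf;
                uint32_t ie_len = hdr->len;   /* le32_to_cpu() in the driver */

                buf += sizeof(*hdr);
                len -= sizeof(*hdr);
                if (len < ALIGN4(ie_len)) {
                        printf("truncated IE\n");
                        return;
                }
                printf("ie id=%u len=%u\n", (unsigned)hdr->id,
                       (unsigned)ie_len);
                buf += ALIGN4(ie_len);        /* jump over the padding */
                len -= ALIGN4(ie_len);
        }
}

int main(void)
{
        /* one IE: id=1, len=5, payload "board" padded to 8 bytes */
        uint8_t buf[] = { 1,0,0,0, 5,0,0,0, 'b','o','a','r','d',0,0,0 };

        walk(buf, sizeof(buf));
        return 0;
}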
@@ -885,12 +1120,6 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
        /* calibration file is optional, don't check for any errors */
        ath10k_fetch_cal_file(ar);
 
-       ret = ath10k_core_fetch_board_file(ar);
-       if (ret) {
-               ath10k_err(ar, "failed to fetch board file: %d\n", ret);
-               return ret;
-       }
-
        ar->fw_api = 5;
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
@@ -1263,10 +1492,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
                goto err;
 
        /* Some of of qca988x solutions are having global reset issue
-         * during target initialization. Bypassing PLL setting before
-         * downloading firmware and letting the SoC run on REF_CLK is
-         * fixing the problem. Corresponding firmware change is also needed
-         * to set the clock source once the target is initialized.
+        * during target initialization. Bypassing PLL setting before
+        * downloading firmware and letting the SoC run on REF_CLK is
+        * fixing the problem. Corresponding firmware change is also needed
+        * to set the clock source once the target is initialized.
         */
        if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
                     ar->fw_features)) {
@@ -1500,6 +1729,19 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                goto err_power_down;
        }
 
+       ret = ath10k_core_get_board_id_from_otp(ar);
+       if (ret && ret != -EOPNOTSUPP) {
+               ath10k_err(ar, "failed to get board id from otp for qca99x0: %d\n",
+                          ret);
+               return ret;
+       }
+
+       ret = ath10k_core_fetch_board_file(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+               goto err_free_firmware_files;
+       }
+
        ret = ath10k_core_init_firmware_features(ar);
        if (ret) {
                ath10k_err(ar, "fatal problem with firmware features: %d\n",
@@ -1627,6 +1869,7 @@ void ath10k_core_unregister(struct ath10k *ar)
        ath10k_testmode_destroy(ar);
 
        ath10k_core_free_firmware_files(ar);
+       ath10k_core_free_board_files(ar);
 
        ath10k_debug_unregister(ar);
 }
index 04e040a06cb15e61ca50379624d93fce37808ff3..7cc7cdd56c9530d45677abf5bda7d4f54a7e639d 100644 (file)
@@ -250,6 +250,30 @@ struct ath10k_fw_stats {
        struct list_head peers;
 };
 
+#define ATH10K_TPC_TABLE_TYPE_FLAG     1
+#define ATH10K_TPC_PREAM_TABLE_END     0xFFFF
+
+struct ath10k_tpc_table {
+       u32 pream_idx[WMI_TPC_RATE_MAX];
+       u8 rate_code[WMI_TPC_RATE_MAX];
+       char tpc_value[WMI_TPC_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+};
+
+struct ath10k_tpc_stats {
+       u32 reg_domain;
+       u32 chan_freq;
+       u32 phy_mode;
+       u32 twice_antenna_reduction;
+       u32 twice_max_rd_power;
+       s32 twice_antenna_gain;
+       u32 power_limit;
+       u32 num_tx_chain;
+       u32 ctl;
+       u32 rate_max;
+       u8 flag[WMI_TPC_FLAG];
+       struct ath10k_tpc_table tpc_table[WMI_TPC_FLAG];
+};
+
 struct ath10k_dfs_stats {
        u32 phy_errors;
        u32 pulses_total;
@@ -378,6 +402,11 @@ struct ath10k_debug {
        struct ath10k_dfs_stats dfs_stats;
        struct ath_dfs_pool_stats dfs_pool_stats;
 
+       /* used for tpc-dump storage, protected by data-lock */
+       struct ath10k_tpc_stats *tpc_stats;
+
+       struct completion tpc_complete;
+
        /* protected by conf_mutex */
        u32 fw_dbglog_mask;
        u32 fw_dbglog_level;
@@ -647,10 +676,19 @@ struct ath10k {
                struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
        } swap;
 
-       char spec_board_id[100];
-       bool spec_board_loaded;
+       struct {
+               u32 vendor;
+               u32 device;
+               u32 subsystem_vendor;
+               u32 subsystem_device;
+
+               bool bmi_ids_valid;
+               u8 bmi_board_id;
+               u8 bmi_chip_id;
+       } id;
 
        int fw_api;
+       int bd_api;
        enum ath10k_cal_mode cal_mode;
 
        struct {
index bf033f46f8aaae1280baf478d66a5890720c962d..6cc1aa3449c88640359ef716e3b300b692154d24 100644 (file)
@@ -125,19 +125,25 @@ EXPORT_SYMBOL(ath10k_info);
 void ath10k_print_driver_info(struct ath10k *ar)
 {
        char fw_features[128] = {};
+       char boardinfo[100];
 
        ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
 
-       ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
+       if (ar->id.bmi_ids_valid)
+               scnprintf(boardinfo, sizeof(boardinfo), "bmi %d:%d",
+                         ar->id.bmi_chip_id, ar->id.bmi_board_id);
+       else
+               scnprintf(boardinfo, sizeof(boardinfo), "sub %04x:%04x",
+                         ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+       ath10k_info(ar, "%s (0x%08x, 0x%08x %s) fw %s fwapi %d bdapi %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
                    ar->hw_params.name,
                    ar->target_version,
                    ar->chip_id,
-                   (strlen(ar->spec_board_id) > 0 ? ", " : ""),
-                   ar->spec_board_id,
-                   (strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
-                    ? " fallback" : ""),
+                   boardinfo,
                    ar->hw->wiphy->fw_version,
                    ar->fw_api,
+                   ar->bd_api,
                    ar->htt.target_version_major,
                    ar->htt.target_version_minor,
                    ar->wmi.op_version,
@@ -285,28 +291,6 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
        spin_unlock_bh(&ar->data_lock);
 }
 
-static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
-{
-       struct ath10k_fw_stats_peer *i;
-       size_t num = 0;
-
-       list_for_each_entry(i, head, list)
-               ++num;
-
-       return num;
-}
-
-static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
-{
-       struct ath10k_fw_stats_vdev *i;
-       size_t num = 0;
-
-       list_for_each_entry(i, head, list)
-               ++num;
-
-       return num;
-}
-
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_fw_stats stats = {};
@@ -343,8 +327,8 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
                goto free;
        }
 
-       num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
-       num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
+       num_peers = ath10k_wmi_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+       num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
        is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
                    !list_empty(&stats.pdevs));
        is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
@@ -429,240 +413,6 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
        return 0;
 }
 
-/* FIXME: How to calculate the buffer size sanely? */
-#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
-
-static void ath10k_fw_stats_fill(struct ath10k *ar,
-                                struct ath10k_fw_stats *fw_stats,
-                                char *buf)
-{
-       unsigned int len = 0;
-       unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
-       const struct ath10k_fw_stats_pdev *pdev;
-       const struct ath10k_fw_stats_vdev *vdev;
-       const struct ath10k_fw_stats_peer *peer;
-       size_t num_peers;
-       size_t num_vdevs;
-       int i;
-
-       spin_lock_bh(&ar->data_lock);
-
-       pdev = list_first_entry_or_null(&fw_stats->pdevs,
-                                       struct ath10k_fw_stats_pdev, list);
-       if (!pdev) {
-               ath10k_warn(ar, "failed to get pdev stats\n");
-               goto unlock;
-       }
-
-       num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
-       num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
-
-       len += scnprintf(buf + len, buf_len - len, "\n");
-       len += scnprintf(buf + len, buf_len - len, "%30s\n",
-                        "ath10k PDEV stats");
-       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-                                "=================");
-
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Channel noise floor", pdev->ch_noise_floor);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "Channel TX power", pdev->chan_tx_power);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "TX frame count", pdev->tx_frame_count);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "RX frame count", pdev->rx_frame_count);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "RX clear count", pdev->rx_clear_count);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "Cycle count", pdev->cycle_count);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "PHY error count", pdev->phy_err_count);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "RTS bad count", pdev->rts_bad);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "RTS good count", pdev->rts_good);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "FCS bad count", pdev->fcs_bad);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "No beacon count", pdev->no_beacons);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-                        "MIB int count", pdev->mib_int_count);
-
-       len += scnprintf(buf + len, buf_len - len, "\n");
-       len += scnprintf(buf + len, buf_len - len, "%30s\n",
-                        "ath10k PDEV TX stats");
-       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-                                "=================");
-
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "HTT cookies queued", pdev->comp_queued);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "HTT cookies disp.", pdev->comp_delivered);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MSDU queued", pdev->msdu_enqued);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MPDU queued", pdev->mpdu_enqued);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MSDUs dropped", pdev->wmm_drop);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Local enqued", pdev->local_enqued);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Local freed", pdev->local_freed);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "HW queued", pdev->hw_queued);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "PPDUs reaped", pdev->hw_reaped);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Num underruns", pdev->underrun);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "PPDUs cleaned", pdev->tx_abort);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MPDUs requed", pdev->mpdus_requed);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Excessive retries", pdev->tx_ko);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "HW rate", pdev->data_rc);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Sched self tiggers", pdev->self_triggers);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Dropped due to SW retries",
-                        pdev->sw_retry_failure);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Illegal rate phy errors",
-                        pdev->illgl_rate_phy_err);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Pdev continous xretry", pdev->pdev_cont_xretry);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "TX timeout", pdev->pdev_tx_timeout);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "PDEV resets", pdev->pdev_resets);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "PHY underrun", pdev->phy_underrun);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MPDU is more than txop limit", pdev->txop_ovf);
-
-       len += scnprintf(buf + len, buf_len - len, "\n");
-       len += scnprintf(buf + len, buf_len - len, "%30s\n",
-                        "ath10k PDEV RX stats");
-       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-                                "=================");
-
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Mid PPDU route change",
-                        pdev->mid_ppdu_route_change);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Tot. number of statuses", pdev->status_rcvd);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Extra frags on rings 0", pdev->r0_frags);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Extra frags on rings 1", pdev->r1_frags);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Extra frags on rings 2", pdev->r2_frags);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Extra frags on rings 3", pdev->r3_frags);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MSDUs delivered to HTT", pdev->htt_msdus);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MPDUs delivered to HTT", pdev->htt_mpdus);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MSDUs delivered to stack", pdev->loc_msdus);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MPDUs delivered to stack", pdev->loc_mpdus);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Oversized AMSUs", pdev->oversize_amsdu);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "PHY errors", pdev->phy_errs);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "PHY errors drops", pdev->phy_err_drop);
-       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
-
-       len += scnprintf(buf + len, buf_len - len, "\n");
-       len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
-                        "ath10k VDEV stats", num_vdevs);
-       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-                                "=================");
-
-       list_for_each_entry(vdev, &fw_stats->vdevs, list) {
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "vdev id", vdev->vdev_id);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "beacon snr", vdev->beacon_snr);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "data snr", vdev->data_snr);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "num rx frames", vdev->num_rx_frames);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "num rts fail", vdev->num_rts_fail);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "num rts success", vdev->num_rts_success);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "num rx err", vdev->num_rx_err);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "num rx discard", vdev->num_rx_discard);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "num tx not acked", vdev->num_tx_not_acked);
-
-               for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
-                       len += scnprintf(buf + len, buf_len - len,
-                                       "%25s [%02d] %u\n",
-                                        "num tx frames", i,
-                                        vdev->num_tx_frames[i]);
-
-               for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
-                       len += scnprintf(buf + len, buf_len - len,
-                                       "%25s [%02d] %u\n",
-                                        "num tx frames retries", i,
-                                        vdev->num_tx_frames_retries[i]);
-
-               for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
-                       len += scnprintf(buf + len, buf_len - len,
-                                       "%25s [%02d] %u\n",
-                                        "num tx frames failures", i,
-                                        vdev->num_tx_frames_failures[i]);
-
-               for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
-                       len += scnprintf(buf + len, buf_len - len,
-                                       "%25s [%02d] 0x%08x\n",
-                                        "tx rate history", i,
-                                        vdev->tx_rate_history[i]);
-
-               for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
-                       len += scnprintf(buf + len, buf_len - len,
-                                       "%25s [%02d] %u\n",
-                                        "beacon rssi history", i,
-                                        vdev->beacon_rssi_history[i]);
-
-               len += scnprintf(buf + len, buf_len - len, "\n");
-       }
-
-       len += scnprintf(buf + len, buf_len - len, "\n");
-       len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
-                        "ath10k PEER stats", num_peers);
-       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-                                "=================");
-
-       list_for_each_entry(peer, &fw_stats->peers, list) {
-               len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
-                                "Peer MAC address", peer->peer_macaddr);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "Peer RSSI", peer->peer_rssi);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "Peer TX rate", peer->peer_tx_rate);
-               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-                                "Peer RX rate", peer->peer_rx_rate);
-               len += scnprintf(buf + len, buf_len - len, "\n");
-       }
-
-unlock:
-       spin_unlock_bh(&ar->data_lock);
-
-       if (len >= buf_len)
-               buf[len - 1] = 0;
-       else
-               buf[len] = 0;
-}
-
 static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
 {
        struct ath10k *ar = inode->i_private;
@@ -688,7 +438,12 @@ static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
                goto err_free;
        }
 
-       ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+       ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+       if (ret) {
+               ath10k_warn(ar, "failed to fill fw stats: %d\n", ret);
+               goto err_free;
+       }
+
        file->private_data = buf;
 
        mutex_unlock(&ar->conf_mutex);
@@ -1843,6 +1598,233 @@ static const struct file_operations fops_nf_cal_period = {
        .llseek = default_llseek,
 };
 
+#define ATH10K_TPC_CONFIG_BUF_SIZE     (1024 * 1024)
+
+static int ath10k_debug_tpc_stats_request(struct ath10k *ar)
+{
+       int ret;
+       unsigned long time_left;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       reinit_completion(&ar->debug.tpc_complete);
+
+       ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM);
+       if (ret) {
+               ath10k_warn(ar, "failed to request tpc config: %d\n", ret);
+               return ret;
+       }
+
+       time_left = wait_for_completion_timeout(&ar->debug.tpc_complete,
+                                               1 * HZ);
+       if (time_left == 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+                                   struct ath10k_tpc_stats *tpc_stats)
+{
+       spin_lock_bh(&ar->data_lock);
+
+       kfree(ar->debug.tpc_stats);
+       ar->debug.tpc_stats = tpc_stats;
+       complete(&ar->debug.tpc_complete);
+
+       spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
+                                  unsigned int j, char *buf, unsigned int *len)
+{
+       unsigned int i, buf_len;
+       static const char table_str[][5] = { "CDD",
+                                            "STBC",
+                                            "TXBF" };
+       static const char pream_str[][6] = { "CCK",
+                                            "OFDM",
+                                            "HT20",
+                                            "HT40",
+                                            "VHT20",
+                                            "VHT40",
+                                            "VHT80",
+                                            "HTCUP" };
+
+       buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+       *len += scnprintf(buf + *len, buf_len - *len,
+                         "********************************\n");
+       *len += scnprintf(buf + *len, buf_len - *len,
+                         "******************* %s POWER TABLE ****************\n",
+                         table_str[j]);
+       *len += scnprintf(buf + *len, buf_len - *len,
+                         "********************************\n");
+       *len += scnprintf(buf + *len, buf_len - *len,
+                         "No.  Preamble Rate_code tpc_value1 tpc_value2 tpc_value3\n");
+
+       for (i = 0; i < tpc_stats->rate_max; i++) {
+               *len += scnprintf(buf + *len, buf_len - *len,
+                                 "%8d %s 0x%2x %s\n", i,
+                                 pream_str[tpc_stats->tpc_table[j].pream_idx[i]],
+                                 tpc_stats->tpc_table[j].rate_code[i],
+                                 tpc_stats->tpc_table[j].tpc_value[i]);
+       }
+
+       *len += scnprintf(buf + *len, buf_len - *len,
+                         "***********************************\n");
+}
+
+static void ath10k_tpc_stats_fill(struct ath10k *ar,
+                                 struct ath10k_tpc_stats *tpc_stats,
+                                 char *buf)
+{
+       unsigned int len, j, buf_len;
+
+       len = 0;
+       buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+
+       spin_lock_bh(&ar->data_lock);
+
+       if (!tpc_stats) {
+               ath10k_warn(ar, "failed to get tpc stats\n");
+               goto unlock;
+       }
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len,
+                        "*************************************\n");
+       len += scnprintf(buf + len, buf_len - len,
+                        "TPC config for channel %4d mode %d\n",
+                        tpc_stats->chan_freq,
+                        tpc_stats->phy_mode);
+       len += scnprintf(buf + len, buf_len - len,
+                        "*************************************\n");
+       len += scnprintf(buf + len, buf_len - len,
+                        "CTL           =  0x%2x Reg. Domain            = %2d\n",
+                        tpc_stats->ctl,
+                        tpc_stats->reg_domain);
+       len += scnprintf(buf + len, buf_len - len,
+                        "Antenna Gain  = %2d Reg. Max Antenna Gain     =  %2d\n",
+                        tpc_stats->twice_antenna_gain,
+                        tpc_stats->twice_antenna_reduction);
+       len += scnprintf(buf + len, buf_len - len,
+                        "Power Limit   = %2d Reg. Max Power            = %2d\n",
+                        tpc_stats->power_limit,
+                        tpc_stats->twice_max_rd_power / 2);
+       len += scnprintf(buf + len, buf_len - len,
+                        "Num tx chains = %2d Num supported rates       = %2d\n",
+                        tpc_stats->num_tx_chain,
+                        tpc_stats->rate_max);
+
+       for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+               switch (j) {
+               case WMI_TPC_TABLE_TYPE_CDD:
+                       if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+                               len += scnprintf(buf + len, buf_len - len,
+                                                "CDD not supported\n");
+                               break;
+                       }
+
+                       ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+                       break;
+               case WMI_TPC_TABLE_TYPE_STBC:
+                       if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+                               len += scnprintf(buf + len, buf_len - len,
+                                                "STBC not supported\n");
+                               break;
+                       }
+
+                       ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+                       break;
+               case WMI_TPC_TABLE_TYPE_TXBF:
+                       if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+                               len += scnprintf(buf + len, buf_len - len,
+                                                "TXBF not supported\n***************************\n");
+                               break;
+                       }
+
+                       ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+                       break;
+               default:
+                       len += scnprintf(buf + len, buf_len - len,
+                                        "Invalid Type\n");
+                       break;
+               }
+       }
+
+unlock:
+       spin_unlock_bh(&ar->data_lock);
+
+       if (len >= buf_len)
+               buf[len - 1] = 0;
+       else
+               buf[len] = 0;
+}
+
+static int ath10k_tpc_stats_open(struct inode *inode, struct file *file)
+{
+       struct ath10k *ar = inode->i_private;
+       void *buf = NULL;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->state != ATH10K_STATE_ON) {
+               ret = -ENETDOWN;
+               goto err_unlock;
+       }
+
+       buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE);
+       if (!buf) {
+               ret = -ENOMEM;
+               goto err_unlock;
+       }
+
+       ret = ath10k_debug_tpc_stats_request(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to request tpc config stats: %d\n",
+                           ret);
+               goto err_free;
+       }
+
+       ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+       file->private_data = buf;
+
+       mutex_unlock(&ar->conf_mutex);
+       return 0;
+
+err_free:
+       vfree(buf);
+
+err_unlock:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static int ath10k_tpc_stats_release(struct inode *inode, struct file *file)
+{
+       vfree(file->private_data);
+
+       return 0;
+}
+
+static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       const char *buf = file->private_data;
+       unsigned int len = strlen(buf);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tpc_stats = {
+       .open = ath10k_tpc_stats_open,
+       .release = ath10k_tpc_stats_release,
+       .read = ath10k_tpc_stats_read,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 int ath10k_debug_start(struct ath10k *ar)
 {
        int ret;
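
ath10k_tpc_stats_fill() above relies on scnprintf() returning the number of bytes actually written, unlike snprintf(), which returns the would-be length; that is what keeps the running "len += scnprintf(buf + len, buf_len - len, ...)" from ever stepping past the buffer. A userspace sketch of that contract and the append pattern:

#include <stdarg.h>
#include <stdio.h>

/* scnprintf()-like wrapper: clamp the return to what actually fit */
static int scnprintf_sketch(char *buf, size_t size, const char *fmt, ...)
{
        va_list ap;
        int n;

        if (size == 0)
                return 0;
        va_start(ap, fmt);
        n = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (n < 0)
                return 0;
        return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
        char buf[16];
        unsigned int len = 0;

        len += scnprintf_sketch(buf + len, sizeof(buf) - len, "freq %d ", 5180);
        len += scnprintf_sketch(buf + len, sizeof(buf) - len, "mode %d", 10);
        /* second append is truncated; len never exceeds sizeof(buf) - 1 */
        printf("%s (len=%u)\n", buf, len);
        return 0;
}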
@@ -2111,6 +2093,8 @@ void ath10k_debug_destroy(struct ath10k *ar)
        ar->debug.fw_crash_data = NULL;
 
        ath10k_debug_fw_stats_reset(ar);
+
+       kfree(ar->debug.tpc_stats);
 }
 
 int ath10k_debug_register(struct ath10k *ar)
@@ -2127,6 +2111,7 @@ int ath10k_debug_register(struct ath10k *ar)
        INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
                          ath10k_debug_htt_stats_dwork);
 
+       init_completion(&ar->debug.tpc_complete);
        init_completion(&ar->debug.fw_stats_complete);
 
        debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -2195,6 +2180,9 @@ int ath10k_debug_register(struct ath10k *ar)
        debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
                            ar->debug.debugfs_phy, ar, &fops_quiet_period);
 
+       debugfs_create_file("tpc_stats", S_IRUSR,
+                           ar->debug.debugfs_phy, ar, &fops_tpc_stats);
+
        return 0;
 }
 
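
The tpc_stats debugfs file follows a snapshot pattern: open() requests fresh stats and renders them into a private vmalloc buffer, read() serves from that buffer via simple_read_from_buffer(), and release() frees it, so a reader sees one consistent dump across partial reads. A userspace analog of that lifecycle (no kernel API, purely illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct snap {
        char *buf;
};

static struct snap *snap_open(const char *live)
{
        struct snap *s = malloc(sizeof(*s));

        if (!s)
                return NULL;
        s->buf = strdup(live);          /* vmalloc+fill in the driver */
        if (!s->buf) {
                free(s);
                return NULL;
        }
        return s;
}

static size_t snap_read(struct snap *s, char *out, size_t count, size_t *pos)
{
        size_t len = strlen(s->buf);
        size_t n;

        if (*pos >= len)
                return 0;
        n = len - *pos < count ? len - *pos : count;
        memcpy(out, s->buf + *pos, n);
        *pos += n;
        return n;
}

static void snap_release(struct snap *s)
{
        free(s->buf);
        free(s);
}

int main(void)
{
        struct snap *s = snap_open("chan 5180 mode 10\n");
        char out[8];
        size_t pos = 0, n;

        while (s && (n = snap_read(s, out, sizeof(out), &pos)) > 0)
                fwrite(out, 1, n, stdout);
        if (s)
                snap_release(s);
        return 0;
}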
index 53bd6a19eab6215b6108077c7407de6f2bac1d30..7de780c4ec8da0c9235f280510eda3f7c068aaf4 100644 (file)
@@ -55,6 +55,9 @@ enum ath10k_dbg_aggr_mode {
        ATH10K_DBG_AGGR_MODE_MAX,
 };
 
+/* FIXME: How to calculate the buffer size sanely? */
+#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+
 extern unsigned int ath10k_debug_mask;
 
 __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
@@ -70,6 +73,8 @@ void ath10k_debug_destroy(struct ath10k *ar);
 int ath10k_debug_register(struct ath10k *ar);
 void ath10k_debug_unregister(struct ath10k *ar);
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+                                   struct ath10k_tpc_stats *tpc_stats);
 struct ath10k_fw_crash_data *
 ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
 
@@ -117,6 +122,12 @@ static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
 {
 }
 
+static inline void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+                                                 struct ath10k_tpc_stats *tpc_stats)
+{
+       kfree(tpc_stats);
+}
+
 static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
                                           int len)
 {
index 0c92e0251e84f64fd0e8b53679deda60803749f9..89e7076c919fd61abc04a2648ad67b1b4e2820f7 100644 (file)
@@ -30,13 +30,6 @@ struct ath10k_hif_sg_item {
        u16 len;
 };
 
-struct ath10k_hif_cb {
-       int (*tx_completion)(struct ath10k *ar,
-                            struct sk_buff *wbuf);
-       int (*rx_completion)(struct ath10k *ar,
-                            struct sk_buff *wbuf);
-};
-
 struct ath10k_hif_ops {
        /* send a scatter-gather list to the target */
        int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
@@ -65,8 +58,7 @@ struct ath10k_hif_ops {
        void (*stop)(struct ath10k *ar);
 
        int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
-                                  u8 *ul_pipe, u8 *dl_pipe,
-                                  int *ul_is_polled, int *dl_is_polled);
+                                  u8 *ul_pipe, u8 *dl_pipe);
 
        void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
 
@@ -80,9 +72,6 @@ struct ath10k_hif_ops {
         */
        void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
 
-       void (*set_callbacks)(struct ath10k *ar,
-                             struct ath10k_hif_cb *callbacks);
-
        u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
 
        u32 (*read32)(struct ath10k *ar, u32 address);
@@ -142,13 +131,10 @@ static inline void ath10k_hif_stop(struct ath10k *ar)
 
 static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
                                                 u16 service_id,
-                                                u8 *ul_pipe, u8 *dl_pipe,
-                                                int *ul_is_polled,
-                                                int *dl_is_polled)
+                                                u8 *ul_pipe, u8 *dl_pipe)
 {
        return ar->hif.ops->map_service_to_pipe(ar, service_id,
-                                               ul_pipe, dl_pipe,
-                                               ul_is_polled, dl_is_polled);
+                                               ul_pipe, dl_pipe);
 }
 
 static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
@@ -163,12 +149,6 @@ static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
        ar->hif.ops->send_complete_check(ar, pipe_id, force);
 }
 
-static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
-                                           struct ath10k_hif_cb *callbacks)
-{
-       ar->hif.ops->set_callbacks(ar, callbacks);
-}
-
 static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
                                                   u8 pipe_id)
 {
index 32d9ff1b19dcdf2d6d2609e9f95844fb5d144e8d..5b3c6bcf959854ed33776c3223adb06cb4f748f2 100644 (file)
 /* Send */
 /********/
 
-static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
-                                                 int force)
-{
-       /*
-        * Check whether HIF has any prior sends that have finished,
-        * have not had the post-processing done.
-        */
-       ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
-}
-
 static void ath10k_htc_control_tx_complete(struct ath10k *ar,
                                           struct sk_buff *skb)
 {
@@ -181,24 +171,22 @@ err_pull:
        return ret;
 }
 
-static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
-                                           struct sk_buff *skb)
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_htc *htc = &ar->htc;
        struct ath10k_skb_cb *skb_cb;
        struct ath10k_htc_ep *ep;
 
        if (WARN_ON_ONCE(!skb))
-               return 0;
+               return;
 
        skb_cb = ATH10K_SKB_CB(skb);
        ep = &htc->endpoint[skb_cb->eid];
 
        ath10k_htc_notify_tx_completion(ep, skb);
        /* the skb now belongs to the completion handler */
-
-       return 0;
 }
+EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
 
 /***********/
 /* Receive */
@@ -304,8 +292,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
        return status;
 }
 
-static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
-                                           struct sk_buff *skb)
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
 {
        int status = 0;
        struct ath10k_htc *htc = &ar->htc;
@@ -326,21 +313,11 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
                ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
                ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
                                hdr, sizeof(*hdr));
-               status = -EINVAL;
                goto out;
        }
 
        ep = &htc->endpoint[eid];
 
-       /*
-        * If this endpoint that received a message from the target has
-        * a to-target HIF pipe whose send completions are polled rather
-        * than interrupt-driven, this is a good point to ask HIF to check
-        * whether it has any completed sends to handle.
-        */
-       if (ep->ul_is_polled)
-               ath10k_htc_send_complete_check(ep, 1);
-
        payload_len = __le16_to_cpu(hdr->len);
 
        if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
@@ -348,7 +325,6 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
                            payload_len + sizeof(*hdr));
                ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
                                hdr, sizeof(*hdr));
-               status = -EINVAL;
                goto out;
        }
 
@@ -358,7 +334,6 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
                           skb->len, payload_len);
                ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
                                "", hdr, sizeof(*hdr));
-               status = -EINVAL;
                goto out;
        }
 
@@ -374,7 +349,6 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
                    (trailer_len > payload_len)) {
                        ath10k_warn(ar, "Invalid trailer length: %d\n",
                                    trailer_len);
-                       status = -EPROTO;
                        goto out;
                }
 
@@ -407,7 +381,6 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
                                 * sending unsolicited messages on the ep 0
                                 */
                                ath10k_warn(ar, "HTC rx ctrl still processing\n");
-                               status = -EINVAL;
                                complete(&htc->ctl_resp);
                                goto out;
                        }
@@ -439,9 +412,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
        skb = NULL;
 out:
        kfree_skb(skb);
-
-       return status;
 }
+EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
 
 static void ath10k_htc_control_rx_complete(struct ath10k *ar,
                                           struct sk_buff *skb)
@@ -767,9 +739,7 @@ setup:
        status = ath10k_hif_map_service_to_pipe(htc->ar,
                                                ep->service_id,
                                                &ep->ul_pipe_id,
-                                               &ep->dl_pipe_id,
-                                               &ep->ul_is_polled,
-                                               &ep->dl_is_polled);
+                                               &ep->dl_pipe_id);
        if (status)
                return status;
 
@@ -778,10 +748,6 @@ setup:
                   htc_service_name(ep->service_id), ep->ul_pipe_id,
                   ep->dl_pipe_id, ep->eid);
 
-       ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                  "boot htc ep %d ul polled %d dl polled %d\n",
-                  ep->eid, ep->ul_is_polled, ep->dl_is_polled);
-
        if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
                ep->tx_credit_flow_enabled = false;
                ath10k_dbg(ar, ATH10K_DBG_BOOT,
@@ -841,7 +807,6 @@ int ath10k_htc_start(struct ath10k_htc *htc)
 /* registered target arrival callback from the HIF layer */
 int ath10k_htc_init(struct ath10k *ar)
 {
-       struct ath10k_hif_cb htc_callbacks;
        struct ath10k_htc_ep *ep = NULL;
        struct ath10k_htc *htc = &ar->htc;
 
@@ -849,15 +814,11 @@ int ath10k_htc_init(struct ath10k *ar)
 
        ath10k_htc_reset_endpoint_states(htc);
 
-       /* setup HIF layer callbacks */
-       htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
-       htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
        htc->ar = ar;
 
        /* Get HIF default pipe for HTC message exchange */
        ep = &htc->endpoint[ATH10K_HTC_EP_0];
 
-       ath10k_hif_set_callbacks(ar, &htc_callbacks);
        ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
 
        init_completion(&htc->ctl_resp);
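
The hunks above remove the ath10k_hif_cb indirection: rather than registering tx/rx completion callbacks with the HIF layer at init time, HTC now exports its handlers and the bus back-end calls them directly. Since the return values were effectively unused, the handlers also become void. A minimal before/after sketch of the control flow (illustrative only, not the driver source):

    /* before: one global callback pair, registered via a HIF op */
    struct ath10k_hif_cb cb = {
            .rx_completion = ath10k_htc_rx_completion_handler,
            .tx_completion = ath10k_htc_tx_completion_handler,
    };
    ath10k_hif_set_callbacks(ar, &cb);      /* op deleted by this patch */

    /* after: the bus code calls the exported symbols directly */
    ath10k_htc_rx_completion_handler(ar, skb);
    ath10k_htc_tx_completion_handler(ar, skb);
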
index 527179c0edced2f008e6cfa37daf79d3cf94ec0d..e70aa38e6e05dbb1611981d5c26b332d05ac0ce5 100644 (file)
@@ -312,8 +312,6 @@ struct ath10k_htc_ep {
        int max_ep_message_len;
        u8 ul_pipe_id;
        u8 dl_pipe_id;
-       int ul_is_polled; /* call HIF to get tx completions */
-       int dl_is_polled; /* call HIF to fetch rx (not implemented) */
 
        u8 seq_no; /* for debugging */
        int tx_credits;
@@ -355,5 +353,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
 int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
                    struct sk_buff *packet);
 struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
 
 #endif
index 5a8e4eae7a9cbe817c7d62107f8bdc30dd89b449..2bad50e520b5bd2338ab479b13127aefe2dc0cf5 100644 (file)
@@ -1488,7 +1488,6 @@ struct ath10k_htt {
        int num_pending_mgmt_tx;
        struct idr pending_tx;
        wait_queue_head_t empty_tx_wq;
-       struct dma_pool *tx_pool;
 
        /* set if host-fw communication goes haywire
         * used to avoid further failures */
@@ -1509,6 +1508,11 @@ struct ath10k_htt {
                dma_addr_t paddr;
                struct htt_msdu_ext_desc *vaddr;
        } frag_desc;
+
+       struct {
+               dma_addr_t paddr;
+               struct ath10k_htt_txbuf *vaddr;
+       } txbuf;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1587,6 +1591,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu);
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
 
 void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
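
With the dma_pool gone, struct ath10k_htt gains a single {vaddr, paddr} pair covering all tx descriptors, and the per-skb state keeps only a pointer into that block. A hypothetical helper (not part of the patch) makes the slot math explicit:

    /* slot lookup for a contiguous coherent block of
     * max_num_pending_tx ath10k_htt_txbuf entries */
    static inline struct ath10k_htt_txbuf *
    htt_txbuf_slot(struct ath10k_htt *htt, u16 msdu_id, dma_addr_t *paddr)
    {
            *paddr = htt->txbuf.paddr +
                     msdu_id * sizeof(struct ath10k_htt_txbuf);
            return &htt->txbuf.vaddr[msdu_id];
    }
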
index 606c1a34f004026afd31991bfc277583cf1b3ea6..6060dda4e9101ac73c92cafb597c1d71592bb00e 100644 (file)
@@ -2125,6 +2125,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        /* Free the indication buffer */
        dev_kfree_skb_any(skb);
 }
+EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
 
 static void ath10k_htt_txrx_compl_task(unsigned long ptr)
 {
index eb5ba9bb8b4db5d41a2eabe13a9d1ad0cbd5ff95..16823970dbfd33608486b6854b0ed9d1142b8576 100644 (file)
@@ -108,9 +108,12 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
        spin_lock_init(&htt->tx_lock);
        idr_init(&htt->pending_tx);
 
-       htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
-                                      sizeof(struct ath10k_htt_txbuf), 4, 0);
-       if (!htt->tx_pool) {
+       size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
+       htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
+                                                 &htt->txbuf.paddr,
+                                                 GFP_DMA);
+       if (!htt->txbuf.vaddr) {
+               ath10k_err(ar, "failed to alloc tx buffer\n");
                ret = -ENOMEM;
                goto free_idr_pending_tx;
        }
@@ -125,14 +128,17 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
        if (!htt->frag_desc.vaddr) {
                ath10k_warn(ar, "failed to alloc fragment desc memory\n");
                ret = -ENOMEM;
-               goto free_tx_pool;
+               goto free_txbuf;
        }
 
 skip_frag_desc_alloc:
        return 0;
 
-free_tx_pool:
-       dma_pool_destroy(htt->tx_pool);
+free_txbuf:
+       size = htt->max_num_pending_tx *
+                         sizeof(struct ath10k_htt_txbuf);
+       dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+                         htt->txbuf.paddr);
 free_idr_pending_tx:
        idr_destroy(&htt->pending_tx);
        return ret;
@@ -160,7 +166,13 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
 
        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
        idr_destroy(&htt->pending_tx);
-       dma_pool_destroy(htt->tx_pool);
+
+       if (htt->txbuf.vaddr) {
+               size = htt->max_num_pending_tx *
+                                 sizeof(struct ath10k_htt_txbuf);
+               dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+                                 htt->txbuf.paddr);
+       }
 
        if (htt->frag_desc.vaddr) {
                size = htt->max_num_pending_tx *
@@ -175,6 +187,12 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
        dev_kfree_skb_any(skb);
 }
 
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+       dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
+
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
@@ -454,9 +472,9 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
-       if (res < 0) {
+       if (res < 0)
                goto err_tx_dec;
-       }
+
        msdu_id = res;
 
        txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -521,7 +539,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
-       dma_addr_t paddr = 0;
        u32 frags_paddr = 0;
        struct htt_msdu_ext_desc *ext_desc = NULL;
        bool limit_mgmt_desc = false;
@@ -542,21 +559,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
-       if (res < 0) {
+       if (res < 0)
                goto err_tx_dec;
-       }
+
        msdu_id = res;
 
        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);
 
-       skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
-                                          &paddr);
-       if (!skb_cb->htt.txbuf) {
-               res = -ENOMEM;
-               goto err_free_msdu_id;
-       }
-       skb_cb->htt.txbuf_paddr = paddr;
+       skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
+       skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
+               (sizeof(struct ath10k_htt_txbuf) * msdu_id);
 
        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
@@ -574,7 +587,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
-               goto err_free_txbuf;
+               goto err_free_msdu_id;
        }
 
        switch (skb_cb->txmode) {
@@ -706,10 +719,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
 err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-err_free_txbuf:
-       dma_pool_free(htt->tx_pool,
-                     skb_cb->htt.txbuf,
-                     skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
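
The htt_tx.c side of the same change: one dma_alloc_coherent() call sized for max_num_pending_tx replaces per-packet dma_pool_alloc(), the tx hot path shrinks to pointer arithmetic keyed by msdu_id, and the error labels are renamed so the unwind still releases resources in reverse order of acquisition. A condensed sketch, assuming msdu_id < max_num_pending_tx (which the idr allocator guarantees):

    size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
    htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
                                          &htt->txbuf.paddr, GFP_DMA);
    if (!htt->txbuf.vaddr)
            goto free_idr_pending_tx;       /* undo idr_init() only */

    /* per-packet: no allocation, no failure path */
    skb_cb->htt.txbuf       = &htt->txbuf.vaddr[msdu_id];
    skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
                              msdu_id * sizeof(struct ath10k_htt_txbuf);
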
index bc421a5c535630f39b71fe016670729ec9a8945c..2d87737e35ffc28bfe3bc623491c6967970bd51b 100644 (file)
@@ -97,6 +97,9 @@ enum qca6174_chip_id_rev {
 
 /* includes also the null byte */
 #define ATH10K_FIRMWARE_MAGIC               "QCA-ATH10K"
+#define ATH10K_BOARD_MAGIC                  "QCA-ATH10K-BOARD"
+
+#define ATH10K_BOARD_API2_FILE         "board-2.bin"
 
 #define REG_DUMP_COUNT_QCA988X 60
 
@@ -159,6 +162,16 @@ enum ath10k_fw_htt_op_version {
        ATH10K_FW_HTT_OP_VERSION_MAX,
 };
 
+enum ath10k_bd_ie_type {
+       /* contains sub IEs of enum ath10k_bd_ie_board_type */
+       ATH10K_BD_IE_BOARD = 0,
+};
+
+enum ath10k_bd_ie_board_type {
+       ATH10K_BD_IE_BOARD_NAME = 0,
+       ATH10K_BD_IE_BOARD_DATA = 1,
+};
+
 enum ath10k_hw_rev {
        ATH10K_HW_QCA988X,
        ATH10K_HW_QCA6174,
@@ -337,7 +350,7 @@ enum ath10k_hw_rate_cck {
 #define TARGET_10X_MAX_FRAG_ENTRIES            0
 
 /* 10.2 parameters */
-#define TARGET_10_2_DMA_BURST_SIZE             1
+#define TARGET_10_2_DMA_BURST_SIZE             0
 
 /* Target specific defines for WMI-TLV firmware */
 #define TARGET_TLV_NUM_VDEVS                   4
@@ -391,7 +404,7 @@ enum ath10k_hw_rate_cck {
 
 #define TARGET_10_4_TX_DBG_LOG_SIZE            1024
 #define TARGET_10_4_NUM_WDS_ENTRIES            32
-#define TARGET_10_4_DMA_BURST_SIZE             1
+#define TARGET_10_4_DMA_BURST_SIZE             0
 #define TARGET_10_4_MAC_AGGR_DELIM             0
 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
 #define TARGET_10_4_VOW_CONFIG                 0
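
ATH10K_BOARD_MAGIC and the ath10k_bd_ie enums describe the new board-2.bin container: a magic string followed by type/length IEs, where an ATH10K_BD_IE_BOARD IE carries name/data sub-IEs so one file can serve many board variants. A hypothetical sketch of the layout check (the struct and parse logic here are illustrative; the real parser lives in core.c, outside this diff):

    struct bd_ie {                  /* illustrative TLV header */
            __le32 type;            /* enum ath10k_bd_ie_type */
            __le32 len;
            u8 data[0];
    } __packed;

    /* the magic includes its null byte, like ATH10K_FIRMWARE_MAGIC */
    if (len < sizeof(ATH10K_BOARD_MAGIC) ||
        memcmp(data, ATH10K_BOARD_MAGIC, sizeof(ATH10K_BOARD_MAGIC)))
            return -EINVAL;
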
index 79490ad41ac562a74bb10b9fe05a7785321c77ea..484c1a10372f13a9fead3d7c4f4368080ab9f10a 100644 (file)
@@ -197,9 +197,8 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
                return -EOPNOTSUPP;
        }
 
-       if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+       if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-       }
 
        if (cmd == DISABLE_KEY) {
                arg.key_cipher = WMI_CIPHER_NONE;
@@ -1111,7 +1110,8 @@ static int ath10k_monitor_recalc(struct ath10k *ar)
 
                        ret = ath10k_monitor_stop(ar);
                        if (ret)
-                               ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret);
+                               ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
+                                           ret);
                                /* not serious */
                }
 
@@ -2084,7 +2084,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
        enum ieee80211_band band;
        const u8 *ht_mcs_mask;
        const u16 *vht_mcs_mask;
-       int i, n, max_nss;
+       int i, n;
+       u8 max_nss;
        u32 stbc;
 
        lockdep_assert_held(&ar->conf_mutex);
@@ -2169,7 +2170,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
                        arg->peer_ht_rates.rates[i] = i;
        } else {
                arg->peer_ht_rates.num_rates = n;
-               arg->peer_num_spatial_streams = max_nss;
+               arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
        }
 
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -4065,6 +4066,7 @@ static u32 get_nss_from_chainmask(u16 chain_mask)
 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
 {
        int nsts = ar->vht_cap_info;
+
        nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
        nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
 
@@ -4081,8 +4083,9 @@ static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
 {
        int sound_dim = ar->vht_cap_info;
+
        sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
-       sound_dim >>=IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+       sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
 
        /* If the sounding dimension is not advertised by the firmware,
         * let's use a default value of 1
@@ -4656,7 +4659,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                                                info->use_cts_prot ? 1 : 0);
                if (ret)
                        ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
-                                       info->use_cts_prot, arvif->vdev_id, ret);
+                                   info->use_cts_prot, arvif->vdev_id, ret);
        }
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -6268,8 +6271,8 @@ ath10k_mac_update_rx_channel(struct ath10k *ar,
        rcu_read_lock();
        if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
                ieee80211_iter_chan_contexts_atomic(ar->hw,
-                                       ath10k_mac_get_any_chandef_iter,
-                                       &def);
+                                                   ath10k_mac_get_any_chandef_iter,
+                                                   &def);
 
                if (vifs)
                        def = &vifs[0].new_ctx->def;
@@ -7301,7 +7304,7 @@ int ath10k_mac_register(struct ath10k *ar)
                            ath10k_reg_notifier);
        if (ret) {
                ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
-               goto err_free;
+               goto err_dfs_detector_exit;
        }
 
        ar->hw->wiphy->cipher_suites = cipher_suites;
@@ -7310,7 +7313,7 @@ int ath10k_mac_register(struct ath10k *ar)
        ret = ieee80211_register_hw(ar->hw);
        if (ret) {
                ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
-               goto err_free;
+               goto err_dfs_detector_exit;
        }
 
        if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
@@ -7324,10 +7327,16 @@ int ath10k_mac_register(struct ath10k *ar)
 
 err_unregister:
        ieee80211_unregister_hw(ar->hw);
+
+err_dfs_detector_exit:
+       if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+               ar->dfs_detector->exit(ar->dfs_detector);
+
 err_free:
        kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
        kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
 
+       SET_IEEE80211_DEV(ar->hw, NULL);
        return ret;
 }
 
index 110fcad609b900f9abe32aec3f6e0ab8b8df6e63..5c05b0cf54a1e502c1ca32412541bcb1c3002c7e 100644 (file)
@@ -104,6 +104,10 @@ static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
 
 static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
@@ -112,6 +116,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
+               .send_cb = ath10k_pci_htc_tx_cb,
        },
 
        /* CE1: target->host HTT + HTC control */
@@ -120,6 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
+               .recv_cb = ath10k_pci_htc_rx_cb,
        },
 
        /* CE2: target->host WMI */
@@ -128,6 +134,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 128,
+               .recv_cb = ath10k_pci_htc_rx_cb,
        },
 
        /* CE3: host->target WMI */
@@ -136,6 +143,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
+               .send_cb = ath10k_pci_htc_tx_cb,
        },
 
        /* CE4: host->target HTT */
@@ -144,14 +152,16 @@ static const struct ce_attr host_ce_config_wlan[] = {
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
+               .send_cb = ath10k_pci_htt_tx_cb,
        },
 
-       /* CE5: unused */
+       /* CE5: target->host HTT (HIF->HTT) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
-               .src_sz_max = 0,
-               .dest_nentries = 0,
+               .src_sz_max = 512,
+               .dest_nentries = 512,
+               .recv_cb = ath10k_pci_htt_rx_cb,
        },
 
        /* CE6: target autonomous hif_memcpy */
@@ -257,12 +267,12 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
 
        /* NB: 50% of src nentries, since tx has 2 frags */
 
-       /* CE5: unused */
+       /* CE5: target->host HTT (HIF->HTT) */
        {
                .pipenum = __cpu_to_le32(5),
-               .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+               .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
-               .nbytes_max = __cpu_to_le32(2048),
+               .nbytes_max = __cpu_to_le32(512),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },
@@ -396,7 +406,7 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
-               __cpu_to_le32(1),
+               __cpu_to_le32(5),
        },
 
        /* (Additions here) */
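
Three coordinated pci.c changes show up in these tables: completion callbacks move into the static ce_attr entries so each copy engine can have its own handler, the previously unused CE5 becomes a dedicated target->host HTT pipe, and the HTT_DATA_MSG downlink mapping follows it from pipe 1 to pipe 5. Table-driven wiring in sketch form (the entry name is illustrative):

    /* per-pipe callbacks are now data, consumed at pipe allocation time,
     * replacing the single send/recv pair passed to ath10k_ce_alloc_pipe() */
    static const struct ce_attr htt_rx_pipe = {
            .flags = CE_ATTR_FLAGS,
            .src_nentries = 0,
            .src_sz_max = 512,
            .dest_nentries = 512,
            .recv_cb = ath10k_pci_htt_rx_cb,  /* HTT bypasses the HTC path */
    };
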
@@ -452,8 +462,12 @@ static int ath10k_pci_wake_wait(struct ath10k *ar)
        int curr_delay = 5;
 
        while (tot_delay < PCIE_WAKE_TIMEOUT) {
-               if (ath10k_pci_is_awake(ar))
+               if (ath10k_pci_is_awake(ar)) {
+                       if (tot_delay > PCIE_WAKE_LATE_US)
+                               ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
+                                           tot_delay / 1000);
                        return 0;
+               }
 
                udelay(curr_delay);
                tot_delay += curr_delay;
@@ -465,12 +479,53 @@ static int ath10k_pci_wake_wait(struct ath10k *ar)
        return -ETIMEDOUT;
 }
 
+static int ath10k_pci_force_wake(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+       if (!ar_pci->ps_awake) {
+               iowrite32(PCIE_SOC_WAKE_V_MASK,
+                         ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                         PCIE_SOC_WAKE_ADDRESS);
+
+               ret = ath10k_pci_wake_wait(ar);
+               if (ret == 0)
+                       ar_pci->ps_awake = true;
+       }
+
+       spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+       return ret;
+}
+
+static void ath10k_pci_force_sleep(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+       iowrite32(PCIE_SOC_WAKE_RESET,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+       ar_pci->ps_awake = false;
+
+       spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
 static int ath10k_pci_wake(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
        int ret = 0;
 
+       if (ar_pci->pci_ps == 0)
+               return ret;
+
        spin_lock_irqsave(&ar_pci->ps_lock, flags);
 
        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
@@ -502,6 +557,9 @@ static void ath10k_pci_sleep(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
 
+       if (ar_pci->pci_ps == 0)
+               return;
+
        spin_lock_irqsave(&ar_pci->ps_lock, flags);
 
        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
@@ -544,6 +602,11 @@ static void ath10k_pci_sleep_sync(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
 
+       if (ar_pci->pci_ps == 0) {
+               ath10k_pci_force_sleep(ar);
+               return;
+       }
+
        del_timer_sync(&ar_pci->ps_timer);
 
        spin_lock_irqsave(&ar_pci->ps_lock, flags);
@@ -682,8 +745,6 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
        dma_addr_t paddr;
        int ret;
 
-       lockdep_assert_held(&ar_pci->ce_lock);
-
        skb = dev_alloc_skb(pipe->buf_sz);
        if (!skb)
                return -ENOMEM;
@@ -701,9 +762,10 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
 
        ATH10K_SKB_RXCB(skb)->paddr = paddr;
 
+       spin_lock_bh(&ar_pci->ce_lock);
        ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+       spin_unlock_bh(&ar_pci->ce_lock);
        if (ret) {
-               ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
@@ -713,25 +775,27 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
        return 0;
 }
 
-static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
 {
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        int ret, num;
 
-       lockdep_assert_held(&ar_pci->ce_lock);
-
        if (pipe->buf_sz == 0)
                return;
 
        if (!ce_pipe->dest_ring)
                return;
 
+       spin_lock_bh(&ar_pci->ce_lock);
        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+       spin_unlock_bh(&ar_pci->ce_lock);
        while (num--) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
+                       if (ret == -ENOSPC)
+                               break;
                        ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                        mod_timer(&ar_pci->rx_post_retry, jiffies +
                                  ATH10K_PCI_RX_POST_RETRY_MS);
@@ -740,25 +804,13 @@ static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
        }
 }
 
-static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
-{
-       struct ath10k *ar = pipe->hif_ce_state;
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-       spin_lock_bh(&ar_pci->ce_lock);
-       __ath10k_pci_rx_post_pipe(pipe);
-       spin_unlock_bh(&ar_pci->ce_lock);
-}
-
 static void ath10k_pci_rx_post(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;
 
-       spin_lock_bh(&ar_pci->ce_lock);
        for (i = 0; i < CE_COUNT; i++)
-               __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
-       spin_unlock_bh(&ar_pci->ce_lock);
+               ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
 }
 
 static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
@@ -1102,11 +1154,9 @@ static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
 }
 
 /* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
 {
        struct ath10k *ar = ce_state->ar;
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff_head list;
        struct sk_buff *skb;
        u32 ce_data;
@@ -1124,16 +1174,16 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
        }
 
        while ((skb = __skb_dequeue(&list)))
-               cb->tx_completion(ar, skb);
+               ath10k_htc_tx_completion_handler(ar, skb);
 }
 
-/* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
+static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+                                    void (*callback)(struct ath10k *ar,
+                                                     struct sk_buff *skb))
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
-       struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
@@ -1168,12 +1218,56 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);
 
-               cb->rx_completion(ar, skb);
+               callback(ar, skb);
        }
 
        ath10k_pci_rx_post_pipe(pipe_info);
 }
 
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+/* Called by lower (CE) layer when a send to HTT Target completes. */
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct sk_buff *skb;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int transfer_id;
+
+       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
+                                            &nbytes, &transfer_id) == 0) {
+               /* no need to call tx completion for NULL pointers */
+               if (!skb)
+                       continue;
+
+               dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+                                skb->len, DMA_TO_DEVICE);
+               ath10k_htt_hif_tx_complete(ar, skb);
+       }
+}
+
+static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+       skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+       ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+/* Called by lower (CE) layer when HTT data is received from the Target. */
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       /* CE4 polling needs to be done whenever CE pipe which transports
+        * HTT Rx (target->host) is processed.
+        */
+       ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+       ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+}
+
 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                                struct ath10k_hif_sg_item *items, int n_items)
 {
@@ -1343,17 +1437,6 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
        ath10k_ce_per_engine_service(ar, pipe);
 }
 
-static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
-                                        struct ath10k_hif_cb *callbacks)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-       ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
-
-       memcpy(&ar_pci->msg_callbacks_current, callbacks,
-              sizeof(ar_pci->msg_callbacks_current));
-}
-
 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1368,10 +1451,8 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
        del_timer_sync(&ar_pci->rx_post_retry);
 }
 
-static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
-                                             u16 service_id, u8 *ul_pipe,
-                                             u8 *dl_pipe, int *ul_is_polled,
-                                             int *dl_is_polled)
+static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+                                             u8 *ul_pipe, u8 *dl_pipe)
 {
        const struct service_to_pipe *entry;
        bool ul_set = false, dl_set = false;
@@ -1379,9 +1460,6 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
 
        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
 
-       /* polling for received messages not supported */
-       *dl_is_polled = 0;
-
        for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
                entry = &target_service_to_ce_map_wlan[i];
 
@@ -1415,25 +1493,17 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
        if (WARN_ON(!ul_set || !dl_set))
                return -ENOENT;
 
-       *ul_is_polled =
-               (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
-
        return 0;
 }
 
 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
 {
-       int ul_is_polled, dl_is_polled;
-
        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
 
        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
-                                                ul_pipe,
-                                                dl_pipe,
-                                                &ul_is_polled,
-                                                &dl_is_polled);
+                                                ul_pipe, dl_pipe);
 }
 
 static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
@@ -1504,6 +1574,7 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
 static int ath10k_pci_hif_start(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
 
        ath10k_pci_irq_enable(ar);
@@ -1579,7 +1650,7 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
 
                ce_ring->per_transfer_context[i] = NULL;
 
-               ar_pci->msg_callbacks_current.tx_completion(ar, skb);
+               ath10k_htc_tx_completion_handler(ar, skb);
        }
 }
 
@@ -1999,9 +2070,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar)
                pipe->pipe_num = i;
                pipe->hif_ce_state = ar;
 
-               ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
-                                          ath10k_pci_ce_send_done,
-                                          ath10k_pci_ce_recv_data);
+               ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
                if (ret) {
                        ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
                                   i, ret);
@@ -2257,7 +2326,7 @@ static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
        ret = ath10k_pci_wait_for_target_init(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
-                               ret);
+                           ret);
                return ret;
        }
 
@@ -2397,6 +2466,15 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct pci_dev *pdev = ar_pci->pdev;
        u32 val;
+       int ret = 0;
+
+       if (ar_pci->pci_ps == 0) {
+               ret = ath10k_pci_force_wake(ar);
+               if (ret) {
+                       ath10k_err(ar, "failed to wake up target: %d\n", ret);
+                       return ret;
+               }
+       }
 
        /* Suspend/Resume resets the PCI configuration space, so we have to
         * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
@@ -2407,7 +2485,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-       return 0;
+       return ret;
 }
 #endif
 
@@ -2421,7 +2499,6 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
        .send_complete_check    = ath10k_pci_hif_send_complete_check,
-       .set_callbacks          = ath10k_pci_hif_set_callbacks,
        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
        .power_up               = ath10k_pci_hif_power_up,
        .power_down             = ath10k_pci_hif_power_down,
@@ -2501,6 +2578,16 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
 {
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       if (ar_pci->pci_ps == 0) {
+               ret = ath10k_pci_force_wake(ar);
+               if (ret) {
+                       ath10k_warn(ar, "failed to wake device up on irq: %d\n",
+                                   ret);
+                       return IRQ_NONE;
+               }
+       }
 
        if (ar_pci->num_msi_intrs == 0) {
                if (!ath10k_pci_irq_pending(ar))
@@ -2900,17 +2987,21 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        struct ath10k_pci *ar_pci;
        enum ath10k_hw_rev hw_rev;
        u32 chip_id;
+       bool pci_ps;
 
        switch (pci_dev->device) {
        case QCA988X_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA988X;
+               pci_ps = false;
                break;
        case QCA6164_2_1_DEVICE_ID:
        case QCA6174_2_1_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA6174;
+               pci_ps = true;
                break;
        case QCA99X0_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA99X0;
+               pci_ps = false;
                break;
        default:
                WARN_ON(1);
@@ -2924,19 +3015,21 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                return -ENOMEM;
        }
 
-       ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+                  pdev->vendor, pdev->device,
+                  pdev->subsystem_vendor, pdev->subsystem_device);
 
        ar_pci = ath10k_pci_priv(ar);
        ar_pci->pdev = pdev;
        ar_pci->dev = &pdev->dev;
        ar_pci->ar = ar;
        ar->dev_id = pci_dev->device;
+       ar_pci->pci_ps = pci_ps;
 
-       if (pdev->subsystem_vendor || pdev->subsystem_device)
-               scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
-                         "%04x:%04x:%04x:%04x",
-                         pdev->vendor, pdev->device,
-                         pdev->subsystem_vendor, pdev->subsystem_device);
+       ar->id.vendor = pdev->vendor;
+       ar->id.device = pdev->device;
+       ar->id.subsystem_vendor = pdev->subsystem_vendor;
+       ar->id.subsystem_device = pdev->subsystem_device;
 
        spin_lock_init(&ar_pci->ce_lock);
        spin_lock_init(&ar_pci->ps_lock);
@@ -2962,6 +3055,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        ath10k_pci_ce_deinit(ar);
        ath10k_pci_irq_disable(ar);
 
+       if (ar_pci->pci_ps == 0) {
+               ret = ath10k_pci_force_wake(ar);
+               if (ret) {
+                       ath10k_warn(ar, "failed to wake up device: %d\n", ret);
+                       goto err_free_pipes;
+               }
+       }
+
        ret = ath10k_pci_init_irq(ar);
        if (ret) {
                ath10k_err(ar, "failed to init irqs: %d\n", ret);
@@ -3090,13 +3191,16 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
 
 /* QCA6174 2.1 firmware files */
 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
 
 /* QCA6174 3.1 firmware files */
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
index 8d364fb8f743eb6f8ed488c88d83b8adf25b6536..f91bf333cb75e7cfb0fb974ccf5a80217876b3d9 100644 (file)
@@ -175,8 +175,6 @@ struct ath10k_pci {
 
        struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
-       struct ath10k_hif_cb msg_callbacks_current;
-
        /* Copy Engine used for Diagnostic Accesses */
        struct ath10k_ce_pipe *ce_diag;
 
@@ -221,6 +219,12 @@ struct ath10k_pci {
         * powersave register state changes.
         */
        bool ps_awake;
+
+       /* pci power save, disable for QCA988X and QCA99X0.
+        * Writing 'false' to this variable avoids frequent locking
+        * on MMIO read/write.
+        */
+       bool pci_ps;
 };
 
 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -230,7 +234,8 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
 
 #define ATH10K_PCI_RX_POST_RETRY_MS 50
 #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
-#define PCIE_WAKE_TIMEOUT 10000        /* 10ms */
+#define PCIE_WAKE_TIMEOUT 30000        /* 30ms */
+#define PCIE_WAKE_LATE_US 10000        /* 10ms */
 
 #define BAR_NUM 0
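
The pci_ps flag gates the whole refcounted wake/sleep machinery: QCA988X and QCA99X0 keep the target permanently awake and only need a one-shot forced wake before the first MMIO access (probe, irq, resume), which avoids taking ps_lock on every register read and write. The wake timeout also grows from 10 ms to 30 ms, with a warning once wakeup exceeds PCIE_WAKE_LATE_US. The recurring guard, as used in the pci.c hunks above:

    if (ar_pci->pci_ps == 0) {
            ret = ath10k_pci_force_wake(ar);
            if (ret) {
                    ath10k_warn(ar, "failed to wake up target: %d\n", ret);
                    return ret;
            }
    }
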
 
index 1a899d70dc5db5e6dea73280a42f06218285de77..60fe562e304110435a481f2e8fcf6181efb11688 100644 (file)
@@ -215,6 +215,6 @@ err_cooling_destroy:
 
 void ath10k_thermal_unregister(struct ath10k *ar)
 {
-       thermal_cooling_device_unregister(ar->thermal.cdev);
        sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+       thermal_cooling_device_unregister(ar->thermal.cdev);
 }
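
A pure ordering fix: the "cooling_device" sysfs link points at the cooling device, so it must disappear before the device it references is unregistered; teardown should mirror registration in reverse.

    /* unregister in reverse order of registration */
    sysfs_remove_link(&ar->dev->kobj, "cooling_device");  /* link first */
    thermal_cooling_device_unregister(ar->thermal.cdev);  /* then device */
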
index 7db7d501726b9b7605e955ba03163a83ed65188c..6d1105ab4592b1cb6434b9fb6b98702288324b5f 100644 (file)
@@ -92,11 +92,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        skb_cb = ATH10K_SKB_CB(msdu);
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
-       if (skb_cb->htt.txbuf)
-               dma_pool_free(htt->tx_pool,
-                             skb_cb->htt.txbuf,
-                             skb_cb->htt.txbuf_paddr);
-
        ath10k_report_offchan_tx(htt->ar, msdu);
 
        info = IEEE80211_SKB_CB(msdu);
index 248ffc3d6620cfb9daeb9c35ce14d3ad4f021b84..b54aa08cb25cf74de221333591ab903dfe6032d9 100644 (file)
@@ -177,6 +177,11 @@ struct wmi_ops {
                                                const struct wmi_tdls_peer_capab_arg *cap,
                                                const struct wmi_channel_arg *chan);
        struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
+       struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
+                                                  u32 param);
+       void (*fw_stats_fill)(struct ath10k *ar,
+                             struct ath10k_fw_stats *fw_stats,
+                             char *buf);
 };
 
 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1270,4 +1275,31 @@ ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
 }
 
+static inline int
+ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+       struct sk_buff *skb;
+
+       if (!ar->wmi.ops->gen_pdev_get_tpc_config)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
+
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->pdev_get_tpc_config_cmdid);
+}
+
+static inline int
+ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
+                        char *buf)
+{
+       if (!ar->wmi.ops->fw_stats_fill)
+               return -EOPNOTSUPP;
+
+       ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
+       return 0;
+}
 #endif
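
Both additions follow the standard wmi-ops pattern: every firmware branch fills a struct wmi_ops table, and a thin inline wrapper in this header turns a missing op into -EOPNOTSUPP so callers need no per-firmware checks. The skeleton, with hypothetical op and command names:

    static inline int ath10k_wmi_example(struct ath10k *ar, u32 param)
    {
            struct sk_buff *skb;

            if (!ar->wmi.ops->gen_example)          /* hypothetical op */
                    return -EOPNOTSUPP;

            skb = ar->wmi.ops->gen_example(ar, param);
            if (IS_ERR(skb))
                    return PTR_ERR(skb);

            return ath10k_wmi_cmd_send(ar, skb,
                                       ar->wmi.cmd->example_cmdid);
    }
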
index b5849b3fd2f0cd73941d490e8ae202014432e5f8..8f835480a62cf2e9e74597b55fefa2bef332c461 100644 (file)
@@ -3468,6 +3468,7 @@ static const struct wmi_ops wmi_tlv_ops = {
        .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
        .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
        .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
+       .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
 };
 
 /************/
index 87d9de2aa8c50792ea266417ebd524e93a5a119c..6e7d7a7f6a97a28724f839809e0526a8bedaa8be 100644 (file)
@@ -3018,8 +3018,6 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
                        memcpy(skb_put(bcn, arvif->u.ap.noa_len),
                               arvif->u.ap.noa_data,
                               arvif->u.ap.noa_len);
-
-       return;
 }
 
 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
@@ -3507,7 +3505,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
                                                          tsf);
                        if (res < 0) {
                                ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
-                                           res);
+                                          res);
                                return;
                        }
                        break;
@@ -3835,9 +3833,258 @@ void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
        ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
 }
 
+static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
+                                    struct wmi_pdev_tpc_config_event *ev,
+                                    u32 rate_idx, u32 num_chains,
+                                    u32 rate_code, u8 type)
+{
+       u8 tpc, num_streams, preamble, ch, stm_idx;
+
+       num_streams = ATH10K_HW_NSS(rate_code);
+       preamble = ATH10K_HW_PREAMBLE(rate_code);
+       ch = num_chains - 1;
+
+       tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
+
+       if (__le32_to_cpu(ev->num_tx_chain) <= 1)
+               goto out;
+
+       if (preamble == WMI_RATE_PREAMBLE_CCK)
+               goto out;
+
+       stm_idx = num_streams - 1;
+       if (num_chains <= num_streams)
+               goto out;
+
+       switch (type) {
+       case WMI_TPC_TABLE_TYPE_STBC:
+               tpc = min_t(u8, tpc,
+                           ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
+               break;
+       case WMI_TPC_TABLE_TYPE_TXBF:
+               tpc = min_t(u8, tpc,
+                           ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
+               break;
+       case WMI_TPC_TABLE_TYPE_CDD:
+               tpc = min_t(u8, tpc,
+                           ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
+               break;
+       default:
+               ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
+               tpc = 0;
+               break;
+       }
+
+out:
+       return tpc;
+}
+
+static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
+                                         struct wmi_pdev_tpc_config_event *ev,
+                                         struct ath10k_tpc_stats *tpc_stats,
+                                         u8 *rate_code, u16 *pream_table, u8 type)
+{
+       u32 i, j, pream_idx, flags;
+       u8 tpc[WMI_TPC_TX_N_CHAIN];
+       char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+       char buff[WMI_TPC_BUF_SIZE];
+
+       flags = __le32_to_cpu(ev->flags);
+
+       switch (type) {
+       case WMI_TPC_TABLE_TYPE_CDD:
+               if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
+                       ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
+                       tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+                       return;
+               }
+               break;
+       case WMI_TPC_TABLE_TYPE_STBC:
+               if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
+                       ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
+                       tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+                       return;
+               }
+               break;
+       case WMI_TPC_TABLE_TYPE_TXBF:
+               if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
+                       ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
+                       tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+                       return;
+               }
+               break;
+       default:
+               ath10k_dbg(ar, ATH10K_DBG_WMI,
+                          "invalid table type in wmi tpc event: %d\n", type);
+               return;
+       }
+
+       pream_idx = 0;
+       for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+               memset(tpc_value, 0, sizeof(tpc_value));
+               memset(buff, 0, sizeof(buff));
+               if (i == pream_table[pream_idx])
+                       pream_idx++;
+
+               for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+                       if (j >= __le32_to_cpu(ev->num_tx_chain))
+                               break;
+
+                       tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
+                                                           rate_code[i],
+                                                           type);
+                       snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
+                       strncat(tpc_value, buff, strlen(buff));
+               }
+               tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
+               tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
+               memcpy(tpc_stats->tpc_table[type].tpc_value[i],
+                      tpc_value, sizeof(tpc_value));
+       }
+}
+
 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
 {
-       ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
+       u32 i, j, pream_idx, num_tx_chain;
+       u8 rate_code[WMI_TPC_RATE_MAX], rate_idx;
+       u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+       struct wmi_pdev_tpc_config_event *ev;
+       struct ath10k_tpc_stats *tpc_stats;
+
+       ev = (struct wmi_pdev_tpc_config_event *)skb->data;
+
+       tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+       if (!tpc_stats)
+               return;
+
+       /* Create the rate code table based on the chains supported */
+       rate_idx = 0;
+       pream_idx = 0;
+
+       /* Fill CCK rate code */
+       for (i = 0; i < 4; i++) {
+               rate_code[rate_idx] =
+                       ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
+               rate_idx++;
+       }
+       pream_table[pream_idx] = rate_idx;
+       pream_idx++;
+
+       /* Fill OFDM rate code */
+       for (i = 0; i < 8; i++) {
+               rate_code[rate_idx] =
+                       ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
+               rate_idx++;
+       }
+       pream_table[pream_idx] = rate_idx;
+       pream_idx++;
+
+       num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+
+       /* Fill HT20 rate code */
+       for (i = 0; i < num_tx_chain; i++) {
+               for (j = 0; j < 8; j++) {
+                       rate_code[rate_idx] =
+                       ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+                       rate_idx++;
+               }
+       }
+       pream_table[pream_idx] = rate_idx;
+       pream_idx++;
+
+       /* Fill HT40 rate code */
+       for (i = 0; i < num_tx_chain; i++) {
+               for (j = 0; j < 8; j++) {
+                       rate_code[rate_idx] =
+                       ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+                       rate_idx++;
+               }
+       }
+       pream_table[pream_idx] = rate_idx;
+       pream_idx++;
+
+       /* Fill VHT20 rate code */
+       for (i = 0; i < __le32_to_cpu(ev->num_tx_chain); i++) {
+               for (j = 0; j < 10; j++) {
+                       rate_code[rate_idx] =
+                       ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+                       rate_idx++;
+               }
+       }
+       pream_table[pream_idx] = rate_idx;
+       pream_idx++;
+
+       /* Fill VHT40 rate code */
+       for (i = 0; i < num_tx_chain; i++) {
+               for (j = 0; j < 10; j++) {
+                       rate_code[rate_idx] =
+                       ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+                       rate_idx++;
+               }
+       }
+       pream_table[pream_idx] = rate_idx;
+       pream_idx++;
+
+       /* Fill VHT80 rate code */
+       for (i = 0; i < num_tx_chain; i++) {
+               for (j = 0; j < 10; j++) {
+                       rate_code[rate_idx] =
+                       ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+                       rate_idx++;
+               }
+       }
+       pream_table[pream_idx] = rate_idx;
+       pream_idx++;
+
+       rate_code[rate_idx++] =
+               ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+       rate_code[rate_idx++] =
+               ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+       rate_code[rate_idx++] =
+               ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+       rate_code[rate_idx++] =
+               ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+       rate_code[rate_idx++] =
+               ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+
+       pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
+
+       tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
+       tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
+       tpc_stats->ctl = __le32_to_cpu(ev->ctl);
+       tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
+       tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
+       tpc_stats->twice_antenna_reduction =
+               __le32_to_cpu(ev->twice_antenna_reduction);
+       tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+       tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+       tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+       tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+
+       ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+                                     rate_code, pream_table,
+                                     WMI_TPC_TABLE_TYPE_CDD);
+       ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+                                     rate_code, pream_table,
+                                     WMI_TPC_TABLE_TYPE_STBC);
+       ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+                                     rate_code, pream_table,
+                                     WMI_TPC_TABLE_TYPE_TXBF);
+
+       ath10k_debug_tpc_stats_process(ar, tpc_stats);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n",
+                  __le32_to_cpu(ev->chan_freq),
+                  __le32_to_cpu(ev->phy_mode),
+                  __le32_to_cpu(ev->ctl),
+                  __le32_to_cpu(ev->reg_domain),
+                  a_sle32_to_cpu(ev->twice_antenna_gain),
+                  __le32_to_cpu(ev->twice_antenna_reduction),
+                  __le32_to_cpu(ev->power_limit),
+                  __le32_to_cpu(ev->twice_max_rd_power) / 2,
+                  __le32_to_cpu(ev->num_tx_chain),
+                  __le32_to_cpu(ev->rate_max));
 }
 
 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
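
The six fill loops above all compute the same thing for different preamble groups: a rate code per (MCS, chain) pair, with pream_table[] recording where each group ends. A hypothetical refactor (not in the patch) captures the pattern:

    static u32 fill_rate_group(u8 *rate_code, u16 *pream_table,
                               u32 idx, u32 *pream_idx,
                               u32 chains, u32 rates, u8 preamble)
    {
            u32 i, j;

            for (i = 0; i < chains; i++)
                    for (j = 0; j < rates; j++)
                            rate_code[idx++] =
                                    ATH10K_HW_RATECODE(j, i, preamble);

            pream_table[(*pream_idx)++] = idx;  /* group boundary */
            return idx;
    }

    /* e.g. HT20: rate_idx = fill_rate_group(rate_code, pream_table,
     *            rate_idx, &pream_idx, num_tx_chain, 8,
     *            WMI_RATE_PREAMBLE_HT); */
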
@@ -5090,7 +5337,7 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
        config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
 
-       config.rx_decap_mode        = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE);
+       config.rx_decap_mode        = __cpu_to_le32(ar->wmi.rx_decap_mode);
        config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
        config.bmiss_offload_max_vdev =
                        __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
@@ -6356,6 +6603,399 @@ ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
        return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+       struct wmi_pdev_get_tpc_config_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
+       cmd->param = __cpu_to_le32(param);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi pdev get tpc config param:%d\n", param);
+       return skb;
+}
+
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head)
+{
+       struct ath10k_fw_stats_peer *i;
+       size_t num = 0;
+
+       list_for_each_entry(i, head, list)
+               ++num;
+
+       return num;
+}
+
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head)
+{
+       struct ath10k_fw_stats_vdev *i;
+       size_t num = 0;
+
+       list_for_each_entry(i, head, list)
+               ++num;
+
+       return num;
+}
+
+static void
+ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+                                  char *buf, u32 *length)
+{
+       u32 len = *length;
+       u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n",
+                       "ath10k PDEV stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                       "=================");
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                       "Channel noise floor", pdev->ch_noise_floor);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "Channel TX power", pdev->chan_tx_power);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "TX frame count", pdev->tx_frame_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "RX frame count", pdev->rx_frame_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "RX clear count", pdev->rx_clear_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "Cycle count", pdev->cycle_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "PHY error count", pdev->phy_err_count);
+
+       *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+                                   char *buf, u32 *length)
+{
+       u32 len = *length;
+       u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "RTS bad count", pdev->rts_bad);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "RTS good count", pdev->rts_good);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "FCS bad count", pdev->fcs_bad);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "No beacon count", pdev->no_beacons);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                       "MIB int count", pdev->mib_int_count);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+                                char *buf, u32 *length)
+{
+       u32 len = *length;
+       u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+       len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+                        "ath10k PDEV TX stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HTT cookies queued", pdev->comp_queued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HTT cookies disp.", pdev->comp_delivered);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDU queued", pdev->msdu_enqued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDU queued", pdev->mpdu_enqued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDUs dropped", pdev->wmm_drop);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Local enqued", pdev->local_enqued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Local freed", pdev->local_freed);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HW queued", pdev->hw_queued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PPDUs reaped", pdev->hw_reaped);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Num underruns", pdev->underrun);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PPDUs cleaned", pdev->tx_abort);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs requed", pdev->mpdus_requed);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Excessive retries", pdev->tx_ko);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HW rate", pdev->data_rc);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Sched self tiggers", pdev->self_triggers);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Dropped due to SW retries",
+                        pdev->sw_retry_failure);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Illegal rate phy errors",
+                        pdev->illgl_rate_phy_err);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Pdev continuous xretry", pdev->pdev_cont_xretry);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "TX timeout", pdev->pdev_tx_timeout);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PDEV resets", pdev->pdev_resets);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PHY underrun", pdev->phy_underrun);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDU is more than txop limit", pdev->txop_ovf);
+       *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+                                char *buf, u32 *length)
+{
+       u32 len = *length;
+       u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+       len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+                        "ath10k PDEV RX stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Mid PPDU route change",
+                        pdev->mid_ppdu_route_change);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Tot. number of statuses", pdev->status_rcvd);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on rings 0", pdev->r0_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on rings 1", pdev->r1_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on rings 2", pdev->r2_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on rings 3", pdev->r3_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDUs delivered to HTT", pdev->htt_msdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs delivered to HTT", pdev->htt_mpdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDUs delivered to stack", pdev->loc_msdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs delivered to stack", pdev->loc_mpdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Oversized AMSUs", pdev->oversize_amsdu);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PHY errors", pdev->phy_errs);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PHY errors drops", pdev->phy_err_drop);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+       *length = len;
+}
+
+static void
+ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
+                             char *buf, u32 *length)
+{
+       u32 len = *length;
+       u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+       int i;
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "vdev id", vdev->vdev_id);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "beacon snr", vdev->beacon_snr);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "data snr", vdev->data_snr);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "num rx frames", vdev->num_rx_frames);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "num rts fail", vdev->num_rts_fail);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "num rts success", vdev->num_rts_success);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "num rx err", vdev->num_rx_err);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "num rx discard", vdev->num_rx_discard);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "num tx not acked", vdev->num_tx_not_acked);
+
+       for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+               len += scnprintf(buf + len, buf_len - len,
+                               "%25s [%02d] %u\n",
+                               "num tx frames", i,
+                               vdev->num_tx_frames[i]);
+
+       for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+               len += scnprintf(buf + len, buf_len - len,
+                               "%25s [%02d] %u\n",
+                               "num tx frames retries", i,
+                               vdev->num_tx_frames_retries[i]);
+
+       for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+               len += scnprintf(buf + len, buf_len - len,
+                               "%25s [%02d] %u\n",
+                               "num tx frames failures", i,
+                               vdev->num_tx_frames_failures[i]);
+
+       for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+               len += scnprintf(buf + len, buf_len - len,
+                               "%25s [%02d] 0x%08x\n",
+                               "tx rate history", i,
+                               vdev->tx_rate_history[i]);
+
+       for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+               len += scnprintf(buf + len, buf_len - len,
+                               "%25s [%02d] %u\n",
+                               "beacon rssi history", i,
+                               vdev->beacon_rssi_history[i]);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       *length = len;
+}
+
+static void
+ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
+                             char *buf, u32 *length)
+{
+       u32 len = *length;
+       u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+                       "Peer MAC address", peer->peer_macaddr);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "Peer RSSI", peer->peer_rssi);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "Peer TX rate", peer->peer_tx_rate);
+       len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                       "Peer RX rate", peer->peer_rx_rate);
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       *length = len;
+}
+
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+                                     struct ath10k_fw_stats *fw_stats,
+                                     char *buf)
+{
+       u32 len = 0;
+       u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+       const struct ath10k_fw_stats_pdev *pdev;
+       const struct ath10k_fw_stats_vdev *vdev;
+       const struct ath10k_fw_stats_peer *peer;
+       size_t num_peers;
+       size_t num_vdevs;
+
+       spin_lock_bh(&ar->data_lock);
+
+       pdev = list_first_entry_or_null(&fw_stats->pdevs,
+                                       struct ath10k_fw_stats_pdev, list);
+       if (!pdev) {
+               ath10k_warn(ar, "failed to get pdev stats\n");
+               goto unlock;
+       }
+
+       num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+       num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+       ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+       ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+       ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+                        "ath10k VDEV stats", num_vdevs);
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+               ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+       }
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+                        "ath10k PEER stats", num_peers);
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       list_for_each_entry(peer, &fw_stats->peers, list) {
+               ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+       }
+
+unlock:
+       spin_unlock_bh(&ar->data_lock);
+
+       if (len >= buf_len)
+               buf[len - 1] = 0;
+       else
+               buf[len] = 0;
+}
+
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+                                    struct ath10k_fw_stats *fw_stats,
+                                    char *buf)
+{
+       u32 len = 0;
+       u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+       const struct ath10k_fw_stats_pdev *pdev;
+       const struct ath10k_fw_stats_vdev *vdev;
+       const struct ath10k_fw_stats_peer *peer;
+       size_t num_peers;
+       size_t num_vdevs;
+
+       spin_lock_bh(&ar->data_lock);
+
+       pdev = list_first_entry_or_null(&fw_stats->pdevs,
+                                       struct ath10k_fw_stats_pdev, list);
+       if (!pdev) {
+               ath10k_warn(ar, "failed to get pdev stats\n");
+               goto unlock;
+       }
+
+       num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+       num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+       ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+       ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+       ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+       ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+                        "ath10k VDEV stats", num_vdevs);
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+               ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+       }
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+                        "ath10k PEER stats", num_peers);
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       list_for_each_entry(peer, &fw_stats->peers, list) {
+               ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+       }
+
+unlock:
+       spin_unlock_bh(&ar->data_lock);
+
+       if (len >= buf_len)
+               buf[len - 1] = 0;
+       else
+               buf[len] = 0;
+}
+
 static const struct wmi_ops wmi_ops = {
        .rx = ath10k_wmi_op_rx,
        .map_svc = wmi_main_svc_map,
@@ -6414,6 +7054,7 @@ static const struct wmi_ops wmi_ops = {
        .gen_addba_send = ath10k_wmi_op_gen_addba_send,
        .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
        .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+       .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
        /* .gen_bcn_tmpl not implemented */
        /* .gen_prb_tmpl not implemented */
        /* .gen_p2p_go_bcn_ie not implemented */
@@ -6479,6 +7120,7 @@ static const struct wmi_ops wmi_10_1_ops = {
        .gen_addba_send = ath10k_wmi_op_gen_addba_send,
        .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
        .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+       .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
        /* .gen_bcn_tmpl not implemented */
        /* .gen_prb_tmpl not implemented */
        /* .gen_p2p_go_bcn_ie not implemented */
@@ -6545,6 +7187,7 @@ static const struct wmi_ops wmi_10_2_ops = {
        .gen_addba_send = ath10k_wmi_op_gen_addba_send,
        .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
        .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+       .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
 };
 
 static const struct wmi_ops wmi_10_2_4_ops = {
@@ -6606,6 +7249,8 @@ static const struct wmi_ops wmi_10_2_4_ops = {
        .gen_addba_send = ath10k_wmi_op_gen_addba_send,
        .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
        .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+       .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+       .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
        /* .gen_bcn_tmpl not implemented */
        /* .gen_prb_tmpl not implemented */
        /* .gen_p2p_go_bcn_ie not implemented */
index 3e5a1591f772e98981ba823ccbee33ef5ab69a25..6e84d1c1a6ca5e12d8cb7d9df7144c3e8c62b122 100644 (file)
@@ -73,6 +73,25 @@ struct wmi_cmd_hdr {
 #define HTC_PROTOCOL_VERSION    0x0002
 #define WMI_PROTOCOL_VERSION    0x0002
 
+/*
+ * There is no signed version of __le32, so as a temporary solution we
+ * define our own version. The idea comes from fs/ntfs/types.h.
+ *
+ * The a_ prefix is used so that it won't conflict with a proper
+ * implementation, should one later be added to linux/types.h.
+ */
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+       return (__force a_sle32)cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+       return le32_to_cpu((__force __le32)val);
+}
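/* A minimal usage sketch for the helpers above: a negative host value
 * survives the round trip through the signed little-endian wire type.
 */
static inline void a_sle32_example(void)
{
        a_sle32 wire = a_cpu_to_sle32(-10);     /* host order -> LE wire */
        s32 host = a_sle32_to_cpu(wire);        /* LE wire -> host order */

        WARN_ON(host != -10);
}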
+
 enum wmi_service {
        WMI_SERVICE_BEACON_OFFLOAD = 0,
        WMI_SERVICE_SCAN_OFFLOAD,
@@ -3642,8 +3661,18 @@ struct wmi_pdev_get_tpc_config_cmd {
        __le32 param;
 } __packed;
 
+#define WMI_TPC_CONFIG_PARAM           1
 #define WMI_TPC_RATE_MAX               160
 #define WMI_TPC_TX_N_CHAIN             4
+#define WMI_TPC_PREAM_TABLE_MAX                10
+#define WMI_TPC_FLAG                   3
+#define WMI_TPC_BUF_SIZE               10
+
+enum wmi_tpc_table_type {
+       WMI_TPC_TABLE_TYPE_CDD = 0,
+       WMI_TPC_TABLE_TYPE_STBC = 1,
+       WMI_TPC_TABLE_TYPE_TXBF = 2,
+};
 
 enum wmi_tpc_config_event_flag {
        WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD     = 0x1,
@@ -3657,7 +3686,7 @@ struct wmi_pdev_tpc_config_event {
        __le32 phy_mode;
        __le32 twice_antenna_reduction;
        __le32 twice_max_rd_power;
-       s32 twice_antenna_gain;
+       a_sle32 twice_antenna_gain;
        __le32 power_limit;
        __le32 rate_max;
        __le32 num_tx_chain;
@@ -4253,6 +4282,11 @@ enum wmi_rate_preamble {
        WMI_RATE_PREAMBLE_VHT,
 };
 
+#define ATH10K_HW_NSS(rate)            (1 + (((rate) >> 4) & 0x3))
+#define ATH10K_HW_PREAMBLE(rate)       (((rate) >> 6) & 0x3)
+#define ATH10K_HW_RATECODE(rate, nss, preamble)        \
+       (((preamble) << 6) | ((nss) << 4) | (rate))
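/* A worked sketch for the rate-code macros above: the preamble sits in
 * bits [7:6], (NSS - 1) in bits [5:4] and the raw rate index in the low
 * bits, so encode and decode are mutually consistent (this assumes the
 * preamble value fits the 2-bit field, as the decode macros require).
 */
static inline void ath10k_ratecode_example(void)
{
        /* rate index 5, NSS 2 (field value 1), VHT preamble */
        u8 rc = ATH10K_HW_RATECODE(5, 1, WMI_RATE_PREAMBLE_VHT);

        WARN_ON(ATH10K_HW_NSS(rc) != 2);
        WARN_ON(ATH10K_HW_PREAMBLE(rc) != WMI_RATE_PREAMBLE_VHT);
}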
+
 /* Value to disable fixed rate setting */
 #define WMI_FIXED_RATE_NONE    (0xff)
 
@@ -6064,6 +6098,7 @@ struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
 struct ath10k_fw_stats_peer;
+struct ath10k_fw_stats;
 
 int ath10k_wmi_attach(struct ath10k *ar);
 void ath10k_wmi_detach(struct ath10k *ar);
@@ -6145,4 +6180,13 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
                                 int left_len, struct wmi_phyerr_ev_arg *arg);
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+                                     struct ath10k_fw_stats *fw_stats,
+                                     char *buf);
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+                                    struct ath10k_fw_stats *fw_stats,
+                                    char *buf);
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head);
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head);
+
 #endif /* _WMI_H_ */
index 3fda750db2a90d571fb1bb506ea543e5cc160e2f..76b682e566ab217f842648cbd3e4e0177d3ef1da 100644 (file)
@@ -2217,7 +2217,7 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
 
        /* enter / leave wow suspend on first vif always */
        first_vif = ath6kl_vif_first(ar);
-       if (WARN_ON(unlikely(!first_vif)) ||
+       if (WARN_ON(!first_vif) ||
            !ath6kl_cfg80211_ready(first_vif))
                return -EIO;
 
@@ -2297,7 +2297,7 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
        int ret;
 
        vif = ath6kl_vif_first(ar);
-       if (WARN_ON(unlikely(!vif)) ||
+       if (WARN_ON(!vif) ||
            !ath6kl_cfg80211_ready(vif))
                return -EIO;
 
index e481f14b98787e88354278d26e3c59615ff2851c..fffb65b3e6526428f1758620c219a9cbaf784e08 100644 (file)
@@ -1085,9 +1085,7 @@ static int htc_setup_tx_complete(struct htc_target *target)
        send_pkt->completion = NULL;
        ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
        status = ath6kl_htc_tx_issue(target, send_pkt);
-
-       if (send_pkt != NULL)
-               htc_reclaim_txctrl_buf(target, send_pkt);
+       htc_reclaim_txctrl_buf(target, send_pkt);
 
        return status;
 }
index 6314ae2e93e34d0523b763031230bed4943244b7..9d17a5375f6497c6e02ed368e332e7cd4fc27fe6 100644 (file)
 #define AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ      -127
 #define AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ      -116
 
-#define AR_PHY_CCA_NOM_VAL_9287_2GHZ           -120
+#define AR_PHY_CCA_NOM_VAL_9287_2GHZ           -112
 #define AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ    -127
-#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ    -110
+#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ    -97
 
 #endif
index 79fd3b2dcbdef9fc117b99177cb44fe46a581ae4..8b238c15916df3281189b015a7df6a9034263161 100644 (file)
@@ -857,7 +857,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
                               qca956x_1p0_common_rx_gain_table);
                INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
                               qca956x_1p0_common_rx_gain_bounds);
-               INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
                               qca956x_1p0_xlna_only);
        } else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesRxGain,
@@ -942,7 +942,7 @@ static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
                               ar9462_2p1_baseband_core_mix_rxgain);
                INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
                               ar9462_2p1_baseband_postamble_mix_rxgain);
-               INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
                               ar9462_2p1_baseband_postamble_5g_xlna);
        } else if (AR_SREV_9462_20(ah)) {
                INIT_INI_ARRAY(&ah->iniModesRxGain,
@@ -951,7 +951,7 @@ static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
                               ar9462_2p0_baseband_core_mix_rxgain);
                INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
                               ar9462_2p0_baseband_postamble_mix_rxgain);
-               INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
                               ar9462_2p0_baseband_postamble_5g_xlna);
        }
 }
@@ -961,12 +961,12 @@ static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
        if (AR_SREV_9462_21(ah)) {
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                               ar9462_2p1_common_5g_xlna_only_rxgain);
-               INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
                               ar9462_2p1_baseband_postamble_5g_xlna);
        } else if (AR_SREV_9462_20(ah)) {
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                               ar9462_2p0_common_5g_xlna_only_rxgain);
-               INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
                               ar9462_2p0_baseband_postamble_5g_xlna);
        }
 }
index 1ad66b76749b7cbfd6d76fd40843744512308ef8..201425e7f9cb94f031ad47d6d2366543b870d985 100644 (file)
@@ -926,19 +926,18 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
                 */
                if ((ar9003_hw_get_rx_gain_idx(ah) == 2) ||
                    (ar9003_hw_get_rx_gain_idx(ah) == 3)) {
-                       REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+                       REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna,
                                        modesIndex, regWrites);
                }
-
-               if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
-                       REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
-                                       modesIndex, regWrites);
        }
 
        if (AR_SREV_9550(ah) || AR_SREV_9561(ah))
                REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
                                regWrites);
 
+       if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
+               REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna,
+                               modesIndex, regWrites);
        /*
         * TXGAIN initvals.
         */
index e8454db17634b95450f643ada6f52db82a57aa84..4f0a3f6b0c52bab025b332332ef4e30f0a63b343 100644 (file)
@@ -919,7 +919,7 @@ struct ath_hw {
        struct ar5416IniArray iniCckfirJapan2484;
        struct ar5416IniArray iniModes_9271_ANI_reg;
        struct ar5416IniArray ini_radio_post_sys2ant;
-       struct ar5416IniArray ini_modes_rxgain_5g_xlna;
+       struct ar5416IniArray ini_modes_rxgain_xlna;
        struct ar5416IniArray ini_modes_rxgain_bb_core;
        struct ar5416IniArray ini_modes_rxgain_bb_postamble;
 
index 5d532c7b813fd1d356884134db2a4ebb7fb2d7f4..2e2b92ba96b8ba93203f72f3ecf98d875002b5a7 100644 (file)
@@ -881,6 +881,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);
+       hw->extra_tx_headroom = 4;
 
        hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
        hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
index 9d687121b2bfb2f753aa517035b793c33e12f9a1..2303ef96299d5bfe6eed3624a5c8e44a0630f943 100644 (file)
@@ -284,10 +284,10 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
        if (cd == NULL)
                return false;
 
-       dpd->last_pulse_ts = event->ts;
        /* reset detector on time stamp wraparound, caused by TSF reset */
        if (event->ts < dpd->last_pulse_ts)
                dpd_reset(dpd);
+       dpd->last_pulse_ts = event->ts;
 
        /* do type individual pattern matching */
        for (i = 0; i < dpd->num_radar_types; i++) {
index ce8c0381825e674c2eca80510761ae186283073f..6dfedc8bd6a3d7efb1670e34cc1594663bca0582 100644 (file)
@@ -1,5 +1,6 @@
 config WIL6210
        tristate "Wilocity 60g WiFi card wil6210 support"
+       select WANT_DEV_COREDUMP
        depends on CFG80211
        depends on PCI
        default n
index 64b432625fbbdb5ad5c33d9920065937ac2f5572..fdf63d5fe82bfcd12592b8b716bf7716459cf92b 100644 (file)
@@ -17,6 +17,7 @@ wil6210-y += pmc.o
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 wil6210-y += wil_platform.o
 wil6210-y += ethtool.o
+wil6210-y += wil_crash_dump.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
index d1a1e160ef31132f35d56ace1e7dd12a13b267e6..97bc186f9728247a0048476065d17fe0021a3573 100644 (file)
@@ -1373,6 +1373,12 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
                                }
                        }
                        spin_unlock_bh(&p->tid_rx_lock);
+                       seq_printf(s,
+                                  "Rx invalid frame: non-data %lu, short %lu, large %lu\n",
+                                  p->stats.rx_non_data_frame,
+                                  p->stats.rx_short_frame,
+                                  p->stats.rx_large_frame);
+
                        seq_puts(s, "Rx/MCS:");
                        for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
                             mcs++)
index a371f036d0546388c3bcfa6317e5d9e4ca6857f8..06fc46f85c858480092504210c5266b41e3aa3c8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -347,7 +347,12 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
        wil6210_mask_irq_misc(wil);
 
        if (isr & ISR_MISC_FW_ERROR) {
-               wil_err(wil, "Firmware error detected\n");
+               u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
+               u32 ucode_assert_code = wil_r(wil, RGF_UCODE_ASSERT_CODE);
+
+               wil_err(wil,
+                       "Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
+                       fw_assert_code, ucode_assert_code);
                clear_bit(wil_status_fwready, wil->status);
                /*
                 * do not clear @isr here - we do 2-nd part in thread
@@ -386,6 +391,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
        wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
 
        if (isr & ISR_MISC_FW_ERROR) {
+               wil_fw_core_dump(wil);
                wil_notify_fw_error(wil);
                isr &= ~ISR_MISC_FW_ERROR;
                wil_fw_error_recovery(wil);
index 2fb04c51da53f2dd4fd58f7995eee85a1a93dcd9..aade16b126c49b86338b8f90e5a231f89ec4eb09 100644 (file)
@@ -203,11 +203,13 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
         * - disconnect single STA, already disconnected
         * - disconnect all
         *
-        * For "disconnect all", there are 2 options:
+        * For "disconnect all", there are 3 options:
         * - bssid == NULL
+        * - bssid is broadcast address (ff:ff:ff:ff:ff:ff)
         * - bssid is our MAC address
         */
-       if (bssid && memcmp(ndev->dev_addr, bssid, ETH_ALEN)) {
+       if (bssid && !is_broadcast_ether_addr(bssid) &&
+           !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
                cid = wil_find_cid(wil, bssid);
                wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
                             bssid, cid, reason_code);
@@ -765,6 +767,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        if (wil->hw_version == HW_VER_UNKNOWN)
                return -ENODEV;
 
+       set_bit(wil_status_resetting, wil->status);
+
        cancel_work_sync(&wil->disconnect_worker);
        wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
        wil_bcast_fini(wil);
@@ -851,6 +855,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 void wil_fw_error_recovery(struct wil6210_priv *wil)
 {
        wil_dbg_misc(wil, "starting fw error recovery\n");
+
+       if (test_bit(wil_status_resetting, wil->status)) {
+               wil_info(wil, "Reset already in progress\n");
+               return;
+       }
+
        wil->recovery_state = fw_recovery_pending;
        schedule_work(&wil->fw_error_worker);
 }
index feff1ef10fb3d757fc0f00a0fde7a4e127016918..1a3142c332e1f68d3141372a6f5c228753ccb982 100644 (file)
@@ -260,6 +260,7 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
 #ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 
 static int wil6210_suspend(struct device *dev, bool is_runtime)
 {
@@ -307,7 +308,6 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
        return rc;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int wil6210_pm_suspend(struct device *dev)
 {
        return wil6210_suspend(dev, false);
index 8a8cdc61b25ba46f44b97d6b5b698ed70ffecd60..5ca0307a3274dd7ab9cf3d31c950e856aeb343cc 100644 (file)
@@ -110,7 +110,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
         */
        for (i = 0; i < num_descriptors; i++) {
                struct vring_tx_desc *_d = &pmc->pring_va[i];
-               struct vring_tx_desc dd, *d = &dd;
+               struct vring_tx_desc dd = {}, *d = &dd;
                int j = 0;
 
                pmc->descriptors[i].va = dma_alloc_coherent(dev,
index 9238c1ac23dd0311509b6d769e00ee70ce18ec81..e3d1be82f314d32d84d06c82efe9717d1c61377a 100644 (file)
@@ -205,6 +205,32 @@ out:
        spin_unlock(&sta->tid_rx_lock);
 }
 
+/* process BAR frame, called in NAPI context */
+void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq)
+{
+       struct wil_sta_info *sta = &wil->sta[cid];
+       struct wil_tid_ampdu_rx *r;
+
+       spin_lock(&sta->tid_rx_lock);
+
+       r = sta->tid_rx[tid];
+       if (!r) {
+               wil_err(wil, "BAR for non-existing CID %d TID %d\n", cid, tid);
+               goto out;
+       }
+       if (seq_less(seq, r->head_seq_num)) {
+               wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n",
+                       seq, r->head_seq_num);
+               goto out;
+       }
+       wil_dbg_txrx(wil, "BAR: CID %d TID %d Seq 0x%03x head 0x%03x\n",
+                    cid, tid, seq, r->head_seq_num);
+       wil_release_reorder_frames(wil, r, seq);
+
+out:
+       spin_unlock(&sta->tid_rx_lock);
+}
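/* Design note: per 802.11 BlockAckReq semantics, the BAR moves the
 * receive reorder window forward - wil_release_reorder_frames() flushes
 * every buffered frame below 'seq' to the stack and advances
 * head_seq_num to 'seq'.
 */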
+
 struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
                                                int size, u16 ssn)
 {
index 6229110d558a1a043566091859144b582c34c6c9..0f8b6877497edff06b21346db23d06e017118429 100644 (file)
@@ -358,6 +358,13 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
        }
 }
 
+/* similar to the ieee80211_ version, but takes only the first byte of FC */
+static inline int wil_is_back_req(u8 fc)
+{
+       return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+              (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
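/* A quick sanity sketch for the check above: a BlockAckReq carries
 * IEEE80211_FTYPE_CTL (0x04) | IEEE80211_STYPE_BACK_REQ (0x80) in the
 * first FC byte, while a data frame carries IEEE80211_FTYPE_DATA (0x08).
 */
static inline void wil_is_back_req_example(void)
{
        WARN_ON(!wil_is_back_req(0x84));        /* control / BAR frame */
        WARN_ON(wil_is_back_req(0x08));         /* data frame */
}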
+
 /**
  * reap 1 frame from @swhead
  *
@@ -379,14 +386,16 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        u16 dmalen;
        u8 ftype;
        int cid;
-       int i = (int)vring->swhead;
+       int i;
        struct wil_net_stats *stats;
 
        BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
 
+again:
        if (unlikely(wil_vring_is_empty(vring)))
                return NULL;
 
+       i = (int)vring->swhead;
        _d = &vring->va[i].rx;
        if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
                /* it is not an error, we just reached the end of the Rx done area */
@@ -398,7 +407,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        wil_vring_advance_head(vring, 1);
        if (!skb) {
                wil_err(wil, "No Rx skb at [%d]\n", i);
-               return NULL;
+               goto again;
        }
        d = wil_skb_rxdesc(skb);
        *d = *_d;
@@ -409,13 +418,17 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
 
        trace_wil6210_rx(i, d);
        wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
-       wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
+       wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);
 
+       cid = wil_rxdesc_cid(d);
+       stats = &wil->sta[cid].stats;
+
        if (unlikely(dmalen > sz)) {
                wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+               stats->rx_large_frame++;
                kfree_skb(skb);
-               return NULL;
+               goto again;
        }
        skb_trim(skb, dmalen);
 
@@ -424,8 +437,6 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
                          skb->data, skb_headlen(skb), false);
 
-       cid = wil_rxdesc_cid(d);
-       stats = &wil->sta[cid].stats;
        stats->last_mcs_rx = wil_rxdesc_mcs(d);
        if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
                stats->rx_per_mcs[stats->last_mcs_rx]++;
@@ -437,24 +448,47 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        /* no extra checks if in sniffer mode */
        if (ndev->type != ARPHRD_ETHER)
                return skb;
-       /*
-        * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
+       /* Non-data frames may be delivered through the Rx DMA channel (e.g. BAR).
         * The driver should recognize them by the frame type found in the Rx
         * descriptor; if the type is not data, it is an 802.11 frame as-is.
         */
        ftype = wil_rxdesc_ftype(d) << 2;
        if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
-               wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
-               /* TODO: process it */
+               u8 fc1 = wil_rxdesc_fc1(d);
+               int mid = wil_rxdesc_mid(d);
+               int tid = wil_rxdesc_tid(d);
+               u16 seq = wil_rxdesc_seq(d);
+
+               wil_dbg_txrx(wil,
+                            "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+                            fc1, mid, cid, tid, seq);
+               stats->rx_non_data_frame++;
+               if (wil_is_back_req(fc1)) {
+                       wil_dbg_txrx(wil,
+                                    "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
+                                    mid, cid, tid, seq);
+                       wil_rx_bar(wil, cid, tid, seq);
+               } else {
+                       /* print the full info again; this print alone can be
+                        * enabled without the overhead of logging every Rx frame
+                        */
+                       wil_dbg_txrx(wil,
+                                    "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+                                    fc1, mid, cid, tid, seq);
+                       wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
+                                         (const void *)d, sizeof(*d), false);
+                       wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+                                         skb->data, skb_headlen(skb), false);
+               }
                kfree_skb(skb);
-               return NULL;
+               goto again;
        }
 
        if (unlikely(skb->len < ETH_HLEN + snaplen)) {
                wil_err(wil, "Short frame, len = %d\n", skb->len);
-               /* TODO: process it (i.e. BAR) */
+               stats->rx_short_frame++;
                kfree_skb(skb);
-               return NULL;
+               goto again;
        }
 
        /* L4 IDENT is on when HW calculated checksum, check status
@@ -1633,7 +1667,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                goto drop;
        }
        if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
-               wil_err(wil, "FW not connected\n");
+               wil_err_ratelimited(wil, "FW not connected\n");
                goto drop;
        }
        if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
index 82a8f9a030e7e9179db31f6be1cec985a801e8a3..ee7c7b4b9a178845e94530e0b63b3a62c4594456 100644 (file)
@@ -464,6 +464,12 @@ static inline int wil_rxdesc_subtype(struct vring_rx_desc *d)
        return WIL_GET_BITS(d->mac.d0, 12, 15);
 }
 
+/* first byte (with frame type/subtype) of the FC field */
+static inline u8 wil_rxdesc_fc1(struct vring_rx_desc *d)
+{
+       return (u8)(WIL_GET_BITS(d->mac.d0, 10, 15) << 2);
+}
+
 static inline int wil_rxdesc_seq(struct vring_rx_desc *d)
 {
        return WIL_GET_BITS(d->mac.d0, 16, 27);
@@ -501,6 +507,7 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
 
 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
 void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
+void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq);
 struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
                                                int size, u16 ssn);
 void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
index dd4ea926b8e31eb8721a8c8e2f2a209fd7f0017b..f619bf234353262607cfccc08b3513b4bdb9e96e 100644 (file)
@@ -246,6 +246,10 @@ struct RGF_ICR {
 #define RGF_USER_JTAG_DEV_ID   (0x880b34) /* device ID */
        #define JTAG_DEV_ID_SPARROW_B0  (0x2632072f)
 
+/* crash codes for FW/ucode are stored here */
+#define RGF_FW_ASSERT_CODE             (0x91f020)
+#define RGF_UCODE_ASSERT_CODE          (0x91f028)
+
 enum {
        HW_VER_UNKNOWN,
        HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
@@ -405,6 +409,7 @@ enum { /* for wil6210_priv.status */
        wil_status_reset_done,
        wil_status_irqen, /* FIXME: interrupts enabled - for debug */
        wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
+       wil_status_resetting, /* reset in progress */
        wil_status_last /* keep last */
 };
 
@@ -465,6 +470,9 @@ struct wil_net_stats {
        unsigned long   tx_bytes;
        unsigned long   tx_errors;
        unsigned long   rx_dropped;
+       unsigned long   rx_non_data_frame;
+       unsigned long   rx_short_frame;
+       unsigned long   rx_large_frame;
        u16 last_mcs_rx;
        u64 rx_per_mcs[WIL_MCS_MAX + 1];
 };
@@ -820,4 +828,6 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_resume(struct wil6210_priv *wil, bool is_runtime);
 
+void wil_fw_core_dump(struct wil6210_priv *wil);
+
 #endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
new file mode 100644 (file)
index 0000000..7e70934
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include <linux/devcoredump.h>
+
+static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
+                                       u32 *out_dump_size, u32 *out_host_min)
+{
+       int i;
+       const struct fw_map *map;
+       u32 host_min, host_max, tmp_max;
+
+       if (!out_dump_size)
+               return -EINVAL;
+
+       /* calculate the total size of the unpacked crash dump */
+       BUILD_BUG_ON(ARRAY_SIZE(fw_mapping) == 0);
+       map = &fw_mapping[0];
+       host_min = map->host;
+       host_max = map->host + (map->to - map->from);
+
+       for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
+               map = &fw_mapping[i];
+
+               if (map->host < host_min)
+                       host_min = map->host;
+
+               tmp_max = map->host + (map->to - map->from);
+               if (tmp_max > host_max)
+                       host_max = tmp_max;
+       }
+
+       *out_dump_size = host_max - host_min;
+       if (out_host_min)
+               *out_host_min = host_min;
+
+       return 0;
+}
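/* A worked example of the bounds math above, with two hypothetical
 * fw_mapping entries (the values are illustrative only):
 *
 *   { .from = 0x0, .to = 0x10000, .host = 0x880000 } -> ends at 0x890000
 *   { .from = 0x0, .to = 0x08000, .host = 0x840000 } -> ends at 0x848000
 *
 * host_min = 0x840000 and host_max = 0x890000, so the unpacked dump is
 * 0x50000 bytes; each region is later copied at (map->host - host_min),
 * and any gap between regions stays zero-filled in the vzalloc'ed buffer.
 */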
+
+static int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest,
+                                 u32 size)
+{
+       int i;
+       const struct fw_map *map;
+       void *data;
+       u32 host_min, dump_size, offset, len;
+
+       if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) {
+               wil_err(wil, "%s: fail to obtain crash dump size\n", __func__);
+               return -EINVAL;
+       }
+
+       if (dump_size > size) {
+               wil_err(wil, "%s: not enough space for dump. Need %d have %d\n",
+                       __func__, dump_size, size);
+               return -EINVAL;
+       }
+
+       /* copy to crash dump area */
+       for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+               map = &fw_mapping[i];
+
+               data = (void * __force)wil->csr + HOSTADDR(map->host);
+               len = map->to - map->from;
+               offset = map->host - host_min;
+
+               wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n",
+                            __func__, fw_mapping[i].name, len, offset);
+
+               wil_memcpy_fromio_32((void * __force)(dest + offset),
+                                    (const void __iomem * __force)data, len);
+       }
+
+       return 0;
+}
+
+void wil_fw_core_dump(struct wil6210_priv *wil)
+{
+       void *fw_dump_data;
+       u32 fw_dump_size;
+
+       if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) {
+               wil_err(wil, "%s: fail to get fw dump size\n", __func__);
+               return;
+       }
+
+       fw_dump_data = vzalloc(fw_dump_size);
+       if (!fw_dump_data)
+               return;
+
+       if (wil_fw_copy_crash_dump(wil, fw_dump_data, fw_dump_size)) {
+               vfree(fw_dump_data);
+               return;
+       }
+       /* fw_dump_data will be freed by the devcoredump release function
+        * after a 5 min timeout
+        */
+       dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL);
+       wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__,
+                fw_dump_size);
+}
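/* Usage note: dev_coredumpv() takes ownership of the vmalloc'ed buffer
 * and exposes it under /sys/class/devcoredump/; the devcoredump core
 * frees it from its release path (once read, or after its timeout),
 * which is why vfree() above is needed only on the copy-failure path.
 */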
index 2f35d4c51f344332461a0b8e2f4b19cc2a6986a4..61121892c6cae5cf1d03be2ecb8007bf9ccfea98 100644 (file)
@@ -1120,7 +1120,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
                        cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
                cmd.sniffer_cfg.phy_support =
                        cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
-                                   ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+                                   ? WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS);
        } else {
                /* Initialize offload (in non-sniffer mode).
                 * Linux IP stack always calculates IP checksum
index 28490702124a0da53bb2834f1ca880a452d6523b..71d3e9adbf3c02b4d5ff50843a2b4b06cd96b6a5 100644 (file)
@@ -120,6 +120,7 @@ MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if over
 #ifdef CONFIG_B43_BCMA
 static const struct bcma_device_id b43_bcma_tbl[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
+       BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x15, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS),
index fe3dc126b149f8fd014f0bd18ff407d80af00003..ab42b1fea03c170e6728f5b71fe720aa61174053 100644 (file)
@@ -82,5 +82,6 @@ config BRCM_TRACING
 config BRCMDBG
        bool "Broadcom driver debug functions"
        depends on BRCMSMAC || BRCMFMAC
+       select WANT_DEV_COREDUMP
        ---help---
          Selecting this enables additional code for debug purposes.
index 89e6a4dc105ecff9e0251d44cc576bc8e5f4facd..230cad788ace44ebe9bff08b3c92562bf0887159 100644 (file)
@@ -65,6 +65,8 @@ struct brcmf_bus_dcmd {
  * @rxctl: receive a control response message from dongle.
  * @gettxq: obtain a reference of bus transmit queue (optional).
  * @wowl_config: specify if dongle is configured for wowl when going to suspend
+ * @get_ramsize: obtain size of device memory.
+ * @get_memdump: obtain device memory dump in provided buffer.
  *
  * This structure provides an abstract interface towards the
  * bus specific driver. For control messages to common driver
@@ -79,6 +81,8 @@ struct brcmf_bus_ops {
        int (*rxctl)(struct device *dev, unsigned char *msg, uint len);
        struct pktq * (*gettxq)(struct device *dev);
        void (*wowl_config)(struct device *dev, bool enabled);
+       size_t (*get_ramsize)(struct device *dev);
+       int (*get_memdump)(struct device *dev, void *data, size_t len);
 };
 
 
@@ -185,6 +189,23 @@ void brcmf_bus_wowl_config(struct brcmf_bus *bus, bool enabled)
                bus->ops->wowl_config(bus->dev, enabled);
 }
 
+static inline size_t brcmf_bus_get_ramsize(struct brcmf_bus *bus)
+{
+       if (!bus->ops->get_ramsize)
+               return 0;
+
+       return bus->ops->get_ramsize(bus->dev);
+}
+
+static inline
+int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
+{
+       if (!bus->ops->get_memdump)
+               return -EOPNOTSUPP;
+
+       return bus->ops->get_memdump(bus->dev, data, len);
+}
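/* A hedged sketch of how a bus driver could wire up the two new
 * callbacks; the structure tag is real, but the function names and the
 * RAM size below are illustrative only.
 */
static size_t example_bus_get_ramsize(struct device *dev)
{
        return 0x80000;         /* device RAM size in bytes */
}

static int example_bus_get_memdump(struct device *dev, void *data,
                                   size_t len)
{
        /* copy up to len bytes of device memory into data */
        return 0;
}

static const struct brcmf_bus_ops example_bus_ops = {
        .get_ramsize = example_bus_get_ramsize,
        .get_memdump = example_bus_get_memdump,
};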
+
 /*
  * interface functions from common layer
  */
index 891f4ed8c5e381d52ddf89438e95c763bc6087ae..deb5f78dcacc0d9ef188360d300a7b4f1bcfba19 100644 (file)
@@ -840,7 +840,6 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                        err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO);
                }
                if (!err) {
-                       set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
                        brcmf_dbg(INFO, "IF Type = AP\n");
                }
        } else {
@@ -2432,6 +2431,9 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
        struct brcmf_sta_info_le sta_info_le;
        u32 sta_flags;
        u32 is_tdls_peer;
+       s32 total_rssi;
+       s32 count_rssi;
+       u32 i;
 
        brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
        if (!check_vif_up(ifp->vif))
@@ -2478,13 +2480,13 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts);
                if (sinfo->tx_packets) {
                        sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
-                       sinfo->txrate.legacy = le32_to_cpu(sta_info_le.tx_rate);
-                       sinfo->txrate.legacy /= 100;
+                       sinfo->txrate.legacy =
+                               le32_to_cpu(sta_info_le.tx_rate) / 100;
                }
                if (sinfo->rx_packets) {
                        sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
-                       sinfo->rxrate.legacy = le32_to_cpu(sta_info_le.rx_rate);
-                       sinfo->rxrate.legacy /= 100;
+                       sinfo->rxrate.legacy =
+                               le32_to_cpu(sta_info_le.rx_rate) / 100;
                }
                if (le16_to_cpu(sta_info_le.ver) >= 4) {
                        sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
@@ -2492,12 +2494,61 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                        sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
                        sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
                }
+               total_rssi = 0;
+               count_rssi = 0;
+               for (i = 0; i < BRCMF_ANT_MAX; i++) {
+                       if (sta_info_le.rssi[i]) {
+                               sinfo->chain_signal_avg[count_rssi] =
+                                       sta_info_le.rssi[i];
+                               sinfo->chain_signal[count_rssi] =
+                                       sta_info_le.rssi[i];
+                               total_rssi += sta_info_le.rssi[i];
+                               count_rssi++;
+                       }
+               }
+               if (count_rssi) {
+                       sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+                       sinfo->chains = count_rssi;
+
+                       sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+                       total_rssi /= count_rssi;
+                       sinfo->signal = total_rssi;
+               }
        }
 done:
        brcmf_dbg(TRACE, "Exit\n");
        return err;
 }
 
+static int
+brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
+                           int idx, u8 *mac, struct station_info *sinfo)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       s32 err;
+
+       brcmf_dbg(TRACE, "Enter, idx %d\n", idx);
+
+       if (idx == 0) {
+               cfg->assoclist.count = cpu_to_le32(BRCMF_MAX_ASSOCLIST);
+               err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_ASSOCLIST,
+                                            &cfg->assoclist,
+                                            sizeof(cfg->assoclist));
+               if (err) {
+                       brcmf_err("BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
+                                 err);
+                       cfg->assoclist.count = 0;
+                       return -EOPNOTSUPP;
+               }
+       }
+       if (idx < le32_to_cpu(cfg->assoclist.count)) {
+               memcpy(mac, cfg->assoclist.mac[idx], ETH_ALEN);
+               return brcmf_cfg80211_get_station(wiphy, ndev, mac, sinfo);
+       }
+       return -ENOENT;
+}
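/* Caller-side contract (a hedged sketch): nl80211 invokes .dump_station
 * with idx = 0, 1, 2, ... and stops at the first error, so the -ENOENT
 * above ends the walk once the assoclist snapshot taken at idx 0 is
 * exhausted:
 *
 *      for (idx = 0; ; idx++)
 *              if (dump_station(wiphy, ndev, idx, mac, &sinfo) < 0)
 *                      break;
 */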
+
 static s32
 brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
                           bool enabled, s32 timeout)
@@ -4199,8 +4250,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 
                brcmf_dbg(TRACE, "GO mode configuration complete\n");
        }
-       clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
        set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+       brcmf_net_setcarrier(ifp, true);
 
 exit:
        if ((err) && (!mbss)) {
@@ -4264,8 +4315,8 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
        }
        brcmf_set_mpc(ifp, 1);
        brcmf_configure_arp_offload(ifp, true);
-       set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
        clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+       brcmf_net_setcarrier(ifp, false);
 
        return err;
 }
@@ -4597,6 +4648,7 @@ static struct cfg80211_ops wl_cfg80211_ops = {
        .join_ibss = brcmf_cfg80211_join_ibss,
        .leave_ibss = brcmf_cfg80211_leave_ibss,
        .get_station = brcmf_cfg80211_get_station,
+       .dump_station = brcmf_cfg80211_dump_station,
        .set_tx_power = brcmf_cfg80211_set_tx_power,
        .get_tx_power = brcmf_cfg80211_get_tx_power,
        .add_key = brcmf_cfg80211_add_key,
@@ -4974,6 +5026,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
                                &ifp->vif->sme_state);
                } else
                        brcmf_bss_connect_done(cfg, ndev, e, true);
+               brcmf_net_setcarrier(ifp, true);
        } else if (brcmf_is_linkdown(e)) {
                brcmf_dbg(CONN, "Linkdown\n");
                if (!brcmf_is_ibssmode(ifp->vif)) {
@@ -4983,6 +5036,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
                brcmf_init_prof(ndev_to_prof(ndev));
                if (ndev != cfg_to_ndev(cfg))
                        complete(&cfg->vif_disabled);
+               brcmf_net_setcarrier(ifp, false);
        } else if (brcmf_is_nonetwork(cfg, e)) {
                if (brcmf_is_ibssmode(ifp->vif))
                        clear_bit(BRCMF_VIF_STATUS_CONNECTING,
@@ -6238,6 +6292,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
                else
                        *cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
        }
+       /* P2P may require that "if-events" are processed by fweh, so
+        * activate the already registered event handlers now and the
+        * rest once initialization has completed. drvr->config needs
+        * to be assigned before events are activated.
+        */
+       drvr->config = cfg;
+       err = brcmf_fweh_activate_events(ifp);
+       if (err) {
+               brcmf_err("FWEH activation failed (%d)\n", err);
+               goto wiphy_unreg_out;
+       }
 
        err = brcmf_p2p_attach(cfg, p2pdev_forced);
        if (err) {
@@ -6260,6 +6325,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
                                    brcmf_notify_tdls_peer_event);
        }
 
+       /* (re-) activate FWEH event handling */
+       err = brcmf_fweh_activate_events(ifp);
+       if (err) {
+               brcmf_err("FWEH activation failed (%d)\n", err);
+               goto wiphy_unreg_out;
+       }
+
        return cfg;
 
 wiphy_unreg_out:
index 3f5e5505d3291ed87e1cce4081dc691e6ffb8d09..6a878c8f883f9ee65afdf2948cfdfdcb5e2b6324 100644 (file)
@@ -143,7 +143,6 @@ struct brcmf_cfg80211_profile {
  * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
  * @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
  * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
- * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
  * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
  */
 enum brcmf_vif_status {
@@ -151,7 +150,6 @@ enum brcmf_vif_status {
        BRCMF_VIF_STATUS_CONNECTING,
        BRCMF_VIF_STATUS_CONNECTED,
        BRCMF_VIF_STATUS_DISCONNECTING,
-       BRCMF_VIF_STATUS_AP_CREATING,
        BRCMF_VIF_STATUS_AP_CREATED
 };
 
@@ -407,6 +405,7 @@ struct brcmf_cfg80211_info {
        struct brcmu_d11inf d11inf;
        bool wowl_enabled;
        u32 pre_wowl_pmmode;
+       struct brcmf_assoclist_le assoclist;
 };
 
 /**
index ffc3ace24903262a5a752aa686b77ee24c355142..f04833db2fd04eddeb7c33012198d4e769a2e9ef 100644 (file)
@@ -682,6 +682,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
        case BRCM_CC_43570_CHIP_ID:
        case BRCM_CC_4358_CHIP_ID:
        case BRCM_CC_43602_CHIP_ID:
+       case BRCM_CC_4371_CHIP_ID:
                return 0x180000;
        case BRCM_CC_4365_CHIP_ID:
        case BRCM_CC_4366_CHIP_ID:
index 0d39d80cee28792d99918a081a000e80ee1e0db1..21c7488b47329735a489647b306ede9240e2d0c9 100644 (file)
@@ -17,4 +17,7 @@
 
 extern const u8 ALLFFMAC[ETH_ALEN];
 
+/* Sets dongle media info (drv_version, mac address). */
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+
 #endif /* BRCMFMAC_COMMON_H */
index 8c2a280f0c9879f4ddc38b873cb4a176ebfc95ee..b5ab98ee14455aea9774814128faa8b0177a0071 100644 (file)
@@ -33,6 +33,7 @@
 #include "feature.h"
 #include "proto.h"
 #include "pcie.h"
+#include "common.h"
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -634,8 +635,7 @@ static int brcmf_netdev_stop(struct net_device *ndev)
 
        brcmf_cfg80211_down(ndev);
 
-       /* Set state and stop OS transmissions */
-       netif_stop_queue(ndev);
+       brcmf_net_setcarrier(ifp, false);
 
        return 0;
 }
@@ -669,8 +669,8 @@ static int brcmf_netdev_open(struct net_device *ndev)
                return -EIO;
        }
 
-       /* Allow transmit calls */
-       netif_start_queue(ndev);
+       /* Clear carrier; it is set when connected or in AP mode. */
+       netif_carrier_off(ndev);
        return 0;
 }
 
@@ -735,6 +735,24 @@ static void brcmf_net_detach(struct net_device *ndev)
                brcmf_cfg80211_free_netdev(ndev);
 }
 
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
+{
+       struct net_device *ndev;
+
+       brcmf_dbg(TRACE, "Enter, idx=%d carrier=%d\n", ifp->bssidx, on);
+
+       ndev = ifp->ndev;
+       brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on);
+       if (on) {
+               if (!netif_carrier_ok(ndev))
+                       netif_carrier_on(ndev);
+
+       } else {
+               if (netif_carrier_ok(ndev))
+                       netif_carrier_off(ndev);
+       }
+}
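
Summarizing the new helper against the hunks above (a reading aid, not new behaviour): carrier now tracks link state, and the helper couples the stack-visible carrier bit with the driver's own TX flow blocking.

    /* carrier transitions introduced by this patch:
     *   connect done / AP created           -> brcmf_net_setcarrier(ifp, true)
     *   linkdown / AP stopped / netdev stop -> brcmf_net_setcarrier(ifp, false)
     * While carrier is off, BRCMF_NETIF_STOP_REASON_DISCONNECTED keeps
     * the netif queue stopped, so the stack cannot queue frames to a
     * link that cannot carry them.
     */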
+
 static int brcmf_net_p2p_open(struct net_device *ndev)
 {
        brcmf_dbg(TRACE, "Enter\n");
@@ -828,8 +846,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
        } else {
                brcmf_dbg(INFO, "allocate netdev interface\n");
                /* Allocate netdev, including space for private structure */
-               ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
-                                   ether_setup);
+               ndev = alloc_netdev(sizeof(*ifp), is_p2pdev ? "p2p%d" : name,
+                                   NET_NAME_UNKNOWN, ether_setup);
                if (!ndev)
                        return ERR_PTR(-ENOMEM);
 
@@ -957,8 +975,8 @@ int brcmf_attach(struct device *dev)
        drvr->bus_if = dev_get_drvdata(dev);
        drvr->bus_if->drvr = drvr;
 
-       /* create device debugfs folder */
-       brcmf_debugfs_attach(drvr);
+       /* attach debug facilities */
+       brcmf_debug_attach(drvr);
 
        /* Attach and link in the protocol */
        ret = brcmf_proto_attach(drvr);
@@ -1021,12 +1039,7 @@ int brcmf_bus_start(struct device *dev)
        if (IS_ERR(ifp))
                return PTR_ERR(ifp);
 
-       if (brcmf_p2p_enable)
-               p2p_ifp = brcmf_add_if(drvr, 1, 0, false, "p2p%d", NULL);
-       else
-               p2p_ifp = NULL;
-       if (IS_ERR(p2p_ifp))
-               p2p_ifp = NULL;
+       p2p_ifp = NULL;
 
        /* signal bus ready */
        brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);
@@ -1060,11 +1073,13 @@ int brcmf_bus_start(struct device *dev)
                goto fail;
        }
 
-       ret = brcmf_fweh_activate_events(ifp);
-       if (ret < 0)
-               goto fail;
-
        ret = brcmf_net_attach(ifp, false);
+
+       if ((!ret) && (brcmf_p2p_enable)) {
+               p2p_ifp = drvr->iflist[1];
+               if (p2p_ifp)
+                       ret = brcmf_net_p2p_attach(p2p_ifp);
+       }
 fail:
        if (ret < 0) {
                brcmf_err("failed: %d\n", ret);
@@ -1076,20 +1091,12 @@ fail:
                        brcmf_fws_del_interface(ifp);
                        brcmf_fws_deinit(drvr);
                }
-               if (drvr->iflist[0]) {
+               if (ifp)
                        brcmf_net_detach(ifp->ndev);
-                       drvr->iflist[0] = NULL;
-               }
-               if (p2p_ifp) {
+               if (p2p_ifp)
                        brcmf_net_detach(p2p_ifp->ndev);
-                       drvr->iflist[1] = NULL;
-               }
                return ret;
        }
-       if ((brcmf_p2p_enable) && (p2p_ifp))
-               if (brcmf_net_p2p_attach(p2p_ifp) < 0)
-                       brcmf_p2p_enable = 0;
-
        return 0;
 }
 
@@ -1155,7 +1162,7 @@ void brcmf_detach(struct device *dev)
 
        brcmf_proto_detach(drvr);
 
-       brcmf_debugfs_detach(drvr);
+       brcmf_debug_detach(drvr);
        bus_if->drvr = NULL;
        kfree(drvr);
 }
index d81ff95acab5c3df29895b3a678343ade4f27505..2f9101b2ad343e29dfd4689e3bb2e937a39426b6 100644 (file)
@@ -154,10 +154,13 @@ struct brcmf_fws_mac_descriptor;
  *     netif stopped due to firmware signalling flow control.
  * @BRCMF_NETIF_STOP_REASON_FLOW:
  *     netif stopped due to flowring full.
+ * @BRCMF_NETIF_STOP_REASON_DISCONNECTED:
+ *     netif stopped due to not being connected (STA mode).
  */
 enum brcmf_netif_stop_reason {
-       BRCMF_NETIF_STOP_REASON_FWS_FC = 1,
-       BRCMF_NETIF_STOP_REASON_FLOW = 2
+       BRCMF_NETIF_STOP_REASON_FWS_FC = BIT(0),
+       BRCMF_NETIF_STOP_REASON_FLOW = BIT(1),
+       BRCMF_NETIF_STOP_REASON_DISCONNECTED = BIT(2)
 };
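
Switching the enum from consecutive integers to BIT() values lets several stop reasons be OR-ed into one per-interface mask, so the queue restarts only once every reason has cleared. A standalone sketch of that mask logic, assuming a bitmask field along the lines of the driver's existing netif_stop (the real bookkeeping lives in brcmf_txflowblock_if()):

    static void netif_stop_update(struct brcmf_if *ifp,
                                  enum brcmf_netif_stop_reason reason,
                                  bool stop)
    {
            if (stop)
                    ifp->netif_stop |= reason;      /* set this reason's bit */
            else
                    ifp->netif_stop &= ~reason;     /* clear this reason's bit */

            if (ifp->netif_stop)
                    netif_stop_queue(ifp->ndev);
            else
                    netif_wake_queue(ifp->ndev);    /* all reasons cleared */
    }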
 
 /**
@@ -213,8 +216,6 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state);
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
-
-/* Sets dongle media info (drv_version, mac address). */
-int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
 
 #endif /* BRCMFMAC_CORE_H */
index 2d6d0055385877087a467dfa1ab265538fdb7102..1299dccc78b4da22293677ec4cf0a1f763af2a19 100644 (file)
 #include <linux/debugfs.h>
 #include <linux/netdevice.h>
 #include <linux/module.h>
+#include <linux/devcoredump.h>
 
 #include <brcmu_wifi.h>
 #include <brcmu_utils.h>
 #include "core.h"
 #include "bus.h"
+#include "fweh.h"
 #include "debug.h"
 
 static struct dentry *root_folder;
 
+static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+                                     size_t len)
+{
+       void *dump;
+       size_t ramsize;
+
+       ramsize = brcmf_bus_get_ramsize(bus);
+       if (ramsize) {
+               dump = vzalloc(len + ramsize);
+               if (!dump)
+                       return -ENOMEM;
+               memcpy(dump, data, len);
+               brcmf_bus_get_memdump(bus, dump + len, ramsize);
+               dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+       }
+       return 0;
+}
+
+static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
+                                          const struct brcmf_event_msg *evtmsg,
+                                          void *data)
+{
+       brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
+
+       return brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
+                                         evtmsg->datalen);
+}
+
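One detail worth calling out, since the success path never frees 'dump': dev_coredumpv() takes ownership of the vzalloc'ed buffer, and the devcoredump framework vfree()s it once userspace has read the dump or it times out. Per the memcpy and RAM read-back above, the handed-over buffer is laid out as:

    /*   +---------------------+---------------------------+
     *   | event payload       | device RAM contents       |
     *   | (len bytes)         | (ramsize bytes)           |
     *   +---------------------+---------------------------+
     *   0                    len                 len + ramsize
     */
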
 void brcmf_debugfs_init(void)
 {
        root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
@@ -41,7 +71,7 @@ void brcmf_debugfs_exit(void)
        root_folder = NULL;
 }
 
-int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+int brcmf_debug_attach(struct brcmf_pub *drvr)
 {
        struct device *dev = drvr->bus_if->dev;
 
@@ -49,12 +79,18 @@ int brcmf_debugfs_attach(struct brcmf_pub *drvr)
                return -ENODEV;
 
        drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
+       if (IS_ERR(drvr->dbgfs_dir))
+               return PTR_ERR(drvr->dbgfs_dir);
 
-       return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
+
+       return brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
+                                  brcmf_debug_psm_watchdog_notify);
 }
 
-void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+void brcmf_debug_detach(struct brcmf_pub *drvr)
 {
+       brcmf_fweh_unregister(drvr, BRCMF_E_PSM_WATCHDOG);
+
        if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
                debugfs_remove_recursive(drvr->dbgfs_dir);
 }
index 48648ca44ba8e4d017a6c93f3af4a43b802490a3..d0d9676f7f9de952692f40523e97409c104a8e91 100644 (file)
@@ -109,8 +109,8 @@ struct brcmf_pub;
 #ifdef DEBUG
 void brcmf_debugfs_init(void);
 void brcmf_debugfs_exit(void);
-int brcmf_debugfs_attach(struct brcmf_pub *drvr);
-void brcmf_debugfs_detach(struct brcmf_pub *drvr);
+int brcmf_debug_attach(struct brcmf_pub *drvr);
+void brcmf_debug_detach(struct brcmf_pub *drvr);
 struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
 int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
                            int (*read_fn)(struct seq_file *seq, void *data));
@@ -121,11 +121,11 @@ static inline void brcmf_debugfs_init(void)
 static inline void brcmf_debugfs_exit(void)
 {
 }
-static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+static inline int brcmf_debug_attach(struct brcmf_pub *drvr)
 {
        return 0;
 }
-static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+static inline void brcmf_debug_detach(struct brcmf_pub *drvr)
 {
 }
 static inline
index 971920f77b68eb1bcc02043d7a19f14c5a9e68b2..4248f3c80e78e10016805cf4caed296b801935e2 100644 (file)
@@ -29,7 +29,7 @@
 #define BRCMF_FW_NVRAM_PCIEDEV_LEN             10      /* pcie/1/4/ + \0 */
 
 char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
-module_param_string(firmware_path, brcmf_firmware_path,
+module_param_string(alternative_fw_path, brcmf_firmware_path,
                    BRCMF_FW_PATH_LEN, 0440);
 
 enum nvram_parser_state {
index 383d6faf426b095f67719267d615bbcd9566e1da..3878b6f6cfce4ffeb6b583059d1c3b01f0e29c7e 100644 (file)
@@ -213,7 +213,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                                   is_p2pdev, emsg->ifname, emsg->addr);
                if (IS_ERR(ifp))
                        return;
-               brcmf_fws_add_interface(ifp);
+               if (!is_p2pdev)
+                       brcmf_fws_add_interface(ifp);
                if (!drvr->fweh.evt_handler[BRCMF_E_IF])
                        if (brcmf_net_attach(ifp, false) < 0)
                                return;
index 5434dcf64f7d8012206975ed743fe96145b924fb..b20fc0f82a4828a5111c8b4061af2928f56fc595 100644 (file)
@@ -72,6 +72,7 @@
 #define BRCMF_C_GET_BSS_INFO                   136
 #define BRCMF_C_GET_BANDLIST                   140
 #define BRCMF_C_SET_SCB_TIMEOUT                        158
+#define BRCMF_C_GET_ASSOCLIST                  159
 #define BRCMF_C_GET_PHYLIST                    180
 #define BRCMF_C_SET_SCAN_CHANNEL_TIME          185
 #define BRCMF_C_SET_SCAN_UNASSOC_TIME          187
index 297911f38fa0458e27ac1cfdd589dc01c4ef93b9..daa427b46712e8243be5365fca928183e2745fe3 100644 (file)
 #define BRCMF_COUNTRY_BUF_SZ           4
 #define BRCMF_ANT_MAX                  4
 
+#define BRCMF_MAX_ASSOCLIST            128
+
 /* join preference types for join_pref iovar */
 enum brcmf_join_pref_types {
        BRCMF_JOIN_PREF_RSSI = 1,
@@ -621,4 +623,15 @@ struct brcmf_rev_info_le {
        __le32 nvramrev;
 };
 
+/**
+ * struct brcmf_assoclist_le - request assoc list.
+ *
+ * @count: indicates number of stations.
+ * @mac: MAC addresses of stations.
+ */
+struct brcmf_assoclist_le {
+       __le32 count;
+       u8 mac[BRCMF_MAX_ASSOCLIST][ETH_ALEN];
+};
+
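With BRCMF_MAX_ASSOCLIST at 128 this is a fixed 4 + 128 * ETH_ALEN = 772-byte buffer, which matches the sizeof(cfg->assoclist) passed to BRCMF_C_GET_ASSOCLIST in the dump_station patch above. A compile-time check along those lines (illustrative, not part of the patch):

    BUILD_BUG_ON(sizeof(struct brcmf_assoclist_le) !=
                 sizeof(__le32) + BRCMF_MAX_ASSOCLIST * ETH_ALEN);
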
 #endif /* FWIL_TYPES_H_ */
index 7eff9de6885bb2be078686236fdc118222fe2e48..44e618f9d89046b9350ce6fbc4eabb16f392458e 100644 (file)
@@ -873,9 +873,6 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
        commonring = msgbuf->flowrings[flowid];
        atomic_dec(&commonring->outstanding_tx);
 
-       /* Hante: i believe this was a bug as tx_status->msg.ifidx was used
-        * in brcmf_txfinalize as index in drvr->iflist. Can you confirm/deny?
-        */
        brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
                         skb, true);
 }
index 37a8c352e077333198bf2e7e0428eaf8acf9574e..d224b3dd72edea583d5a952913c61915fe60b253 100644 (file)
@@ -2353,83 +2353,30 @@ void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev)
  * brcmf_p2p_attach() - attach for P2P.
  *
  * @cfg: driver private data for cfg80211 interface.
+ * @p2pdev_forced: create p2p device interface at attach.
  */
 s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
 {
-       struct brcmf_if *pri_ifp;
-       struct brcmf_if *p2p_ifp;
-       struct brcmf_cfg80211_vif *p2p_vif;
        struct brcmf_p2p_info *p2p;
-       struct brcmf_pub *drvr;
-       s32 bssidx;
+       struct brcmf_if *pri_ifp;
        s32 err = 0;
+       void *err_ptr;
 
        p2p = &cfg->p2p;
        p2p->cfg = cfg;
 
-       drvr = cfg->pub;
-
-       pri_ifp = brcmf_get_ifp(drvr, 0);
+       pri_ifp = brcmf_get_ifp(cfg->pub, 0);
        p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
 
        if (p2pdev_forced) {
-               p2p_ifp = drvr->iflist[1];
+               err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
+               if (IS_ERR(err_ptr)) {
+                       brcmf_err("P2P device creation failed.\n");
+                       err = PTR_ERR(err_ptr);
+               }
        } else {
-               p2p_ifp = NULL;
                p2p->p2pdev_dynamically = true;
        }
-       if (p2p_ifp) {
-               p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
-                                         false);
-               if (IS_ERR(p2p_vif)) {
-                       brcmf_err("could not create discovery vif\n");
-                       err = -ENOMEM;
-                       goto exit;
-               }
-
-               p2p_vif->ifp = p2p_ifp;
-               p2p_ifp->vif = p2p_vif;
-               p2p_vif->wdev.netdev = p2p_ifp->ndev;
-               p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
-               SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
-
-               p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
-
-               brcmf_p2p_generate_bss_mac(p2p, NULL);
-               memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
-               brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
-
-               brcmf_fweh_p2pdev_setup(pri_ifp, true);
-
-               /* Initialize P2P Discovery in the firmware */
-               err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
-               if (err < 0) {
-                       brcmf_err("set p2p_disc error\n");
-                       brcmf_free_vif(p2p_vif);
-                       goto exit;
-               }
-               /* obtain bsscfg index for P2P discovery */
-               err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
-               if (err < 0) {
-                       brcmf_err("retrieving discover bsscfg index failed\n");
-                       brcmf_free_vif(p2p_vif);
-                       goto exit;
-               }
-               /* Verify that firmware uses same bssidx as driver !! */
-               if (p2p_ifp->bssidx != bssidx) {
-                       brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
-                                 bssidx, p2p_ifp->bssidx);
-                       brcmf_free_vif(p2p_vif);
-                       goto exit;
-               }
-
-               init_completion(&p2p->send_af_done);
-               INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
-               init_completion(&p2p->afx_hdl.act_frm_scan);
-               init_completion(&p2p->wait_next_af);
-exit:
-               brcmf_fweh_p2pdev_setup(pri_ifp, false);
-       }
        return err;
 }
 
index 30baf352e23436ee43c14b7cd3f5cb9ab5026c52..83d804221715f5a717597cbbe6b2624b4f9a7fbb 100644 (file)
@@ -59,6 +59,8 @@ enum brcmf_pcie_state {
 #define BRCMF_PCIE_4365_NVRAM_NAME             "brcm/brcmfmac4365b-pcie.txt"
 #define BRCMF_PCIE_4366_FW_NAME                        "brcm/brcmfmac4366b-pcie.bin"
 #define BRCMF_PCIE_4366_NVRAM_NAME             "brcm/brcmfmac4366b-pcie.txt"
+#define BRCMF_PCIE_4371_FW_NAME                        "brcm/brcmfmac4371-pcie.bin"
+#define BRCMF_PCIE_4371_NVRAM_NAME             "brcm/brcmfmac4371-pcie.txt"
 
 #define BRCMF_PCIE_FW_UP_TIMEOUT               2000 /* msec */
 
@@ -212,6 +214,8 @@ MODULE_FIRMWARE(BRCMF_PCIE_4365_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4365_NVRAM_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4366_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4366_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_NVRAM_NAME);
 
 
 struct brcmf_pcie_console {
@@ -448,6 +452,47 @@ brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 }
 
 
+static void
+brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+                         void *dstaddr, u32 len)
+{
+       void __iomem *address = devinfo->tcm + mem_offset;
+       __le32 *dst32;
+       __le16 *dst16;
+       u8 *dst8;
+
+       if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
+               if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
+                       dst8 = (u8 *)dstaddr;
+                       while (len) {
+                               *dst8 = ioread8(address);
+                               address++;
+                               dst8++;
+                               len--;
+                       }
+               } else {
+                       len = len / 2;
+                       dst16 = (__le16 *)dstaddr;
+                       while (len) {
+                               *dst16 = cpu_to_le16(ioread16(address));
+                               address += 2;
+                               dst16++;
+                               len--;
+                       }
+               }
+       } else {
+               len = len / 4;
+               dst32 = (__le32 *)dstaddr;
+               while (len) {
+                       *dst32 = cpu_to_le32(ioread32(address));
+                       address += 4;
+                       dst32++;
+                       len--;
+               }
+       }
+}
+
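The new read-back helper mirrors the existing brcmf_pcie_copy_mem_todev(): it picks the widest ioreadN() access that the TCM offset, the destination pointer, and the length all permit, falling back from 32-bit to 16-bit to byte reads, and the destination buffer ends up holding the device's little-endian byte order unchanged. Illustrative use, reading one word back out of TCM (hypothetical snippet, not from the patch):

    __le32 word;

    brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase,
                              &word, sizeof(word));
    /* word now holds the first 4 bytes of device RAM, device byte order */
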
+
 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
                CHIPCREGOFFS(reg), value)
 
@@ -1352,12 +1397,36 @@ static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
 }
 
 
+static size_t brcmf_pcie_get_ramsize(struct device *dev)
+{
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+       struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+       return devinfo->ci->ramsize - devinfo->ci->srsize;
+}
+
+
+static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
+{
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+       struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+       brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
+       brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
+       return 0;
+}
+
+
 static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
        .txdata = brcmf_pcie_tx,
        .stop = brcmf_pcie_down,
        .txctl = brcmf_pcie_tx_ctlpkt,
        .rxctl = brcmf_pcie_rx_ctlpkt,
        .wowl_config = brcmf_pcie_wowl_config,
+       .get_ramsize = brcmf_pcie_get_ramsize,
+       .get_memdump = brcmf_pcie_get_memdump,
 };
 
 
@@ -1456,6 +1525,10 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
                fw_name = BRCMF_PCIE_4366_FW_NAME;
                nvram_name = BRCMF_PCIE_4366_NVRAM_NAME;
                break;
+       case BRCM_CC_4371_CHIP_ID:
+               fw_name = BRCMF_PCIE_4371_FW_NAME;
+               nvram_name = BRCMF_PCIE_4371_NVRAM_NAME;
+               break;
        default:
                brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
                return -ENODEV;
@@ -1995,6 +2068,7 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
+       BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
        { /* end: all zeroes */ }
 };
 
index 7f574f26cdef657a3c243acb7f1f5e301da9cce3..7e74ac3ad81519491ac01460cd3b578e6fb3cc80 100644 (file)
@@ -3539,6 +3539,51 @@ done:
        return err;
 }
 
+static size_t brcmf_sdio_bus_get_ramsize(struct device *dev)
+{
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+       struct brcmf_sdio *bus = sdiodev->bus;
+
+       return bus->ci->ramsize - bus->ci->srsize;
+}
+
+static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
+                                     size_t mem_size)
+{
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+       struct brcmf_sdio *bus = sdiodev->bus;
+       int err;
+       int address;
+       int offset;
+       int len;
+
+       brcmf_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase,
+                 mem_size);
+
+       address = bus->ci->rambase;
+       offset = err = 0;
+       sdio_claim_host(sdiodev->func[1]);
+       while (offset < mem_size) {
+               len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
+                     mem_size - offset;
+               err = brcmf_sdiod_ramrw(sdiodev, false, address, data, len);
+               if (err) {
+                       brcmf_err("error %d on reading %d membytes at 0x%08x\n",
+                                 err, len, address);
+                       goto done;
+               }
+               data += len;
+               offset += len;
+               address += len;
+       }
+
+done:
+       sdio_release_host(sdiodev->func[1]);
+       return err;
+}
+
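Unlike the PCIe path, SDIO cannot map device RAM, so the dump is pulled back in MEMBLOCK-sized chunks under sdio_claim_host(), advancing the buffer pointer, byte offset, and device address in lock-step. Worked numbers, assuming MEMBLOCK keeps its existing sdio.c value of 2048:

    /* mem_size = 786432 (768 KiB), MEMBLOCK = 2048:
     *   384 iterations of len = 2048; for a size that is not a
     *   multiple of MEMBLOCK, the final pass reads the remainder,
     *   len = mem_size - offset.
     */
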
 void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
 {
        if (!bus->dpc_triggered) {
@@ -3987,7 +4032,9 @@ static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
        .txctl = brcmf_sdio_bus_txctl,
        .rxctl = brcmf_sdio_bus_rxctl,
        .gettxq = brcmf_sdio_bus_gettxq,
-       .wowl_config = brcmf_sdio_wowl_config
+       .wowl_config = brcmf_sdio_wowl_config,
+       .get_ramsize = brcmf_sdio_bus_get_ramsize,
+       .get_memdump = brcmf_sdio_bus_get_memdump,
 };
 
 static void brcmf_sdio_firmware_callback(struct device *dev,
index daba86d881bc1a4930f84ef8ebc5078bde4710e3..689e64d004bc56bee27f3edbae1f5c7899bbb204 100644 (file)
@@ -144,6 +144,7 @@ struct brcmf_usbdev_info {
 
        struct usb_device *usbdev;
        struct device *dev;
+       struct mutex dev_init_lock;
 
        int ctl_in_pipe, ctl_out_pipe;
        struct urb *ctl_urb; /* URB for control endpoint */
@@ -1204,6 +1205,8 @@ static void brcmf_usb_probe_phase2(struct device *dev,
        int ret;
 
        brcmf_dbg(USB, "Start fw downloading\n");
+
+       devinfo = bus->bus_priv.usb->devinfo;
        ret = check_file(fw->data);
        if (ret < 0) {
                brcmf_err("invalid firmware\n");
@@ -1211,7 +1214,6 @@ static void brcmf_usb_probe_phase2(struct device *dev,
                goto error;
        }
 
-       devinfo = bus->bus_priv.usb->devinfo;
        devinfo->image = fw->data;
        devinfo->image_len = fw->size;
 
@@ -1224,9 +1226,11 @@ static void brcmf_usb_probe_phase2(struct device *dev,
        if (ret)
                goto error;
 
+       mutex_unlock(&devinfo->dev_init_lock);
        return;
 error:
        brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
+       mutex_unlock(&devinfo->dev_init_lock);
        device_release_driver(dev);
 }
 
@@ -1264,6 +1268,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
                if (ret)
                        goto fail;
                /* we are done */
+               mutex_unlock(&devinfo->dev_init_lock);
                return 0;
        }
        bus->chip = bus_pub->devid;
@@ -1317,6 +1322,12 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
        devinfo->usbdev = usb;
        devinfo->dev = &usb->dev;
+       /* Take an init lock to protect against disconnect while the
+        * firmware is still loading; necessary because the firmware
+        * load is asynchronous.
+        */
+       mutex_init(&devinfo->dev_init_lock);
+       mutex_lock(&devinfo->dev_init_lock);
+
        usb_set_intfdata(intf, devinfo);
 
        /* Check that the device supports only one configuration */
@@ -1391,6 +1402,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        return 0;
 
 fail:
+       mutex_unlock(&devinfo->dev_init_lock);
        kfree(devinfo);
        usb_set_intfdata(intf, NULL);
        return ret;
@@ -1403,8 +1415,19 @@ brcmf_usb_disconnect(struct usb_interface *intf)
 
        brcmf_dbg(USB, "Enter\n");
        devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
-       brcmf_usb_disconnect_cb(devinfo);
-       kfree(devinfo);
+
+       if (devinfo) {
+               mutex_lock(&devinfo->dev_init_lock);
+               /* Make sure that devinfo still exists. Firmware probe routines
+                * may have released the device and cleared the intfdata.
+                */
+               if (!usb_get_intfdata(intf))
+                       goto done;
+
+               brcmf_usb_disconnect_cb(devinfo);
+               kfree(devinfo);
+       }
+done:
        brcmf_dbg(USB, "Exit\n");
 }
 
index d823734a47130e23591ab91e75eb89fcdf867e1b..aa06ea231db32eadae6bbe7c89fcb2825fabb228 100644 (file)
@@ -50,6 +50,7 @@
 #define BRCM_CC_43602_CHIP_ID          43602
 #define BRCM_CC_4365_CHIP_ID           0x4365
 #define BRCM_CC_4366_CHIP_ID           0x4366
+#define BRCM_CC_4371_CHIP_ID           0x4371
 
 /* USB Device IDs */
 #define BRCM_USB_43143_DEVICE_ID       0xbd1e
@@ -75,6 +76,7 @@
 #define BRCM_PCIE_4366_DEVICE_ID       0x43c3
 #define BRCM_PCIE_4366_2G_DEVICE_ID    0x43c4
 #define BRCM_PCIE_4366_5G_DEVICE_ID    0x43c5
+#define BRCM_PCIE_4371_DEVICE_ID       0x440d
 
 
 /* brcmsmac IDs */
index 39f3e6f5cbcd230a49145d0bf0f589cc85d4abb7..ed0adaf1eec445defe056f8509adc6b54b708518 100644 (file)
@@ -10470,7 +10470,6 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
                 vers, date);
        strlcpy(info->bus_info, pci_name(p->pci_dev),
                sizeof(info->bus_info));
-       info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
 }
 
 static u32 ipw_ethtool_get_link(struct net_device *dev)
index ab45819c1fbbf6d0080813c26090bb095082672d..e18629a16fb0260dff9b3486c572f4bfd2166fcd 100644 (file)
@@ -1020,7 +1020,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
                        u8 *pn = seq.ccmp.pn;
 
                        ieee80211_get_key_rx_seq(key, i, &seq);
-                       aes_sc->pn = cpu_to_le64(
+                       aes_sc[i].pn = cpu_to_le64(
                                        (u64)pn[5] |
                                        ((u64)pn[4] << 8) |
                                        ((u64)pn[3] << 16) |
index d561181f2cff1f5d490285e5765cb6254ea7b8f3..1a73c7a1da77d0e0fe16fbdc6fb866d6c65226ee 100644 (file)
@@ -341,6 +341,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
 };
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index 1d54355ad76a27da11268219caa09ddc23baf4ad..85ae902df7c08d9d1d0ad4611e7947ace08bf0f7 100644 (file)
@@ -274,18 +274,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                if (sta) {
-                       u8 *pn = seq.ccmp.pn;
+                       u64 pn64;
 
                        aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
                        aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
 
-                       ieee80211_get_key_tx_seq(key, &seq);
-                       aes_tx_sc->pn = cpu_to_le64((u64)pn[5] |
-                                                   ((u64)pn[4] << 8) |
-                                                   ((u64)pn[3] << 16) |
-                                                   ((u64)pn[2] << 24) |
-                                                   ((u64)pn[1] << 32) |
-                                                   ((u64)pn[0] << 40));
+                       pn64 = atomic64_read(&key->tx_pn);
+                       aes_tx_sc->pn = cpu_to_le64(pn64);
                } else {
                        aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
                }
@@ -298,12 +293,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                        u8 *pn = seq.ccmp.pn;
 
                        ieee80211_get_key_rx_seq(key, i, &seq);
-                       aes_sc->pn = cpu_to_le64((u64)pn[5] |
-                                                ((u64)pn[4] << 8) |
-                                                ((u64)pn[3] << 16) |
-                                                ((u64)pn[2] << 24) |
-                                                ((u64)pn[1] << 32) |
-                                                ((u64)pn[0] << 40));
+                       aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+                                                  ((u64)pn[4] << 8) |
+                                                  ((u64)pn[3] << 16) |
+                                                  ((u64)pn[2] << 24) |
+                                                  ((u64)pn[1] << 32) |
+                                                  ((u64)pn[0] << 40));
                }
                data->use_rsc_tsc = true;
                break;
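
Two distinct fixes land in this hunk. First, the CCMP TX PN is now read from the atomic key->tx_pn counter that mac80211 keeps in ieee80211_key_conf, instead of being assembled from ieee80211_get_key_tx_seq() bytes; the restore path in the next hunk mirrors this with atomic64_set(). Second, the RX replay counters are written per index, so each TID keeps its own PN (loop shape as in the surrounding function):

    /* before: every iteration overwrote aes_sc->pn, i.e. element 0;
     * after:  TID i lands in its own counter:
     *
     *   for (i = 0; i < IWL_NUM_RSC; i++) {
     *           ieee80211_get_key_rx_seq(key, i, &seq);
     *           aes_sc[i].pn = cpu_to_le64(...);   // pn[0] is the MSB
     *   }
     */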
@@ -1456,15 +1451,15 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
 
                switch (key->cipher) {
                case WLAN_CIPHER_SUITE_CCMP:
-                       iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
                        iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+                       atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
                        break;
                case WLAN_CIPHER_SUITE_TKIP:
                        iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
                        iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+                       ieee80211_set_key_tx_seq(key, &seq);
                        break;
                }
-               ieee80211_set_key_tx_seq(key, &seq);
 
                /* that's it for this key */
                return;
index 834641e250fb2ace9b386eaf2edd0f867fa3ce08..d906fa13ba9710a3e9250cd481d735aa71f767de 100644 (file)
@@ -699,7 +699,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
         * abort after reading the nvm in case RF Kill is on, we will complete
         * the init seq later when RF kill will switch to off
         */
-       if (iwl_mvm_is_radio_killed(mvm)) {
+       if (iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm,
                                  "jump over all phy activities due to RF kill\n");
                iwl_remove_notification(&mvm->notif_wait, &calib_wait);
@@ -732,7 +732,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                        MVM_UCODE_CALIB_TIMEOUT);
 
-       if (ret && iwl_mvm_is_radio_killed(mvm)) {
+       if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
                ret = 1;
        }
index 1d21e380ca119b7b14aae22bcc2255cb3f3c7277..1fb684693040eac8130cf62c348b0bf96dd4f005 100644 (file)
@@ -2435,6 +2435,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
                RCU_INIT_POINTER(mvm->csa_vif, NULL);
+               mvmvif->csa_countdown = false;
        }
 
        if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
index 4485bdb56b34ff8c9feadc7a9273dd2cb713047c..c6327cd1d071a862a61fced402b1376e5812d86c 100644 (file)
@@ -870,6 +870,11 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
               test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
 }
 
+static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
+{
+       return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+}
+
 /* Must be called with rcu_read_lock() held and it can only be
  * released when mvmsta is not needed anymore.
  */
index f0728b784edb25ecfb66e1ae21344785cfed02af..13c97f665ba889eb3f5824341cc8937190d665a8 100644 (file)
@@ -602,6 +602,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        ieee80211_unregister_hw(mvm->hw);
        iwl_mvm_leds_exit(mvm);
  out_free:
+       flush_delayed_work(&mvm->fw_dump_wk);
        iwl_phy_db_free(mvm->phy_db);
        kfree(mvm->scan_cmd);
        if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
index b0825c402c732c0514637b3b21b26288a7275444..644b58bc5226c52b3cdee0a24b9a392c25e8ac02 100644 (file)
@@ -414,6 +414,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
 
 /* 8000 Series */
        {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
index 317d99189556ab1c3025bf11afd7b61449fd124e..279167ddd2935467f62e50208b05fc0f9562623e 100644 (file)
@@ -33,12 +33,12 @@ config MWIFIEX_PCIE
          mwifiex_pcie.
 
 config MWIFIEX_USB
-       tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
+       tristate "Marvell WiFi-Ex Driver for USB8766/8797/8997"
        depends on MWIFIEX && USB
        select FW_LOADER
        ---help---
          This adds support for wireless adapters based on Marvell
-         8797/8897/8997 chipset with USB interface.
+         8797/8997 chipset with USB interface.
 
          If you choose to build it as a module, it will be called
          mwifiex_usb.
index 30cbafbd17c69d272a4068c6b25cf2fac72c2af3..b7ac45f324d7b60d290b661afb939ff6783826d0 100644 (file)
@@ -2374,7 +2374,7 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
  * CFG802.11 operation handler for scan request.
  *
  * This function issues a scan request to the firmware based upon
- * the user specified scan configuration. On successfull completion,
+ * the user specified scan configuration. On successful completion,
  * it also informs the results.
  */
 static int
index 5583856fc5c41f268c2e7b706e4968670a49602d..9824d8dd2b4447f11304557a44f3c7ea2d629d76 100644 (file)
@@ -856,6 +856,56 @@ mwifiex_hscfg_read(struct file *file, char __user *ubuf,
        return ret;
 }
 
+static ssize_t
+mwifiex_timeshare_coex_read(struct file *file, char __user *ubuf,
+                           size_t count, loff_t *ppos)
+{
+       struct mwifiex_private *priv = file->private_data;
+       char buf[3];
+       bool timeshare_coex;
+       int ret;
+       unsigned int len;
+
+       if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
+               return -EOPNOTSUPP;
+
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
+                              HostCmd_ACT_GEN_GET, 0, &timeshare_coex, true);
+       if (ret)
+               return ret;
+
+       len = sprintf(buf, "%d\n", timeshare_coex);
+       return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static ssize_t
+mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf,
+                            size_t count, loff_t *ppos)
+{
+       bool timeshare_coex;
+       struct mwifiex_private *priv = file->private_data;
+       char kbuf[16];
+       int ret;
+
+       if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
+               return -EOPNOTSUPP;
+
+       memset(kbuf, 0, sizeof(kbuf));
+
+       if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
+               return -EFAULT;
+
+       if (strtobool(kbuf, &timeshare_coex))
+               return -EINVAL;
+
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
+                              HostCmd_ACT_GEN_SET, 0, &timeshare_coex, true);
+       if (ret)
+               return ret;
+       else
+               return count;
+}
+
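The write handler accepts whatever strtobool() understands: a first character of 'y', 'Y' or '1' selects time-share coexistence, 'n', 'N' or '0' selects spatial, and anything else is rejected with -EINVAL before a command is sent. Illustrative mapping (hypothetical snippet, not from the patch):

    bool v;

    strtobool("1\n", &v);    /* v = true  -> MWIFIEX_COEX_MODE_TIMESHARE */
    strtobool("n", &v);      /* v = false -> MWIFIEX_COEX_MODE_SPATIAL   */
    strtobool("maybe", &v);  /* -EINVAL, so the write returns -EINVAL    */
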
 #define MWIFIEX_DFS_ADD_FILE(name) do {                                 \
        if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir,        \
                        priv, &mwifiex_dfs_##name##_fops))              \
@@ -892,6 +942,7 @@ MWIFIEX_DFS_FILE_OPS(memrw);
 MWIFIEX_DFS_FILE_OPS(hscfg);
 MWIFIEX_DFS_FILE_OPS(histogram);
 MWIFIEX_DFS_FILE_OPS(debug_mask);
+MWIFIEX_DFS_FILE_OPS(timeshare_coex);
 
 /*
  * This function creates the debug FS directory structure and the files.
@@ -918,6 +969,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
        MWIFIEX_DFS_ADD_FILE(hscfg);
        MWIFIEX_DFS_ADD_FILE(histogram);
        MWIFIEX_DFS_ADD_FILE(debug_mask);
+       MWIFIEX_DFS_ADD_FILE(timeshare_coex);
 }
 
 /*
index 0e6f029458d531142f0ad29a31b23132547f5a86..1e1e81a0a8d4989876433b7cb31756d401cb40cf 100644 (file)
@@ -101,6 +101,9 @@ enum KEY_TYPE_ID {
 #define FIRMWARE_READY_SDIO                            0xfedc
 #define FIRMWARE_READY_PCIE                            0xfedcba00
 
+#define MWIFIEX_COEX_MODE_TIMESHARE                    0x01
+#define MWIFIEX_COEX_MODE_SPATIAL                      0x82
+
 enum mwifiex_usb_ep {
        MWIFIEX_USB_EP_CMD_EVENT = 1,
        MWIFIEX_USB_EP_DATA = 2,
@@ -163,6 +166,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_CHANRPT_11H_BASIC  (PROPRIETARY_TLV_BASE_ID + 91)
 #define TLV_TYPE_UAP_RETRY_LIMIT    (PROPRIETARY_TLV_BASE_ID + 93)
 #define TLV_TYPE_WAPI_IE            (PROPRIETARY_TLV_BASE_ID + 94)
+#define TLV_TYPE_ROBUST_COEX        (PROPRIETARY_TLV_BASE_ID + 96)
 #define TLV_TYPE_UAP_MGMT_FRAME     (PROPRIETARY_TLV_BASE_ID + 104)
 #define TLV_TYPE_MGMT_IE            (PROPRIETARY_TLV_BASE_ID + 105)
 #define TLV_TYPE_AUTO_DS_PARAM      (PROPRIETARY_TLV_BASE_ID + 113)
@@ -354,6 +358,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_AMSDU_AGGR_CTRL                   0x00df
 #define HostCmd_CMD_TXPWR_CFG                         0x00d1
 #define HostCmd_CMD_TX_RATE_CFG                       0x00d6
+#define HostCmd_CMD_ROBUST_COEX                       0x00e0
 #define HostCmd_CMD_802_11_PS_MODE_ENH                0x00e4
 #define HostCmd_CMD_802_11_HS_CFG_ENH                 0x00e5
 #define HostCmd_CMD_P2P_MODE_CFG                      0x00eb
@@ -1877,6 +1882,11 @@ struct mwifiex_ie_types_btcoex_aggr_win_size {
        u8 reserved;
 } __packed;
 
+struct mwifiex_ie_types_robust_coex {
+       struct mwifiex_ie_types_header header;
+       __le32 mode;
+} __packed;
+
 struct host_cmd_ds_version_ext {
        u8 version_str_sel;
        char version_str[128];
@@ -2078,6 +2088,11 @@ struct host_cmd_ds_multi_chan_policy {
        __le16 policy;
 } __packed;
 
+struct host_cmd_ds_robust_coex {
+       __le16 action;
+       __le16 reserved;
+} __packed;
+
 struct host_cmd_ds_command {
        __le16 command;
        __le16 size;
@@ -2147,6 +2162,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_chan_rpt_req chan_rpt_req;
                struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
                struct host_cmd_ds_multi_chan_policy mc_policy;
+               struct host_cmd_ds_robust_coex coex;
        } params;
 } __packed;
 
index 6e3faa74389c56283980779611955a16bb21cd45..969ca1e1f3e9f805d26edd3ed9f6ea8d839fc9b7 100644 (file)
@@ -1199,6 +1199,7 @@ static const struct net_device_ops mwifiex_netdev_ops = {
        .ndo_stop = mwifiex_close,
        .ndo_start_xmit = mwifiex_hard_start_xmit,
        .ndo_set_mac_address = mwifiex_set_mac_address,
+       .ndo_validate_addr = eth_validate_addr,
        .ndo_tx_timeout = mwifiex_tx_timeout,
        .ndo_get_stats = mwifiex_get_stats,
        .ndo_set_rx_mode = mwifiex_set_multicast_list,
index 504b321301ec46ce99bc986fe8792011194fcef2..e486867a4c67520fc2a8a8520368009975fd8878 100644 (file)
@@ -1531,6 +1531,33 @@ mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv,
        return 0;
 }
 
+static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv,
+                                  struct host_cmd_ds_command *cmd,
+                                  u16 cmd_action, bool *is_timeshare)
+{
+       struct host_cmd_ds_robust_coex *coex = &cmd->params.coex;
+       struct mwifiex_ie_types_robust_coex *coex_tlv;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_ROBUST_COEX);
+       cmd->size = cpu_to_le16(sizeof(*coex) + sizeof(*coex_tlv) + S_DS_GEN);
+
+       coex->action = cpu_to_le16(cmd_action);
+       coex_tlv = (struct mwifiex_ie_types_robust_coex *)
+                               ((u8 *)coex + sizeof(*coex));
+       coex_tlv->header.type = cpu_to_le16(TLV_TYPE_ROBUST_COEX);
+       coex_tlv->header.len = cpu_to_le16(sizeof(coex_tlv->mode));
+
+       if (coex->action == HostCmd_ACT_GEN_GET)
+               return 0;
+
+       if (*is_timeshare)
+               coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_TIMESHARE);
+       else
+               coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_SPATIAL);
+
+       return 0;
+}
+
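The command body is a fixed header followed immediately by a single TLV; coex_tlv is reached by pointer arithmetic past the header, and cmd->size accounts for the generic command header (S_DS_GEN) plus both parts. Sketch of the resulting payload, per the code above:

    /* HostCmd_CMD_ROBUST_COEX, as built here:
     *
     *   host_cmd_ds_robust_coex         mwifiex_ie_types_robust_coex
     *   +--------+----------+           +------+-----+---------------+
     *   | action | reserved |  ...then  | type | len | mode (__le32) |
     *   +--------+----------+           +------+-----+---------------+
     *
     * mode is filled only for SET (TIMESHARE 0x01 or SPATIAL 0x82);
     * for GET the firmware fills the TLV in its response, which
     * mwifiex_ret_robust_coex() below parses back out.
     */
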
 static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
                         struct host_cmd_ds_command *cmd,
@@ -2040,6 +2067,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
                                                data_buf);
                break;
+       case HostCmd_CMD_ROBUST_COEX:
+               ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action,
+                                             data_buf);
+               break;
        default:
                mwifiex_dbg(priv->adapter, ERROR,
                            "PREP_CMD: unknown cmd- %#x\n", cmd_no);
index d0961635c7b36c72a7952de11fd1b751c2b66ac2..9ac7aa2431b41533ab68f8495693c36c41f40801 100644 (file)
@@ -1007,6 +1007,28 @@ static int mwifiex_ret_sdio_rx_aggr_cfg(struct mwifiex_private *priv,
        return 0;
 }
 
+static int mwifiex_ret_robust_coex(struct mwifiex_private *priv,
+                                  struct host_cmd_ds_command *resp,
+                                  bool *is_timeshare)
+{
+       struct host_cmd_ds_robust_coex *coex = &resp->params.coex;
+       struct mwifiex_ie_types_robust_coex *coex_tlv;
+       u16 action = le16_to_cpu(coex->action);
+       u32 mode;
+
+       coex_tlv = (struct mwifiex_ie_types_robust_coex
+                   *)((u8 *)coex + sizeof(struct host_cmd_ds_robust_coex));
+       if (action == HostCmd_ACT_GEN_GET) {
+               mode = le32_to_cpu(coex_tlv->mode);
+               if (mode == MWIFIEX_COEX_MODE_TIMESHARE)
+                       *is_timeshare = true;
+               else
+                       *is_timeshare = false;
+       }
+
+       return 0;
+}
+
 /*
  * This function handles the command responses.
  *
@@ -1213,6 +1235,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_TDLS_CONFIG:
                break;
+       case HostCmd_CMD_ROBUST_COEX:
+               ret = mwifiex_ret_robust_coex(priv, resp, data_buf);
+               break;
        default:
                mwifiex_dbg(adapter, ERROR,
                            "CMD_RESP: unknown cmd response %#x\n",
index 4d5a6e3b6361700c9bcab06201a70cd29d38b66e..759a6ada5b0fb544b9ac384dac8aae7c182bc877 100644 (file)
@@ -846,22 +846,6 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv,
 {
        enum state_11d_t state_11d;
 
-       if (mwifiex_del_mgmt_ies(priv))
-               mwifiex_dbg(priv->adapter, ERROR,
-                           "Failed to delete mgmt IEs!\n");
-
-       if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
-                            HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-               mwifiex_dbg(priv->adapter, ERROR, "Failed to stop the BSS\n");
-               return -1;
-       }
-
-       if (mwifiex_send_cmd(priv, HOST_CMD_APCMD_SYS_RESET,
-                            HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-               mwifiex_dbg(priv->adapter, ERROR, "Failed to reset BSS\n");
-               return -1;
-       }
-
        if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
                             HostCmd_ACT_GEN_SET,
                             UAP_BSS_PARAMS_I, bss_cfg, false)) {
index 9f5356ef0531a9b7e22b3f9cdf19a9ac94fc537e..e43aff932360ef3e99801956b5bca6b8e8649303 100644 (file)
@@ -42,11 +42,6 @@ static struct usb_device_id mwifiex_usb_table[] = {
        {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8801_PID_2,
                                       USB_CLASS_VENDOR_SPEC,
                                       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
-       /* 8897 */
-       {USB_DEVICE(USB8XXX_VID, USB8897_PID_1)},
-       {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
-                                      USB_CLASS_VENDOR_SPEC,
-                                      USB_SUBCLASS_VENDOR_SPEC, 0xff)},
        /* 8997 */
        {USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
        {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
@@ -403,14 +398,12 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
        case USB8766_PID_1:
        case USB8797_PID_1:
        case USB8801_PID_1:
-       case USB8897_PID_1:
        case USB8997_PID_1:
                card->usb_boot_state = USB8XXX_FW_DNLD;
                break;
        case USB8766_PID_2:
        case USB8797_PID_2:
        case USB8801_PID_2:
-       case USB8897_PID_2:
        case USB8997_PID_2:
                card->usb_boot_state = USB8XXX_FW_READY;
                break;
@@ -964,12 +957,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
                strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
                adapter->ext_scan = true;
                break;
-       case USB8897_PID_1:
-       case USB8897_PID_2:
-               adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
-               strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
-               adapter->ext_scan = true;
-               break;
        case USB8766_PID_1:
        case USB8766_PID_2:
                adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
@@ -1277,5 +1264,4 @@ MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
-MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
index bab10ee41923a4286d6854767bb73a3f59f03e87..b4e9246bbcdc9080d3b9bddec7e0f3aab1d16fa1 100644 (file)
 #define USB8766_PID_2          0x2042
 #define USB8797_PID_1          0x2043
 #define USB8797_PID_2          0x2044
-#define USB8897_PID_1          0x2045
-#define USB8897_PID_2          0x2046
 #define USB8801_PID_1          0x2049
 #define USB8801_PID_2          0x204a
-#define USB8997_PID_1          0x204d
+#define USB8997_PID_1          0x2052
 #define USB8997_PID_2          0x204e
 
 
@@ -48,7 +46,6 @@
 #define USB8766_DEFAULT_FW_NAME        "mrvl/usb8766_uapsta.bin"
 #define USB8797_DEFAULT_FW_NAME        "mrvl/usb8797_uapsta.bin"
 #define USB8801_DEFAULT_FW_NAME        "mrvl/usb8801_uapsta.bin"
-#define USB8897_DEFAULT_FW_NAME        "mrvl/usb8897_uapsta.bin"
 #define USB8997_DEFAULT_FW_NAME        "mrvl/usb8997_uapsta.bin"
 
 #define FW_DNLD_TX_BUF_SIZE    620
index 57c13ec3d3defd11cefc91dda1a9c001b5aa8c67..acccd6734e3b332cb172789e9b210c077465371f 100644 (file)
@@ -684,7 +684,7 @@ void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
                        if (!memcmp(ra_list->ra, mac, ETH_ALEN))
                                continue;
 
-                       if (ra_list && ra_list->tx_paused != tx_pause) {
+                       if (ra_list->tx_paused != tx_pause) {
                                pkt_cnt += ra_list->total_pkt_count;
                                ra_list->tx_paused = tx_pause;
                                if (tx_pause)
diff --git a/drivers/net/wireless/realtek/Makefile b/drivers/net/wireless/realtek/Makefile
new file mode 100644 (file)
index 0000000..9c78deb
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Makefile for the Linux Wireless network device drivers for Realtek units
+#
+
+obj-$(CONFIG_RTL8180)          += rtl818x/
+obj-$(CONFIG_RTL8187)          += rtl818x/
+obj-$(CONFIG_RTLWIFI)          += rtlwifi/
+obj-$(CONFIG_RTL8XXXU)         += rtl8xxxu/
+
similarity index 69%
rename from drivers/net/wireless/rtl818x/rtl8180/Makefile
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
index 21005bd8b43c973da6ebd24556da848032727bcf..2966681efaef41845209d1b0bc055a0521da2368 100644 (file)
@@ -2,4 +2,4 @@ rtl818x_pci-objs        := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
 
 obj-$(CONFIG_RTL8180)  += rtl818x_pci.o
 
-ccflags-y += -Idrivers/net/wireless/rtl818x
+ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
similarity index 62%
rename from drivers/net/wireless/rtl818x/rtl8187/Makefile
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
index 7b6299268ecf346abd9c814bab564d9b7a56b7d3..ff074912a095215255af28c9810e658bc45c9bfe 100644 (file)
@@ -2,4 +2,4 @@ rtl8187-objs            := dev.o rtl8225.o leds.o rfkill.o
 
 obj-$(CONFIG_RTL8187)  += rtl8187.o
 
-ccflags-y += -Idrivers/net/wireless/rtl818x
+ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
new file mode 100644 (file)
index 0000000..dd4d626
--- /dev/null
@@ -0,0 +1,34 @@
+#
+# RTL8XXXU Wireless LAN device configuration
+#
+config RTL8XXXU
+       tristate "RTL8723AU/RTL8188[CR]U/RTL819[12]CU (mac80211) support"
+       depends on MAC80211 && USB
+       ---help---
+         This is an alternative driver for various Realtek RTL8XXX
+         parts written to utilize the Linux mac80211 stack.
+         The driver is known to work with a number of RTL8723AU,
+         RTL8188CU, RTL8188RU, RTL8191CU, and RTL8192CU devices.
+
+         This driver is under development and has a limited feature
+         set. In particular, it does not yet support 40 MHz channels
+         and power management. However, it should have a smaller
+         memory footprint than the vendor drivers and benefits
+         from the in-kernel mac80211 stack.
+
+         It can coexist with drivers from drivers/staging/rtl8723au,
+         drivers/staging/rtl8192u, and drivers/net/wireless/rtlwifi,
+         but you will need to control which module you wish to load.
+
+         To compile this driver as a module, choose M here: the module will
+         be called rtl8xxxu.  If unsure, say N.
+
+config RTL8XXXU_UNTESTED
+       bool "Include support for untested Realtek 8xxx USB devices (EXPERIMENTAL)"
+       depends on RTL8XXXU
+       ---help---
+         This option enables detection of Realtek 8723/8188/8191/8192 WiFi
+         USB devices which have not been tested directly by the driver
+         author or reported to be working by third parties.
+
+         Please report your results!
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
new file mode 100644 (file)
index 0000000..5dea3bb
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
new file mode 100644 (file)
index 0000000..6aed923
--- /dev/null
@@ -0,0 +1,5993 @@
+/*
+ * RTL8XXXU mac80211 USB driver
+ *
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+#define DRIVER_NAME "rtl8xxxu"
+
+static int rtl8xxxu_debug;
+static bool rtl8xxxu_ht40_2g;
+
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
+MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
+
+module_param_named(debug, rtl8xxxu_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Set debug mask");
+module_param_named(ht40_2g, rtl8xxxu_ht40_2g, bool, 0600);
+MODULE_PARM_DESC(ht40_2g, "Enable HT40 support on the 2.4GHz band");
+
+#define USB_VENDOR_ID_REALTEK          0x0bda
+/* RX buffer size must be at least IEEE80211_MAX_FRAME_LEN */
+#define RTL_RX_BUFFER_SIZE             IEEE80211_MAX_FRAME_LEN
+#define RTL8XXXU_RX_URBS               32
+#define RTL8XXXU_RX_URB_PENDING_WATER  8
+#define RTL8XXXU_TX_URBS               64
+#define RTL8XXXU_TX_URB_LOW_WATER      25
+#define RTL8XXXU_TX_URB_HIGH_WATER     32
+
+static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
+                                 struct rtl8xxxu_rx_urb *rx_urb);
+
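+/* Bitrates are in units of 100 kbit/s, as struct ieee80211_rate expects */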
+static struct ieee80211_rate rtl8xxxu_rates[] = {
+       { .bitrate = 10, .hw_value = DESC_RATE_1M, .flags = 0 },
+       { .bitrate = 20, .hw_value = DESC_RATE_2M, .flags = 0 },
+       { .bitrate = 55, .hw_value = DESC_RATE_5_5M, .flags = 0 },
+       { .bitrate = 110, .hw_value = DESC_RATE_11M, .flags = 0 },
+       { .bitrate = 60, .hw_value = DESC_RATE_6M, .flags = 0 },
+       { .bitrate = 90, .hw_value = DESC_RATE_9M, .flags = 0 },
+       { .bitrate = 120, .hw_value = DESC_RATE_12M, .flags = 0 },
+       { .bitrate = 180, .hw_value = DESC_RATE_18M, .flags = 0 },
+       { .bitrate = 240, .hw_value = DESC_RATE_24M, .flags = 0 },
+       { .bitrate = 360, .hw_value = DESC_RATE_36M, .flags = 0 },
+       { .bitrate = 480, .hw_value = DESC_RATE_48M, .flags = 0 },
+       { .bitrate = 540, .hw_value = DESC_RATE_54M, .flags = 0 },
+};
+
+static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+         .hw_value = 1, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+         .hw_value = 2, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+         .hw_value = 3, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+         .hw_value = 4, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+         .hw_value = 5, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+         .hw_value = 6, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+         .hw_value = 7, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+         .hw_value = 8, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+         .hw_value = 9, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+         .hw_value = 10, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+         .hw_value = 11, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+         .hw_value = 12, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+         .hw_value = 13, .max_power = 30 },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+         .hw_value = 14, .max_power = 30 }
+};
+
+static struct ieee80211_supported_band rtl8xxxu_supported_band = {
+       .channels = rtl8xxxu_channels_2g,
+       .n_channels = ARRAY_SIZE(rtl8xxxu_channels_2g),
+       .bitrates = rtl8xxxu_rates,
+       .n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
+};
+
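+/* MAC init register/value pairs, terminated by the {0xffff, 0xff} entry */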
+static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
+       {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
+       {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+       {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
+       {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
+       {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+       {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+       {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+       {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
+       {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+       {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+       {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+       {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+       {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+       {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+       {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
+       {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
+       {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
+       {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+       {0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
+       {0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
+       {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
+       {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
+};
+
+static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
+       {0x800, 0x80040000}, {0x804, 0x00000003},
+       {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+       {0x810, 0x10001331}, {0x814, 0x020c3d10},
+       {0x818, 0x02200385}, {0x81c, 0x00000000},
+       {0x820, 0x01000100}, {0x824, 0x00390004},
+       {0x828, 0x00000000}, {0x82c, 0x00000000},
+       {0x830, 0x00000000}, {0x834, 0x00000000},
+       {0x838, 0x00000000}, {0x83c, 0x00000000},
+       {0x840, 0x00010000}, {0x844, 0x00000000},
+       {0x848, 0x00000000}, {0x84c, 0x00000000},
+       {0x850, 0x00000000}, {0x854, 0x00000000},
+       {0x858, 0x569a569a}, {0x85c, 0x001b25a4},
+       {0x860, 0x66f60110}, {0x864, 0x061f0130},
+       {0x868, 0x00000000}, {0x86c, 0x32323200},
+       {0x870, 0x07000760}, {0x874, 0x22004000},
+       {0x878, 0x00000808}, {0x87c, 0x00000000},
+       {0x880, 0xc0083070}, {0x884, 0x000004d5},
+       {0x888, 0x00000000}, {0x88c, 0xccc000c0},
+       {0x890, 0x00000800}, {0x894, 0xfffffffe},
+       {0x898, 0x40302010}, {0x89c, 0x00706050},
+       {0x900, 0x00000000}, {0x904, 0x00000023},
+       {0x908, 0x00000000}, {0x90c, 0x81121111},
+       {0xa00, 0x00d047c8}, {0xa04, 0x80ff000c},
+       {0xa08, 0x8c838300}, {0xa0c, 0x2e68120f},
+       {0xa10, 0x9500bb78}, {0xa14, 0x11144028},
+       {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+       {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+       {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+       {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+       {0xa78, 0x00000900},
+       {0xc00, 0x48071d40}, {0xc04, 0x03a05611},
+       {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+       {0xc10, 0x08800000}, {0xc14, 0x40000100},
+       {0xc18, 0x08800000}, {0xc1c, 0x40000100},
+       {0xc20, 0x00000000}, {0xc24, 0x00000000},
+       {0xc28, 0x00000000}, {0xc2c, 0x00000000},
+       {0xc30, 0x69e9ac44}, {0xc34, 0x469652af},
+       {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+       {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+       {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+       {0xc50, 0x69543420}, {0xc54, 0x43bc0094},
+       {0xc58, 0x69543420}, {0xc5c, 0x433c0094},
+       {0xc60, 0x00000000}, {0xc64, 0x7112848b},
+       {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+       {0xc70, 0x2c7f000d}, {0xc74, 0x018610db},
+       {0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
+       {0xc80, 0x40000100}, {0xc84, 0x20f60000},
+       {0xc88, 0x40000100}, {0xc8c, 0x20200000},
+       {0xc90, 0x00121820}, {0xc94, 0x00000000},
+       {0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+       {0xca0, 0x00000000}, {0xca4, 0x00000080},
+       {0xca8, 0x00000000}, {0xcac, 0x00000000},
+       {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+       {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+       {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+       {0xcc8, 0x00000000}, {0xccc, 0x00000000},
+       {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+       {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+       {0xce0, 0x00222222}, {0xce4, 0x00000000},
+       {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+       {0xd00, 0x00080740}, {0xd04, 0x00020401},
+       {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+       {0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+       {0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975},
+       {0xd30, 0x00000000}, {0xd34, 0x80608000},
+       {0xd38, 0x00000000}, {0xd3c, 0x00027293},
+       {0xd40, 0x00000000}, {0xd44, 0x00000000},
+       {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+       {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+       {0xd58, 0x00000000}, {0xd5c, 0x30032064},
+       {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+       {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+       {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+       {0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a},
+       {0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a},
+       {0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a},
+       {0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a},
+       {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+       {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+       {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+       {0xe44, 0x01004800}, {0xe48, 0xfb000000},
+       {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+       {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+       {0xe5c, 0x28160d05}, {0xe60, 0x00000008},
+       {0xe68, 0x001b25a4}, {0xe6c, 0x631b25a0},
+       {0xe70, 0x631b25a0}, {0xe74, 0x081b25a0},
+       {0xe78, 0x081b25a0}, {0xe7c, 0x081b25a0},
+       {0xe80, 0x081b25a0}, {0xe84, 0x631b25a0},
+       {0xe88, 0x081b25a0}, {0xe8c, 0x631b25a0},
+       {0xed0, 0x631b25a0}, {0xed4, 0x631b25a0},
+       {0xed8, 0x631b25a0}, {0xedc, 0x001b25a0},
+       {0xee0, 0x001b25a0}, {0xeec, 0x6b1b25a0},
+       {0xf14, 0x00000003}, {0xf4c, 0x00000000},
+       {0xf00, 0x00000300},
+       {0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
+       {0x024, 0x0011800f}, {0x028, 0x00ffdb83},
+       {0x800, 0x80040002}, {0x804, 0x00000003},
+       {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+       {0x810, 0x10000330}, {0x814, 0x020c3d10},
+       {0x818, 0x02200385}, {0x81c, 0x00000000},
+       {0x820, 0x01000100}, {0x824, 0x00390004},
+       {0x828, 0x01000100}, {0x82c, 0x00390004},
+       {0x830, 0x27272727}, {0x834, 0x27272727},
+       {0x838, 0x27272727}, {0x83c, 0x27272727},
+       {0x840, 0x00010000}, {0x844, 0x00010000},
+       {0x848, 0x27272727}, {0x84c, 0x27272727},
+       {0x850, 0x00000000}, {0x854, 0x00000000},
+       {0x858, 0x569a569a}, {0x85c, 0x0c1b25a4},
+       {0x860, 0x66e60230}, {0x864, 0x061f0130},
+       {0x868, 0x27272727}, {0x86c, 0x2b2b2b27},
+       {0x870, 0x07000700}, {0x874, 0x22184000},
+       {0x878, 0x08080808}, {0x87c, 0x00000000},
+       {0x880, 0xc0083070}, {0x884, 0x000004d5},
+       {0x888, 0x00000000}, {0x88c, 0xcc0000c0},
+       {0x890, 0x00000800}, {0x894, 0xfffffffe},
+       {0x898, 0x40302010}, {0x89c, 0x00706050},
+       {0x900, 0x00000000}, {0x904, 0x00000023},
+       {0x908, 0x00000000}, {0x90c, 0x81121313},
+       {0xa00, 0x00d047c8}, {0xa04, 0x80ff000c},
+       {0xa08, 0x8c838300}, {0xa0c, 0x2e68120f},
+       {0xa10, 0x9500bb78}, {0xa14, 0x11144028},
+       {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+       {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+       {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+       {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+       {0xc00, 0x48071d40}, {0xc04, 0x03a05633},
+       {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+       {0xc10, 0x08800000}, {0xc14, 0x40000100},
+       {0xc18, 0x08800000}, {0xc1c, 0x40000100},
+       {0xc20, 0x00000000}, {0xc24, 0x00000000},
+       {0xc28, 0x00000000}, {0xc2c, 0x00000000},
+       {0xc30, 0x69e9ac44}, {0xc34, 0x469652cf},
+       {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+       {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+       {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+       {0xc50, 0x69543420}, {0xc54, 0x43bc0094},
+       {0xc58, 0x69543420}, {0xc5c, 0x433c0094},
+       {0xc60, 0x00000000}, {0xc64, 0x5116848b},
+       {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+       {0xc70, 0x2c7f000d}, {0xc74, 0x2186115b},
+       {0xc78, 0x0000001f}, {0xc7c, 0x00b99612},
+       {0xc80, 0x40000100}, {0xc84, 0x20f60000},
+       {0xc88, 0x40000100}, {0xc8c, 0xa0e40000},
+       {0xc90, 0x00121820}, {0xc94, 0x00000000},
+       {0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+       {0xca0, 0x00000000}, {0xca4, 0x00000080},
+       {0xca8, 0x00000000}, {0xcac, 0x00000000},
+       {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+       {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+       {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+       {0xcc8, 0x00000000}, {0xccc, 0x00000000},
+       {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+       {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+       {0xce0, 0x00222222}, {0xce4, 0x00000000},
+       {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+       {0xd00, 0x00080740}, {0xd04, 0x00020403},
+       {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+       {0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+       {0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975},
+       {0xd30, 0x00000000}, {0xd34, 0x80608000},
+       {0xd38, 0x00000000}, {0xd3c, 0x00027293},
+       {0xd40, 0x00000000}, {0xd44, 0x00000000},
+       {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+       {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+       {0xd58, 0x00000000}, {0xd5c, 0x30032064},
+       {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+       {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+       {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+       {0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a},
+       {0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a},
+       {0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a},
+       {0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a},
+       {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+       {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+       {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+       {0xe44, 0x01004800}, {0xe48, 0xfb000000},
+       {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+       {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+       {0xe5c, 0x28160d05}, {0xe60, 0x00000010},
+       {0xe68, 0x001b25a4}, {0xe6c, 0x63db25a4},
+       {0xe70, 0x63db25a4}, {0xe74, 0x0c1b25a4},
+       {0xe78, 0x0c1b25a4}, {0xe7c, 0x0c1b25a4},
+       {0xe80, 0x0c1b25a4}, {0xe84, 0x63db25a4},
+       {0xe88, 0x0c1b25a4}, {0xe8c, 0x63db25a4},
+       {0xed0, 0x63db25a4}, {0xed4, 0x63db25a4},
+       {0xed8, 0x63db25a4}, {0xedc, 0x001b25a4},
+       {0xee0, 0x001b25a4}, {0xeec, 0x6fdb25a4},
+       {0xf14, 0x00000003}, {0xf4c, 0x00000000},
+       {0xf00, 0x00000300},
+       {0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = {
+       {0x024, 0x0011800f}, {0x028, 0x00ffdb83},
+       {0x040, 0x000c0004}, {0x800, 0x80040000},
+       {0x804, 0x00000001}, {0x808, 0x0000fc00},
+       {0x80c, 0x0000000a}, {0x810, 0x10005388},
+       {0x814, 0x020c3d10}, {0x818, 0x02200385},
+       {0x81c, 0x00000000}, {0x820, 0x01000100},
+       {0x824, 0x00390204}, {0x828, 0x00000000},
+       {0x82c, 0x00000000}, {0x830, 0x00000000},
+       {0x834, 0x00000000}, {0x838, 0x00000000},
+       {0x83c, 0x00000000}, {0x840, 0x00010000},
+       {0x844, 0x00000000}, {0x848, 0x00000000},
+       {0x84c, 0x00000000}, {0x850, 0x00000000},
+       {0x854, 0x00000000}, {0x858, 0x569a569a},
+       {0x85c, 0x001b25a4}, {0x860, 0x66e60230},
+       {0x864, 0x061f0130}, {0x868, 0x00000000},
+       {0x86c, 0x20202000}, {0x870, 0x03000300},
+       {0x874, 0x22004000}, {0x878, 0x00000808},
+       {0x87c, 0x00ffc3f1}, {0x880, 0xc0083070},
+       {0x884, 0x000004d5}, {0x888, 0x00000000},
+       {0x88c, 0xccc000c0}, {0x890, 0x00000800},
+       {0x894, 0xfffffffe}, {0x898, 0x40302010},
+       {0x89c, 0x00706050}, {0x900, 0x00000000},
+       {0x904, 0x00000023}, {0x908, 0x00000000},
+       {0x90c, 0x81121111}, {0xa00, 0x00d047c8},
+       {0xa04, 0x80ff000c}, {0xa08, 0x8c838300},
+       {0xa0c, 0x2e68120f}, {0xa10, 0x9500bb78},
+       {0xa14, 0x11144028}, {0xa18, 0x00881117},
+       {0xa1c, 0x89140f00}, {0xa20, 0x15160000},
+       {0xa24, 0x070b0f12}, {0xa28, 0x00000104},
+       {0xa2c, 0x00d30000}, {0xa70, 0x101fbf00},
+       {0xa74, 0x00000007}, {0xc00, 0x48071d40},
+       {0xc04, 0x03a05611}, {0xc08, 0x000000e4},
+       {0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000},
+       {0xc14, 0x40000100}, {0xc18, 0x08800000},
+       {0xc1c, 0x40000100}, {0xc20, 0x00000000},
+       {0xc24, 0x00000000}, {0xc28, 0x00000000},
+       {0xc2c, 0x00000000}, {0xc30, 0x69e9ac44},
+       {0xc34, 0x469652cf}, {0xc38, 0x49795994},
+       {0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f},
+       {0xc44, 0x000100b7}, {0xc48, 0xec020107},
+       {0xc4c, 0x007f037f}, {0xc50, 0x6954342e},
+       {0xc54, 0x43bc0094}, {0xc58, 0x6954342f},
+       {0xc5c, 0x433c0094}, {0xc60, 0x00000000},
+       {0xc64, 0x5116848b}, {0xc68, 0x47c00bff},
+       {0xc6c, 0x00000036}, {0xc70, 0x2c46000d},
+       {0xc74, 0x018610db}, {0xc78, 0x0000001f},
+       {0xc7c, 0x00b91612}, {0xc80, 0x24000090},
+       {0xc84, 0x20f60000}, {0xc88, 0x24000090},
+       {0xc8c, 0x20200000}, {0xc90, 0x00121820},
+       {0xc94, 0x00000000}, {0xc98, 0x00121820},
+       {0xc9c, 0x00007f7f}, {0xca0, 0x00000000},
+       {0xca4, 0x00000080}, {0xca8, 0x00000000},
+       {0xcac, 0x00000000}, {0xcb0, 0x00000000},
+       {0xcb4, 0x00000000}, {0xcb8, 0x00000000},
+       {0xcbc, 0x28000000}, {0xcc0, 0x00000000},
+       {0xcc4, 0x00000000}, {0xcc8, 0x00000000},
+       {0xccc, 0x00000000}, {0xcd0, 0x00000000},
+       {0xcd4, 0x00000000}, {0xcd8, 0x64b22427},
+       {0xcdc, 0x00766932}, {0xce0, 0x00222222},
+       {0xce4, 0x00000000}, {0xce8, 0x37644302},
+       {0xcec, 0x2f97d40c}, {0xd00, 0x00080740},
+       {0xd04, 0x00020401}, {0xd08, 0x0000907f},
+       {0xd0c, 0x20010201}, {0xd10, 0xa0633333},
+       {0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b},
+       {0xd2c, 0xcc979975}, {0xd30, 0x00000000},
+       {0xd34, 0x80608000}, {0xd38, 0x00000000},
+       {0xd3c, 0x00027293}, {0xd40, 0x00000000},
+       {0xd44, 0x00000000}, {0xd48, 0x00000000},
+       {0xd4c, 0x00000000}, {0xd50, 0x6437140a},
+       {0xd54, 0x00000000}, {0xd58, 0x00000000},
+       {0xd5c, 0x30032064}, {0xd60, 0x4653de68},
+       {0xd64, 0x04518a3c}, {0xd68, 0x00002101},
+       {0xd6c, 0x2a201c16}, {0xd70, 0x1812362e},
+       {0xd74, 0x322c2220}, {0xd78, 0x000e3c24},
+       {0xe00, 0x24242424}, {0xe04, 0x24242424},
+       {0xe08, 0x03902024}, {0xe10, 0x24242424},
+       {0xe14, 0x24242424}, {0xe18, 0x24242424},
+       {0xe1c, 0x24242424}, {0xe28, 0x00000000},
+       {0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f},
+       {0xe38, 0x02140102}, {0xe3c, 0x681604c2},
+       {0xe40, 0x01007c00}, {0xe44, 0x01004800},
+       {0xe48, 0xfb000000}, {0xe4c, 0x000028d1},
+       {0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f},
+       {0xe58, 0x02140102}, {0xe5c, 0x28160d05},
+       {0xe60, 0x00000008}, {0xe68, 0x001b25a4},
+       {0xe6c, 0x631b25a0}, {0xe70, 0x631b25a0},
+       {0xe74, 0x081b25a0}, {0xe78, 0x081b25a0},
+       {0xe7c, 0x081b25a0}, {0xe80, 0x081b25a0},
+       {0xe84, 0x631b25a0}, {0xe88, 0x081b25a0},
+       {0xe8c, 0x631b25a0}, {0xed0, 0x631b25a0},
+       {0xed4, 0x631b25a0}, {0xed8, 0x631b25a0},
+       {0xedc, 0x001b25a0}, {0xee0, 0x001b25a0},
+       {0xeec, 0x6b1b25a0}, {0xee8, 0x31555448},
+       {0xf14, 0x00000003}, {0xf4c, 0x00000000},
+       {0xf00, 0x00000300},
+       {0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = {
+       {0xc78, 0x7b000001}, {0xc78, 0x7b010001},
+       {0xc78, 0x7b020001}, {0xc78, 0x7b030001},
+       {0xc78, 0x7b040001}, {0xc78, 0x7b050001},
+       {0xc78, 0x7a060001}, {0xc78, 0x79070001},
+       {0xc78, 0x78080001}, {0xc78, 0x77090001},
+       {0xc78, 0x760a0001}, {0xc78, 0x750b0001},
+       {0xc78, 0x740c0001}, {0xc78, 0x730d0001},
+       {0xc78, 0x720e0001}, {0xc78, 0x710f0001},
+       {0xc78, 0x70100001}, {0xc78, 0x6f110001},
+       {0xc78, 0x6e120001}, {0xc78, 0x6d130001},
+       {0xc78, 0x6c140001}, {0xc78, 0x6b150001},
+       {0xc78, 0x6a160001}, {0xc78, 0x69170001},
+       {0xc78, 0x68180001}, {0xc78, 0x67190001},
+       {0xc78, 0x661a0001}, {0xc78, 0x651b0001},
+       {0xc78, 0x641c0001}, {0xc78, 0x631d0001},
+       {0xc78, 0x621e0001}, {0xc78, 0x611f0001},
+       {0xc78, 0x60200001}, {0xc78, 0x49210001},
+       {0xc78, 0x48220001}, {0xc78, 0x47230001},
+       {0xc78, 0x46240001}, {0xc78, 0x45250001},
+       {0xc78, 0x44260001}, {0xc78, 0x43270001},
+       {0xc78, 0x42280001}, {0xc78, 0x41290001},
+       {0xc78, 0x402a0001}, {0xc78, 0x262b0001},
+       {0xc78, 0x252c0001}, {0xc78, 0x242d0001},
+       {0xc78, 0x232e0001}, {0xc78, 0x222f0001},
+       {0xc78, 0x21300001}, {0xc78, 0x20310001},
+       {0xc78, 0x06320001}, {0xc78, 0x05330001},
+       {0xc78, 0x04340001}, {0xc78, 0x03350001},
+       {0xc78, 0x02360001}, {0xc78, 0x01370001},
+       {0xc78, 0x00380001}, {0xc78, 0x00390001},
+       {0xc78, 0x003a0001}, {0xc78, 0x003b0001},
+       {0xc78, 0x003c0001}, {0xc78, 0x003d0001},
+       {0xc78, 0x003e0001}, {0xc78, 0x003f0001},
+       {0xc78, 0x7b400001}, {0xc78, 0x7b410001},
+       {0xc78, 0x7b420001}, {0xc78, 0x7b430001},
+       {0xc78, 0x7b440001}, {0xc78, 0x7b450001},
+       {0xc78, 0x7a460001}, {0xc78, 0x79470001},
+       {0xc78, 0x78480001}, {0xc78, 0x77490001},
+       {0xc78, 0x764a0001}, {0xc78, 0x754b0001},
+       {0xc78, 0x744c0001}, {0xc78, 0x734d0001},
+       {0xc78, 0x724e0001}, {0xc78, 0x714f0001},
+       {0xc78, 0x70500001}, {0xc78, 0x6f510001},
+       {0xc78, 0x6e520001}, {0xc78, 0x6d530001},
+       {0xc78, 0x6c540001}, {0xc78, 0x6b550001},
+       {0xc78, 0x6a560001}, {0xc78, 0x69570001},
+       {0xc78, 0x68580001}, {0xc78, 0x67590001},
+       {0xc78, 0x665a0001}, {0xc78, 0x655b0001},
+       {0xc78, 0x645c0001}, {0xc78, 0x635d0001},
+       {0xc78, 0x625e0001}, {0xc78, 0x615f0001},
+       {0xc78, 0x60600001}, {0xc78, 0x49610001},
+       {0xc78, 0x48620001}, {0xc78, 0x47630001},
+       {0xc78, 0x46640001}, {0xc78, 0x45650001},
+       {0xc78, 0x44660001}, {0xc78, 0x43670001},
+       {0xc78, 0x42680001}, {0xc78, 0x41690001},
+       {0xc78, 0x406a0001}, {0xc78, 0x266b0001},
+       {0xc78, 0x256c0001}, {0xc78, 0x246d0001},
+       {0xc78, 0x236e0001}, {0xc78, 0x226f0001},
+       {0xc78, 0x21700001}, {0xc78, 0x20710001},
+       {0xc78, 0x06720001}, {0xc78, 0x05730001},
+       {0xc78, 0x04740001}, {0xc78, 0x03750001},
+       {0xc78, 0x02760001}, {0xc78, 0x01770001},
+       {0xc78, 0x00780001}, {0xc78, 0x00790001},
+       {0xc78, 0x007a0001}, {0xc78, 0x007b0001},
+       {0xc78, 0x007c0001}, {0xc78, 0x007d0001},
+       {0xc78, 0x007e0001}, {0xc78, 0x007f0001},
+       {0xc78, 0x3800001e}, {0xc78, 0x3801001e},
+       {0xc78, 0x3802001e}, {0xc78, 0x3803001e},
+       {0xc78, 0x3804001e}, {0xc78, 0x3805001e},
+       {0xc78, 0x3806001e}, {0xc78, 0x3807001e},
+       {0xc78, 0x3808001e}, {0xc78, 0x3c09001e},
+       {0xc78, 0x3e0a001e}, {0xc78, 0x400b001e},
+       {0xc78, 0x440c001e}, {0xc78, 0x480d001e},
+       {0xc78, 0x4c0e001e}, {0xc78, 0x500f001e},
+       {0xc78, 0x5210001e}, {0xc78, 0x5611001e},
+       {0xc78, 0x5a12001e}, {0xc78, 0x5e13001e},
+       {0xc78, 0x6014001e}, {0xc78, 0x6015001e},
+       {0xc78, 0x6016001e}, {0xc78, 0x6217001e},
+       {0xc78, 0x6218001e}, {0xc78, 0x6219001e},
+       {0xc78, 0x621a001e}, {0xc78, 0x621b001e},
+       {0xc78, 0x621c001e}, {0xc78, 0x621d001e},
+       {0xc78, 0x621e001e}, {0xc78, 0x621f001e},
+       {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
+       {0xc78, 0x7b000001}, {0xc78, 0x7b010001},
+       {0xc78, 0x7b020001}, {0xc78, 0x7b030001},
+       {0xc78, 0x7b040001}, {0xc78, 0x7b050001},
+       {0xc78, 0x7b060001}, {0xc78, 0x7b070001},
+       {0xc78, 0x7b080001}, {0xc78, 0x7a090001},
+       {0xc78, 0x790a0001}, {0xc78, 0x780b0001},
+       {0xc78, 0x770c0001}, {0xc78, 0x760d0001},
+       {0xc78, 0x750e0001}, {0xc78, 0x740f0001},
+       {0xc78, 0x73100001}, {0xc78, 0x72110001},
+       {0xc78, 0x71120001}, {0xc78, 0x70130001},
+       {0xc78, 0x6f140001}, {0xc78, 0x6e150001},
+       {0xc78, 0x6d160001}, {0xc78, 0x6c170001},
+       {0xc78, 0x6b180001}, {0xc78, 0x6a190001},
+       {0xc78, 0x691a0001}, {0xc78, 0x681b0001},
+       {0xc78, 0x671c0001}, {0xc78, 0x661d0001},
+       {0xc78, 0x651e0001}, {0xc78, 0x641f0001},
+       {0xc78, 0x63200001}, {0xc78, 0x62210001},
+       {0xc78, 0x61220001}, {0xc78, 0x60230001},
+       {0xc78, 0x46240001}, {0xc78, 0x45250001},
+       {0xc78, 0x44260001}, {0xc78, 0x43270001},
+       {0xc78, 0x42280001}, {0xc78, 0x41290001},
+       {0xc78, 0x402a0001}, {0xc78, 0x262b0001},
+       {0xc78, 0x252c0001}, {0xc78, 0x242d0001},
+       {0xc78, 0x232e0001}, {0xc78, 0x222f0001},
+       {0xc78, 0x21300001}, {0xc78, 0x20310001},
+       {0xc78, 0x06320001}, {0xc78, 0x05330001},
+       {0xc78, 0x04340001}, {0xc78, 0x03350001},
+       {0xc78, 0x02360001}, {0xc78, 0x01370001},
+       {0xc78, 0x00380001}, {0xc78, 0x00390001},
+       {0xc78, 0x003a0001}, {0xc78, 0x003b0001},
+       {0xc78, 0x003c0001}, {0xc78, 0x003d0001},
+       {0xc78, 0x003e0001}, {0xc78, 0x003f0001},
+       {0xc78, 0x7b400001}, {0xc78, 0x7b410001},
+       {0xc78, 0x7b420001}, {0xc78, 0x7b430001},
+       {0xc78, 0x7b440001}, {0xc78, 0x7b450001},
+       {0xc78, 0x7b460001}, {0xc78, 0x7b470001},
+       {0xc78, 0x7b480001}, {0xc78, 0x7a490001},
+       {0xc78, 0x794a0001}, {0xc78, 0x784b0001},
+       {0xc78, 0x774c0001}, {0xc78, 0x764d0001},
+       {0xc78, 0x754e0001}, {0xc78, 0x744f0001},
+       {0xc78, 0x73500001}, {0xc78, 0x72510001},
+       {0xc78, 0x71520001}, {0xc78, 0x70530001},
+       {0xc78, 0x6f540001}, {0xc78, 0x6e550001},
+       {0xc78, 0x6d560001}, {0xc78, 0x6c570001},
+       {0xc78, 0x6b580001}, {0xc78, 0x6a590001},
+       {0xc78, 0x695a0001}, {0xc78, 0x685b0001},
+       {0xc78, 0x675c0001}, {0xc78, 0x665d0001},
+       {0xc78, 0x655e0001}, {0xc78, 0x645f0001},
+       {0xc78, 0x63600001}, {0xc78, 0x62610001},
+       {0xc78, 0x61620001}, {0xc78, 0x60630001},
+       {0xc78, 0x46640001}, {0xc78, 0x45650001},
+       {0xc78, 0x44660001}, {0xc78, 0x43670001},
+       {0xc78, 0x42680001}, {0xc78, 0x41690001},
+       {0xc78, 0x406a0001}, {0xc78, 0x266b0001},
+       {0xc78, 0x256c0001}, {0xc78, 0x246d0001},
+       {0xc78, 0x236e0001}, {0xc78, 0x226f0001},
+       {0xc78, 0x21700001}, {0xc78, 0x20710001},
+       {0xc78, 0x06720001}, {0xc78, 0x05730001},
+       {0xc78, 0x04740001}, {0xc78, 0x03750001},
+       {0xc78, 0x02760001}, {0xc78, 0x01770001},
+       {0xc78, 0x00780001}, {0xc78, 0x00790001},
+       {0xc78, 0x007a0001}, {0xc78, 0x007b0001},
+       {0xc78, 0x007c0001}, {0xc78, 0x007d0001},
+       {0xc78, 0x007e0001}, {0xc78, 0x007f0001},
+       {0xc78, 0x3800001e}, {0xc78, 0x3801001e},
+       {0xc78, 0x3802001e}, {0xc78, 0x3803001e},
+       {0xc78, 0x3804001e}, {0xc78, 0x3805001e},
+       {0xc78, 0x3806001e}, {0xc78, 0x3807001e},
+       {0xc78, 0x3808001e}, {0xc78, 0x3c09001e},
+       {0xc78, 0x3e0a001e}, {0xc78, 0x400b001e},
+       {0xc78, 0x440c001e}, {0xc78, 0x480d001e},
+       {0xc78, 0x4c0e001e}, {0xc78, 0x500f001e},
+       {0xc78, 0x5210001e}, {0xc78, 0x5611001e},
+       {0xc78, 0x5a12001e}, {0xc78, 0x5e13001e},
+       {0xc78, 0x6014001e}, {0xc78, 0x6015001e},
+       {0xc78, 0x6016001e}, {0xc78, 0x6217001e},
+       {0xc78, 0x6218001e}, {0xc78, 0x6219001e},
+       {0xc78, 0x621a001e}, {0xc78, 0x621b001e},
+       {0xc78, 0x621c001e}, {0xc78, 0x621d001e},
+       {0xc78, 0x621e001e}, {0xc78, 0x621f001e},
+       {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
+       {0x00, 0x00030159}, {0x01, 0x00031284},
+       {0x02, 0x00098000}, {0x03, 0x00039c63},
+       {0x04, 0x000210e7}, {0x09, 0x0002044f},
+       {0x0a, 0x0001a3f1}, {0x0b, 0x00014787},
+       {0x0c, 0x000896fe}, {0x0d, 0x0000e02c},
+       {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+       {0x19, 0x00000000}, {0x1a, 0x00030355},
+       {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+       {0x1d, 0x000a1250}, {0x1e, 0x0000024f},
+       {0x1f, 0x00000000}, {0x20, 0x0000b614},
+       {0x21, 0x0006c000}, {0x22, 0x00000000},
+       {0x23, 0x00001558}, {0x24, 0x00000060},
+       {0x25, 0x00000483}, {0x26, 0x0004f000},
+       {0x27, 0x000ec7d9}, {0x28, 0x00057730},
+       {0x29, 0x00004783}, {0x2a, 0x00000001},
+       {0x2b, 0x00021334}, {0x2a, 0x00000000},
+       {0x2b, 0x00000054}, {0x2a, 0x00000001},
+       {0x2b, 0x00000808}, {0x2b, 0x00053333},
+       {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+       {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+       {0x2b, 0x00000808}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+       {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+       {0x2b, 0x00000808}, {0x2b, 0x00073333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+       {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+       {0x2b, 0x00000709}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+       {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+       {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+       {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+       {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+       {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+       {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+       {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+       {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+       {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+       {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+       {0x10, 0x0002000f}, {0x11, 0x000203f9},
+       {0x10, 0x0003000f}, {0x11, 0x000ff500},
+       {0x10, 0x00000000}, {0x11, 0x00000000},
+       {0x10, 0x0008000f}, {0x11, 0x0003f100},
+       {0x10, 0x0009000f}, {0x11, 0x00023100},
+       {0x12, 0x00032000}, {0x12, 0x00071000},
+       {0x12, 0x000b0000}, {0x12, 0x000fc000},
+       {0x13, 0x000287b3}, {0x13, 0x000244b7},
+       {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+       {0x13, 0x00018493}, {0x13, 0x0001429b},
+       {0x13, 0x00010299}, {0x13, 0x0000c29c},
+       {0x13, 0x000081a0}, {0x13, 0x000040ac},
+       {0x13, 0x00000020}, {0x14, 0x0001944c},
+       {0x14, 0x00059444}, {0x14, 0x0009944c},
+       {0x14, 0x000d9444}, {0x15, 0x0000f474},
+       {0x15, 0x0004f477}, {0x15, 0x0008f455},
+       {0x15, 0x000cf455}, {0x16, 0x00000339},
+       {0x16, 0x00040339}, {0x16, 0x00080339},
+       {0x16, 0x000c0366}, {0x00, 0x00010159},
+       {0x18, 0x0000f401}, {0xfe, 0x00000000},
+       {0xfe, 0x00000000}, {0x1f, 0x00000003},
+       {0xfe, 0x00000000}, {0xfe, 0x00000000},
+       {0x1e, 0x00000247}, {0x1f, 0x00000000},
+       {0x00, 0x00030159},
+       {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
+       {0x00, 0x00030159}, {0x01, 0x00031284},
+       {0x02, 0x00098000}, {0x03, 0x00018c63},
+       {0x04, 0x000210e7}, {0x09, 0x0002044f},
+       {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+       {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+       {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+       {0x19, 0x00000000}, {0x1a, 0x00010255},
+       {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+       {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+       {0x1f, 0x00080001}, {0x20, 0x0000b614},
+       {0x21, 0x0006c000}, {0x22, 0x00000000},
+       {0x23, 0x00001558}, {0x24, 0x00000060},
+       {0x25, 0x00000483}, {0x26, 0x0004f000},
+       {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+       {0x29, 0x00004783}, {0x2a, 0x00000001},
+       {0x2b, 0x00021334}, {0x2a, 0x00000000},
+       {0x2b, 0x00000054}, {0x2a, 0x00000001},
+       {0x2b, 0x00000808}, {0x2b, 0x00053333},
+       {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+       {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+       {0x2b, 0x00000808}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+       {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+       {0x2b, 0x00000808}, {0x2b, 0x00073333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+       {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+       {0x2b, 0x00000709}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+       {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+       {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+       {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+       {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+       {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+       {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+       {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+       {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+       {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+       {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+       {0x10, 0x0002000f}, {0x11, 0x000203f9},
+       {0x10, 0x0003000f}, {0x11, 0x000ff500},
+       {0x10, 0x00000000}, {0x11, 0x00000000},
+       {0x10, 0x0008000f}, {0x11, 0x0003f100},
+       {0x10, 0x0009000f}, {0x11, 0x00023100},
+       {0x12, 0x00032000}, {0x12, 0x00071000},
+       {0x12, 0x000b0000}, {0x12, 0x000fc000},
+       {0x13, 0x000287b3}, {0x13, 0x000244b7},
+       {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+       {0x13, 0x00018493}, {0x13, 0x0001429b},
+       {0x13, 0x00010299}, {0x13, 0x0000c29c},
+       {0x13, 0x000081a0}, {0x13, 0x000040ac},
+       {0x13, 0x00000020}, {0x14, 0x0001944c},
+       {0x14, 0x00059444}, {0x14, 0x0009944c},
+       {0x14, 0x000d9444}, {0x15, 0x0000f424},
+       {0x15, 0x0004f424}, {0x15, 0x0008f424},
+       {0x15, 0x000cf424}, {0x16, 0x000e0330},
+       {0x16, 0x000a0330}, {0x16, 0x00060330},
+       {0x16, 0x00020330}, {0x00, 0x00010159},
+       {0x18, 0x0000f401}, {0xfe, 0x00000000},
+       {0xfe, 0x00000000}, {0x1f, 0x00080003},
+       {0xfe, 0x00000000}, {0xfe, 0x00000000},
+       {0x1e, 0x00044457}, {0x1f, 0x00080000},
+       {0x00, 0x00030159},
+       {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
+       {0x00, 0x00030159}, {0x01, 0x00031284},
+       {0x02, 0x00098000}, {0x03, 0x00018c63},
+       {0x04, 0x000210e7}, {0x09, 0x0002044f},
+       {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+       {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+       {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+       {0x12, 0x00032000}, {0x12, 0x00071000},
+       {0x12, 0x000b0000}, {0x12, 0x000fc000},
+       {0x13, 0x000287af}, {0x13, 0x000244b7},
+       {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+       {0x13, 0x00018493}, {0x13, 0x00014297},
+       {0x13, 0x00010295}, {0x13, 0x0000c298},
+       {0x13, 0x0000819c}, {0x13, 0x000040a8},
+       {0x13, 0x0000001c}, {0x14, 0x0001944c},
+       {0x14, 0x00059444}, {0x14, 0x0009944c},
+       {0x14, 0x000d9444}, {0x15, 0x0000f424},
+       {0x15, 0x0004f424}, {0x15, 0x0008f424},
+       {0x15, 0x000cf424}, {0x16, 0x000e0330},
+       {0x16, 0x000a0330}, {0x16, 0x00060330},
+       {0x16, 0x00020330},
+       {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
+       {0x00, 0x00030159}, {0x01, 0x00031284},
+       {0x02, 0x00098000}, {0x03, 0x00018c63},
+       {0x04, 0x000210e7}, {0x09, 0x0002044f},
+       {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+       {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+       {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+       {0x19, 0x00000000}, {0x1a, 0x00010255},
+       {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+       {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+       {0x1f, 0x00080001}, {0x20, 0x0000b614},
+       {0x21, 0x0006c000}, {0x22, 0x00000000},
+       {0x23, 0x00001558}, {0x24, 0x00000060},
+       {0x25, 0x00000483}, {0x26, 0x0004f000},
+       {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+       {0x29, 0x00004783}, {0x2a, 0x00000001},
+       {0x2b, 0x00021334}, {0x2a, 0x00000000},
+       {0x2b, 0x00000054}, {0x2a, 0x00000001},
+       {0x2b, 0x00000808}, {0x2b, 0x00053333},
+       {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+       {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+       {0x2b, 0x00000808}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+       {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+       {0x2b, 0x00000808}, {0x2b, 0x00073333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+       {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+       {0x2b, 0x00000709}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+       {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+       {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+       {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+       {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+       {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+       {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+       {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+       {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+       {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+       {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+       {0x10, 0x0002000f}, {0x11, 0x000203f9},
+       {0x10, 0x0003000f}, {0x11, 0x000ff500},
+       {0x10, 0x00000000}, {0x11, 0x00000000},
+       {0x10, 0x0008000f}, {0x11, 0x0003f100},
+       {0x10, 0x0009000f}, {0x11, 0x00023100},
+       {0x12, 0x00032000}, {0x12, 0x00071000},
+       {0x12, 0x000b0000}, {0x12, 0x000fc000},
+       {0x13, 0x000287b3}, {0x13, 0x000244b7},
+       {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+       {0x13, 0x00018493}, {0x13, 0x0001429b},
+       {0x13, 0x00010299}, {0x13, 0x0000c29c},
+       {0x13, 0x000081a0}, {0x13, 0x000040ac},
+       {0x13, 0x00000020}, {0x14, 0x0001944c},
+       {0x14, 0x00059444}, {0x14, 0x0009944c},
+       {0x14, 0x000d9444}, {0x15, 0x0000f405},
+       {0x15, 0x0004f405}, {0x15, 0x0008f405},
+       {0x15, 0x000cf405}, {0x16, 0x000e0330},
+       {0x16, 0x000a0330}, {0x16, 0x00060330},
+       {0x16, 0x00020330}, {0x00, 0x00010159},
+       {0x18, 0x0000f401}, {0xfe, 0x00000000},
+       {0xfe, 0x00000000}, {0x1f, 0x00080003},
+       {0xfe, 0x00000000}, {0xfe, 0x00000000},
+       {0x1e, 0x00044457}, {0x1f, 0x00080000},
+       {0x00, 0x00030159},
+       {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
+       {0x00, 0x00030159}, {0x01, 0x00031284},
+       {0x02, 0x00098000}, {0x03, 0x00018c63},
+       {0x04, 0x000210e7}, {0x09, 0x0002044f},
+       {0x0a, 0x0001adb0}, {0x0b, 0x00054867},
+       {0x0c, 0x0008992e}, {0x0d, 0x0000e529},
+       {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+       {0x19, 0x00000000}, {0x1a, 0x00000255},
+       {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+       {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+       {0x1f, 0x00080001}, {0x20, 0x0000b614},
+       {0x21, 0x0006c000}, {0x22, 0x0000083c},
+       {0x23, 0x00001558}, {0x24, 0x00000060},
+       {0x25, 0x00000483}, {0x26, 0x0004f000},
+       {0x27, 0x000ec7d9}, {0x28, 0x000977c0},
+       {0x29, 0x00004783}, {0x2a, 0x00000001},
+       {0x2b, 0x00021334}, {0x2a, 0x00000000},
+       {0x2b, 0x00000054}, {0x2a, 0x00000001},
+       {0x2b, 0x00000808}, {0x2b, 0x00053333},
+       {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+       {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+       {0x2b, 0x00000808}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+       {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+       {0x2b, 0x00000808}, {0x2b, 0x00073333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+       {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+       {0x2b, 0x00000709}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+       {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+       {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+       {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+       {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+       {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+       {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+       {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+       {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+       {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+       {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+       {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+       {0x10, 0x0002000f}, {0x11, 0x000203f9},
+       {0x10, 0x0003000f}, {0x11, 0x000ff500},
+       {0x10, 0x00000000}, {0x11, 0x00000000},
+       {0x10, 0x0008000f}, {0x11, 0x0003f100},
+       {0x10, 0x0009000f}, {0x11, 0x00023100},
+       {0x12, 0x000d8000}, {0x12, 0x00090000},
+       {0x12, 0x00051000}, {0x12, 0x00012000},
+       {0x13, 0x00028fb4}, {0x13, 0x00024fa8},
+       {0x13, 0x000207a4}, {0x13, 0x0001c3b0},
+       {0x13, 0x000183a4}, {0x13, 0x00014398},
+       {0x13, 0x000101a4}, {0x13, 0x0000c198},
+       {0x13, 0x000080a4}, {0x13, 0x00004098},
+       {0x13, 0x00000000}, {0x14, 0x0001944c},
+       {0x14, 0x00059444}, {0x14, 0x0009944c},
+       {0x14, 0x000d9444}, {0x15, 0x0000f405},
+       {0x15, 0x0004f405}, {0x15, 0x0008f405},
+       {0x15, 0x000cf405}, {0x16, 0x000e0330},
+       {0x16, 0x000a0330}, {0x16, 0x00060330},
+       {0x16, 0x00020330}, {0x00, 0x00010159},
+       {0x18, 0x0000f401}, {0xfe, 0x00000000},
+       {0xfe, 0x00000000}, {0x1f, 0x00080003},
+       {0xfe, 0x00000000}, {0xfe, 0x00000000},
+       {0x1e, 0x00044457}, {0x1f, 0x00080000},
+       {0x00, 0x00030159},
+       {0xff, 0xffffffff}
+};
+
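+/*
+ * Per-path (RF_A/RF_B) serial interface registers used by the RF
+ * register read/write helpers below.
+ */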
+static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
+       {       /* RF_A */
+               .hssiparm1 = REG_FPGA0_XA_HSSI_PARM1,
+               .hssiparm2 = REG_FPGA0_XA_HSSI_PARM2,
+               .lssiparm = REG_FPGA0_XA_LSSI_PARM,
+               .hspiread = REG_HSPI_XA_READBACK,
+               .lssiread = REG_FPGA0_XA_LSSI_READBACK,
+               .rf_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL,
+       },
+       {       /* RF_B */
+               .hssiparm1 = REG_FPGA0_XB_HSSI_PARM1,
+               .hssiparm2 = REG_FPGA0_XB_HSSI_PARM2,
+               .lssiparm = REG_FPGA0_XB_LSSI_PARM,
+               .hspiread = REG_HSPI_XB_READBACK,
+               .lssiread = REG_FPGA0_XB_LSSI_READBACK,
+               .rf_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL,
+       },
+};
+
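+/* BB registers used by the IQ calibration (iqk) code */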
+static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
+       REG_OFDM0_XA_RX_IQ_IMBALANCE,
+       REG_OFDM0_XB_RX_IQ_IMBALANCE,
+       REG_OFDM0_ENERGY_CCA_THRES,
+       REG_OFDM0_AGCR_SSI_TABLE,
+       REG_OFDM0_XA_TX_IQ_IMBALANCE,
+       REG_OFDM0_XB_TX_IQ_IMBALANCE,
+       REG_OFDM0_XC_TX_AFE,
+       REG_OFDM0_XD_TX_AFE,
+       REG_OFDM0_RX_IQ_EXT_ANTA
+};
+
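+/*
+ * Register I/O helpers. All accesses go over USB control transfers;
+ * the shared priv->usb_buf is protected by usb_buf_mutex, and
+ * multi-byte values are little endian on the wire.
+ */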
+static u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
+{
+       struct usb_device *udev = priv->udev;
+       int len;
+       u8 data;
+
+       mutex_lock(&priv->usb_buf_mutex);
+       len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+                             REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+                             addr, 0, &priv->usb_buf.val8, sizeof(u8),
+                             RTW_USB_CONTROL_MSG_TIMEOUT);
+       data = priv->usb_buf.val8;
+       mutex_unlock(&priv->usb_buf_mutex);
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+               dev_info(&udev->dev, "%s(%04x)   = 0x%02x, len %i\n",
+                        __func__, addr, data, len);
+       return data;
+}
+
+static u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
+{
+       struct usb_device *udev = priv->udev;
+       int len;
+       u16 data;
+
+       mutex_lock(&priv->usb_buf_mutex);
+       len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+                             REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+                             addr, 0, &priv->usb_buf.val16, sizeof(u16),
+                             RTW_USB_CONTROL_MSG_TIMEOUT);
+       data = le16_to_cpu(priv->usb_buf.val16);
+       mutex_unlock(&priv->usb_buf_mutex);
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+               dev_info(&udev->dev, "%s(%04x)  = 0x%04x, len %i\n",
+                        __func__, addr, data, len);
+       return data;
+}
+
+static u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
+{
+       struct usb_device *udev = priv->udev;
+       int len;
+       u32 data;
+
+       mutex_lock(&priv->usb_buf_mutex);
+       len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+                             REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+                             addr, 0, &priv->usb_buf.val32, sizeof(u32),
+                             RTW_USB_CONTROL_MSG_TIMEOUT);
+       data = le32_to_cpu(priv->usb_buf.val32);
+       mutex_unlock(&priv->usb_buf_mutex);
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+               dev_info(&udev->dev, "%s(%04x)  = 0x%08x, len %i\n",
+                        __func__, addr, data, len);
+       return data;
+}
+
+static int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
+{
+       struct usb_device *udev = priv->udev;
+       int ret;
+
+       mutex_lock(&priv->usb_buf_mutex);
+       priv->usb_buf.val8 = val;
+       ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                             REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+                             addr, 0, &priv->usb_buf.val8, sizeof(u8),
+                             RTW_USB_CONTROL_MSG_TIMEOUT);
+
+       mutex_unlock(&priv->usb_buf_mutex);
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+               dev_info(&udev->dev, "%s(%04x) = 0x%02x\n",
+                        __func__, addr, val);
+       return ret;
+}
+
+static int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
+{
+       struct usb_device *udev = priv->udev;
+       int ret;
+
+       mutex_lock(&priv->usb_buf_mutex);
+       priv->usb_buf.val16 = cpu_to_le16(val);
+       ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                             REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+                             addr, 0, &priv->usb_buf.val16, sizeof(u16),
+                             RTW_USB_CONTROL_MSG_TIMEOUT);
+       mutex_unlock(&priv->usb_buf_mutex);
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+               dev_info(&udev->dev, "%s(%04x) = 0x%04x\n",
+                        __func__, addr, val);
+       return ret;
+}
+
+static int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val)
+{
+       struct usb_device *udev = priv->udev;
+       int ret;
+
+       mutex_lock(&priv->usb_buf_mutex);
+       priv->usb_buf.val32 = cpu_to_le32(val);
+       ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                             REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+                             addr, 0, &priv->usb_buf.val32, sizeof(u32),
+                             RTW_USB_CONTROL_MSG_TIMEOUT);
+       mutex_unlock(&priv->usb_buf_mutex);
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+               dev_info(&udev->dev, "%s(%04x) = 0x%08x\n",
+                        __func__, addr, val);
+       return ret;
+}
+
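+/*
+ * Write a buffer in chunks of the device's writeN_block_size, sending
+ * any remainder as a final shorter transfer.
+ */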
+static int
+rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len)
+{
+       struct usb_device *udev = priv->udev;
+       int blocksize = priv->fops->writeN_block_size;
+       int ret, i, count, remainder;
+
+       count = len / blocksize;
+       remainder = len % blocksize;
+
+       for (i = 0; i < count; i++) {
+               ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                                     REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+                                     addr, 0, buf, blocksize,
+                                     RTW_USB_CONTROL_MSG_TIMEOUT);
+               if (ret != blocksize)
+                       goto write_error;
+
+               addr += blocksize;
+               buf += blocksize;
+       }
+
+       if (remainder) {
+               ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                                     REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+                                     addr, 0, buf, remainder,
+                                     RTW_USB_CONTROL_MSG_TIMEOUT);
+               if (ret != remainder)
+                       goto write_error;
+       }
+
+       return len;
+
+write_error:
+       dev_info(&udev->dev,
+                "%s: Failed to write block at addr: %04x size: %04x\n",
+                __func__, addr, blocksize);
+       return -EAGAIN;
+}
+
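+/*
+ * Read an RF register: clock the address out via the path's HSSI
+ * parameter register while toggling EDGE_READ on the path A control
+ * register, then fetch the 20 bit result from the HSPI or LSSI
+ * readback register depending on whether the path is in PI mode.
+ */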
+static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
+                              enum rtl8xxxu_rfpath path, u8 reg)
+{
+       u32 hssia, val32, retval;
+
+       hssia = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+       if (path != RF_A)
+               val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm2);
+       else
+               val32 = hssia;
+
+       val32 &= ~FPGA0_HSSI_PARM2_ADDR_MASK;
+       val32 |= (reg << FPGA0_HSSI_PARM2_ADDR_SHIFT);
+       val32 |= FPGA0_HSSI_PARM2_EDGE_READ;
+       hssia &= ~FPGA0_HSSI_PARM2_EDGE_READ;
+       rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia);
+
+       udelay(10);
+
+       rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].hssiparm2, val32);
+       udelay(100);
+
+       hssia |= FPGA0_HSSI_PARM2_EDGE_READ;
+       rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia);
+       udelay(10);
+
+       val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm1);
+       if (val32 & FPGA0_HSSI_PARM1_PI)
+               retval = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hspiread);
+       else
+               retval = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].lssiread);
+
+       retval &= 0xfffff;
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_READ)
+               dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
+                        __func__, reg, retval);
+       return retval;
+}
+
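+/*
+ * Write an RF register: the address and 20 bit data word are combined
+ * and clocked out through the path's LSSI parameter register.
+ */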
+static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
+                               enum rtl8xxxu_rfpath path, u8 reg, u32 data)
+{
+       int ret, retval;
+       u32 dataaddr;
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE)
+               dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
+                        __func__, reg, data);
+
+       data &= FPGA0_LSSI_PARM_DATA_MASK;
+       dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data;
+
+       /* Use XB for path B */
+       ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr);
+       if (ret != sizeof(dataaddr))
+               retval = -EIO;
+       else
+               retval = 0;
+
+       udelay(1);
+
+       return retval;
+}
+
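+/*
+ * Send a host-to-card (H2C) command to the firmware. Commands rotate
+ * round-robin through the H2C mailboxes; REG_HMTFR must report the
+ * mailbox free before it can be reused.
+ */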
+static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c)
+{
+       struct device *dev = &priv->udev->dev;
+       int mbox_nr, retry, retval = 0;
+       int mbox_reg, mbox_ext_reg;
+       u8 val8;
+
+       mutex_lock(&priv->h2c_mutex);
+
+       mbox_nr = priv->next_mbox;
+       mbox_reg = REG_HMBOX_0 + (mbox_nr * 4);
+       mbox_ext_reg = REG_HMBOX_EXT_0 + (mbox_nr * 2);
+
+       /*
+        * MBOX ready?
+        */
+       retry = 100;
+       do {
+               val8 = rtl8xxxu_read8(priv, REG_HMTFR);
+               if (!(val8 & BIT(mbox_nr)))
+                       break;
+       } while (retry--);
+
+       if (retry < 0) {
+               dev_dbg(dev, "%s: Mailbox busy\n", __func__);
+               retval = -EBUSY;
+               goto error;
+       }
+
+       /*
+        * Need to swap as it's being swapped again by rtl8xxxu_write16/32()
+        */
+       if (h2c->cmd.cmd & H2C_EXT) {
+               rtl8xxxu_write16(priv, mbox_ext_reg,
+                                le16_to_cpu(h2c->raw.ext));
+               if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+                       dev_info(dev, "H2C_EXT %04x\n",
+                                le16_to_cpu(h2c->raw.ext));
+       }
+       rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data));
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+               dev_info(dev, "H2C %08x\n", le32_to_cpu(h2c->raw.data));
+
+       priv->next_mbox = (mbox_nr + 1) % H2C_MAX_MBOX;
+
+error:
+       mutex_unlock(&priv->h2c_mutex);
+       return retval;
+}
+
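+/*
+ * Power up the RF front end: enable the SPS regulator, enable the
+ * configured TX/RX paths, and clear TXPAUSE to resume transmission.
+ */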
+static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u32 val32;
+
+       val8 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
+       val8 |= BIT(0) | BIT(3);
+       rtl8xxxu_write8(priv, REG_SPS0_CTRL, val8);
+
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM);
+       val32 &= ~(BIT(4) | BIT(5));
+       val32 |= BIT(3);
+       if (priv->rf_paths == 2) {
+               val32 &= ~(BIT(20) | BIT(21));
+               val32 |= BIT(19);
+       }
+       rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+       val32 &= ~OFDM_RF_PATH_TX_MASK;
+       if (priv->tx_paths == 2)
+               val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B;
+       else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c)
+               val32 |= OFDM_RF_PATH_TX_B;
+       else
+               val32 |= OFDM_RF_PATH_TX_A;
+       rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+       val32 &= ~FPGA_RF_MODE_JAPAN;
+       rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+       if (priv->rf_paths == 2)
+               rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x63db25a0);
+       else
+               rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x631b25a0);
+
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x32d95);
+       if (priv->rf_paths == 2)
+               rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x32d95);
+
+       rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
+{
+       u8 sps0;
+       u32 val32;
+
+       rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+       sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
+
+       /* RF RX code for preamble power saving */
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM);
+       val32 &= ~(BIT(3) | BIT(4) | BIT(5));
+       if (priv->rf_paths == 2)
+               val32 &= ~(BIT(19) | BIT(20) | BIT(21));
+       rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32);
+
+       /* Disable TX for four paths */
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+       val32 &= ~OFDM_RF_PATH_TX_MASK;
+       rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+       /* Enable power saving */
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+       val32 |= FPGA_RF_MODE_JAPAN;
+       rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+       /* AFE control register to power down bits [30:22] */
+       if (priv->rf_paths == 2)
+               rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00db25a0);
+       else
+               rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x001b25a0);
+
+       /* Power down RF module */
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0);
+       if (priv->rf_paths == 2)
+               rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0);
+
+       sps0 &= ~(BIT(0) | BIT(3));
+       rtl8xxxu_write8(priv, REG_SPS0_CTRL, sps0);
+}
+
+
+static void rtl8723a_stop_tx_beacon(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+
+       val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL + 2);
+       val8 &= ~BIT(6);
+       rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL + 2, val8);
+
+       rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 1, 0x64);
+       val8 = rtl8xxxu_read8(priv, REG_TBTT_PROHIBIT + 2);
+       val8 &= ~BIT(0);
+       rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 2, val8);
+}
+
+
+/*
+ * The rtl8723a has 3 channel groups for its efuse settings. It only
+ * supports the 2.4GHz band, so channels 1 - 14:
+ *  group 0: channels 1 - 3
+ *  group 1: channels 4 - 9
+ *  group 2: channels 10 - 14
+ *
+ * Note: We index from 0 in the code
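+ *       e.g. channel 1 -> group 0, channel 7 -> group 1, channel 13 -> group 2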
+ */
+static int rtl8723a_channel_to_group(int channel)
+{
+       int group;
+
+       if (channel < 4)
+               group = 0;
+       else if (channel < 10)
+               group = 1;
+       else
+               group = 2;
+
+       return group;
+}
+
+static void rtl8723au_config_channel(struct ieee80211_hw *hw)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       u32 val32, rsr;
+       u8 val8, opmode;
+       bool ht = true;
+       int sec_ch_above, channel;
+       int i;
+
+       opmode = rtl8xxxu_read8(priv, REG_BW_OPMODE);
+       rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+       channel = hw->conf.chandef.chan->hw_value;
+
+       switch (hw->conf.chandef.width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               ht = false;
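+               /* fall through */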
+       case NL80211_CHAN_WIDTH_20:
+               opmode |= BW_OPMODE_20MHZ;
+               rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+               val32 &= ~FPGA_RF_MODE;
+               rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+               val32 &= ~FPGA_RF_MODE;
+               rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2);
+               val32 |= FPGA0_ANALOG2_20MHZ;
+               rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32);
+               break;
+       case NL80211_CHAN_WIDTH_40:
+               if (hw->conf.chandef.center_freq1 >
+                   hw->conf.chandef.chan->center_freq) {
+                       sec_ch_above = 1;
+                       channel += 2;
+               } else {
+                       sec_ch_above = 0;
+                       channel -= 2;
+               }
+
+               opmode &= ~BW_OPMODE_20MHZ;
+               rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+               rsr &= ~RSR_RSC_BANDWIDTH_40M;
+               if (sec_ch_above)
+                       rsr |= RSR_RSC_UPPER_SUB_CHANNEL;
+               else
+                       rsr |= RSR_RSC_LOWER_SUB_CHANNEL;
+               rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, rsr);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+               val32 |= FPGA_RF_MODE;
+               rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+               val32 |= FPGA_RF_MODE;
+               rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+               /*
+                * Set Control channel to upper or lower. These settings
+                * are required only for 40MHz
+                */
+               val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM);
+               val32 &= ~CCK0_SIDEBAND;
+               if (!sec_ch_above)
+                       val32 |= CCK0_SIDEBAND;
+               rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+               val32 &= ~OFDM_LSTF_PRIME_CH_MASK; /* 0xc00 */
+               if (sec_ch_above)
+                       val32 |= OFDM_LSTF_PRIME_CH_LOW;
+               else
+                       val32 |= OFDM_LSTF_PRIME_CH_HIGH;
+               rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2);
+               val32 &= ~FPGA0_ANALOG2_20MHZ;
+               rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+               val32 &= ~(FPGA0_PS_LOWER_CHANNEL | FPGA0_PS_UPPER_CHANNEL);
+               if (sec_ch_above)
+                       val32 |= FPGA0_PS_UPPER_CHANNEL;
+               else
+                       val32 |= FPGA0_PS_LOWER_CHANNEL;
+               rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+               break;
+
+       default:
+               break;
+       }
+
+       for (i = RF_A; i < priv->rf_paths; i++) {
+               val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+               val32 &= ~MODE_AG_CHANNEL_MASK;
+               val32 |= channel;
+               rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+       }
+
+       if (ht)
+               val8 = 0x0e;
+       else
+               val8 = 0x0a;
+
+       rtl8xxxu_write8(priv, REG_SIFS_CCK + 1, val8);
+       rtl8xxxu_write8(priv, REG_SIFS_OFDM + 1, val8);
+
+       rtl8xxxu_write16(priv, REG_R2T_SIFS, 0x0808);
+       rtl8xxxu_write16(priv, REG_T2T_SIFS, 0x0a0a);
+
+       for (i = RF_A; i < priv->rf_paths; i++) {
+               val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+               if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+                       val32 &= ~MODE_AG_CHANNEL_20MHZ;
+               else
+                       val32 |= MODE_AG_CHANNEL_20MHZ;
+               rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+       }
+}
+
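+/*
+ * Set the per-path TX power for the given channel. The CCK and HT40 1S
+ * base indices come straight from the efuse group for the channel; the
+ * OFDM and MCS values are derived from those via the efuse diff
+ * offsets, with the CCK/OFDM indices clamped to RF6052_MAX_TX_PWR
+ * before being replicated across the per-rate AGC registers.
+ */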
+static void
+rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+       u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
+       u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
+       u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
+       u8 val8;
+       int group, i;
+
+       group = rtl8723a_channel_to_group(channel);
+
+       cck[0] = priv->cck_tx_power_index_A[group];
+       cck[1] = priv->cck_tx_power_index_B[group];
+
+       ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
+       ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+
+       ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a;
+       ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b;
+
+       mcsbase[0] = ofdm[0];
+       mcsbase[1] = ofdm[1];
+       if (!ht40) {
+               mcsbase[0] += priv->ht20_tx_power_index_diff[group].a;
+               mcsbase[1] += priv->ht20_tx_power_index_diff[group].b;
+       }
+
+       if (priv->tx_paths > 1) {
+               if (ofdm[0] > priv->ht40_2s_tx_power_index_diff[group].a)
+                       ofdm[0] -=  priv->ht40_2s_tx_power_index_diff[group].a;
+               if (ofdm[1] > priv->ht40_2s_tx_power_index_diff[group].b)
+                       ofdm[1] -=  priv->ht40_2s_tx_power_index_diff[group].b;
+       }
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL)
+               dev_info(&priv->udev->dev,
+                        "%s: Setting TX power CCK A: %02x, "
+                        "CCK B: %02x, OFDM A: %02x, OFDM B: %02x\n",
+                        __func__, cck[0], cck[1], ofdm[0], ofdm[1]);
+
+       for (i = 0; i < RTL8723A_MAX_RF_PATHS; i++) {
+               if (cck[i] > RF6052_MAX_TX_PWR)
+                       cck[i] = RF6052_MAX_TX_PWR;
+               if (ofdm[i] > RF6052_MAX_TX_PWR)
+                       ofdm[i] = RF6052_MAX_TX_PWR;
+       }
+
+       val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+       val32 &= 0xffff00ff;
+       val32 |= (cck[0] << 8);
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+       val32 &= 0xff;
+       val32 |= ((cck[0] << 8) | (cck[0] << 16) | (cck[0] << 24));
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+       val32 &= 0xffffff00;
+       val32 |= cck[1];
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32);
+       val32 &= 0xff;
+       val32 |= ((cck[1] << 8) | (cck[1] << 16) | (cck[1] << 24));
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32);
+
+       ofdm_a = ofdmbase[0] | ofdmbase[0] << 8 |
+               ofdmbase[0] << 16 | ofdmbase[0] << 24;
+       ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 |
+               ofdmbase[1] << 16 | ofdmbase[1] << 24;
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b);
+
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b);
+
+       mcs_a = mcsbase[0] | mcsbase[0] << 8 |
+               mcsbase[0] << 16 | mcsbase[0] << 24;
+       mcs_b = mcsbase[1] | mcsbase[1] << 8 |
+               mcsbase[1] << 16 | mcsbase[1] << 24;
+
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b);
+
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b);
+
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a);
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b);
+
+       rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a);
+       for (i = 0; i < 3; i++) {
+               if (i != 2)
+                       val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
+               else
+                       val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
+               rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
+       }
+       rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b);
+       for (i = 0; i < 3; i++) {
+               if (i != 2)
+                       val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
+               else
+                       val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
+               rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
+       }
+}
+
+static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
+                                 enum nl80211_iftype linktype)
+{
+       u8 val8;
+
+       val8 = rtl8xxxu_read8(priv, REG_MSR);
+       val8 &= ~MSR_LINKTYPE_MASK;
+
+       switch (linktype) {
+       case NL80211_IFTYPE_UNSPECIFIED:
+               val8 |= MSR_LINKTYPE_NONE;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               val8 |= MSR_LINKTYPE_ADHOC;
+               break;
+       case NL80211_IFTYPE_STATION:
+               val8 |= MSR_LINKTYPE_STATION;
+               break;
+       case NL80211_IFTYPE_AP:
+               val8 |= MSR_LINKTYPE_AP;
+               break;
+       default:
+               goto out;
+       }
+
+       rtl8xxxu_write8(priv, REG_MSR, val8);
+out:
+       return;
+}
+
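+/*
+ * Pack the short and long retry limits into REG_RETRY_LIMIT. For
+ * example, short = 0x30 and long = 0x30 should yield 0x3030, assuming
+ * the short limit lands in the high byte and the long limit in the
+ * low byte.
+ */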
+static void
+rtl8xxxu_set_retry(struct rtl8xxxu_priv *priv, u16 short_retry, u16 long_retry)
+{
+       u16 val16;
+
+       val16 = ((short_retry << RETRY_LIMIT_SHORT_SHIFT) &
+                RETRY_LIMIT_SHORT_MASK) |
+               ((long_retry << RETRY_LIMIT_LONG_SHIFT) &
+                RETRY_LIMIT_LONG_MASK);
+
+       rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+}
+
+static void
+rtl8xxxu_set_spec_sifs(struct rtl8xxxu_priv *priv, u16 cck, u16 ofdm)
+{
+       u16 val16;
+
+       val16 = ((cck << SPEC_SIFS_CCK_SHIFT) & SPEC_SIFS_CCK_MASK) |
+               ((ofdm << SPEC_SIFS_OFDM_SHIFT) & SPEC_SIFS_OFDM_MASK);
+
+       rtl8xxxu_write16(priv, REG_SPEC_SIFS, val16);
+}
+
+static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
+{
+       struct device *dev = &priv->udev->dev;
+       char *cut;
+
+       switch (priv->chip_cut) {
+       case 0:
+               cut = "A";
+               break;
+       case 1:
+               cut = "B";
+               break;
+       default:
+               cut = "unknown";
+       }
+
+       dev_info(dev,
+                "RTL%s rev %s (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
+                priv->chip_name, cut, priv->vendor_umc ? "UMC" : "TSMC",
+                priv->tx_paths, priv->rx_paths, priv->ep_tx_count,
+                priv->has_wifi, priv->has_bluetooth, priv->has_gps,
+                priv->hi_pa);
+
+       dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr);
+}
+
+static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+       struct device *dev = &priv->udev->dev;
+       u32 val32, bonding;
+       u16 val16;
+
+       val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+       priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >>
+               SYS_CFG_CHIP_VERSION_SHIFT;
+       if (val32 & SYS_CFG_TRP_VAUX_EN) {
+               dev_info(dev, "Unsupported test chip\n");
+               return -ENOTSUPP;
+       }
+
+       if (val32 & SYS_CFG_BT_FUNC) {
+               sprintf(priv->chip_name, "8723AU");
+               priv->rf_paths = 1;
+               priv->rx_paths = 1;
+               priv->tx_paths = 1;
+               priv->rtlchip = 0x8723a;
+
+               val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL);
+               if (val32 & MULTI_WIFI_FUNC_EN)
+                       priv->has_wifi = 1;
+               if (val32 & MULTI_BT_FUNC_EN)
+                       priv->has_bluetooth = 1;
+               if (val32 & MULTI_GPS_FUNC_EN)
+                       priv->has_gps = 1;
+       } else if (val32 & SYS_CFG_TYPE_ID) {
+               bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
+               bonding &= HPON_FSM_BONDING_MASK;
+               if (bonding == HPON_FSM_BONDING_1T2R) {
+                       sprintf(priv->chip_name, "8191CU");
+                       priv->rf_paths = 2;
+                       priv->rx_paths = 2;
+                       priv->tx_paths = 1;
+                       priv->rtlchip = 0x8191c;
+               } else {
+                       sprintf(priv->chip_name, "8192CU");
+                       priv->rf_paths = 2;
+                       priv->rx_paths = 2;
+                       priv->tx_paths = 2;
+                       priv->rtlchip = 0x8192c;
+               }
+               priv->has_wifi = 1;
+       } else {
+               sprintf(priv->chip_name, "8188CU");
+               priv->rf_paths = 1;
+               priv->rx_paths = 1;
+               priv->tx_paths = 1;
+               priv->rtlchip = 0x8188c;
+               priv->has_wifi = 1;
+       }
+
+       if (val32 & SYS_CFG_VENDOR_ID)
+               priv->vendor_umc = 1;
+
+       val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
+       priv->rom_rev = (val32 & GPIO_RF_RL_ID) >> 28;
+
+       val16 = rtl8xxxu_read16(priv, REG_NORMAL_SIE_EP_TX);
+       if (val16 & NORMAL_SIE_EP_TX_HIGH_MASK) {
+               priv->ep_tx_high_queue = 1;
+               priv->ep_tx_count++;
+       }
+
+       if (val16 & NORMAL_SIE_EP_TX_NORMAL_MASK) {
+               priv->ep_tx_normal_queue = 1;
+               priv->ep_tx_count++;
+       }
+
+       if (val16 & NORMAL_SIE_EP_TX_LOW_MASK) {
+               priv->ep_tx_low_queue = 1;
+               priv->ep_tx_count++;
+       }
+
+       /*
+        * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX
+        */
+       if (!priv->ep_tx_count) {
+               switch (priv->nr_out_eps) {
+               case 3:
+                       priv->ep_tx_low_queue = 1;
+                       priv->ep_tx_count++;
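+                       /* fall through */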
+               case 2:
+                       priv->ep_tx_normal_queue = 1;
+                       priv->ep_tx_count++;
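+                       /* fall through */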
+               case 1:
+                       priv->ep_tx_high_queue = 1;
+                       priv->ep_tx_count++;
+                       break;
+               default:
+                       dev_info(dev, "Unsupported USB TX end-points\n");
+                       return -ENOTSUPP;
+               }
+       }
+
+       return 0;
+}
+
+static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+       if (priv->efuse_wifi.efuse8723.rtl_id != cpu_to_le16(0x8129))
+               return -EINVAL;
+
+       ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8723.mac_addr);
+
+       memcpy(priv->cck_tx_power_index_A,
+              priv->efuse_wifi.efuse8723.cck_tx_power_index_A,
+              sizeof(priv->cck_tx_power_index_A));
+       memcpy(priv->cck_tx_power_index_B,
+              priv->efuse_wifi.efuse8723.cck_tx_power_index_B,
+              sizeof(priv->cck_tx_power_index_B));
+
+       memcpy(priv->ht40_1s_tx_power_index_A,
+              priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_A,
+              sizeof(priv->ht40_1s_tx_power_index_A));
+       memcpy(priv->ht40_1s_tx_power_index_B,
+              priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_B,
+              sizeof(priv->ht40_1s_tx_power_index_B));
+
+       memcpy(priv->ht20_tx_power_index_diff,
+              priv->efuse_wifi.efuse8723.ht20_tx_power_index_diff,
+              sizeof(priv->ht20_tx_power_index_diff));
+       memcpy(priv->ofdm_tx_power_index_diff,
+              priv->efuse_wifi.efuse8723.ofdm_tx_power_index_diff,
+              sizeof(priv->ofdm_tx_power_index_diff));
+
+       memcpy(priv->ht40_max_power_offset,
+              priv->efuse_wifi.efuse8723.ht40_max_power_offset,
+              sizeof(priv->ht40_max_power_offset));
+       memcpy(priv->ht20_max_power_offset,
+              priv->efuse_wifi.efuse8723.ht20_max_power_offset,
+              sizeof(priv->ht20_max_power_offset));
+
+       dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+                priv->efuse_wifi.efuse8723.vendor_name);
+       dev_info(&priv->udev->dev, "Product: %.41s\n",
+                priv->efuse_wifi.efuse8723.device_name);
+       return 0;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+       int i;
+
+       if (priv->efuse_wifi.efuse8192.rtl_id != cpu_to_le16(0x8129))
+               return -EINVAL;
+
+       ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8192.mac_addr);
+
+       memcpy(priv->cck_tx_power_index_A,
+              priv->efuse_wifi.efuse8192.cck_tx_power_index_A,
+              sizeof(priv->cck_tx_power_index_A));
+       memcpy(priv->cck_tx_power_index_B,
+              priv->efuse_wifi.efuse8192.cck_tx_power_index_B,
+              sizeof(priv->cck_tx_power_index_B));
+
+       memcpy(priv->ht40_1s_tx_power_index_A,
+              priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_A,
+              sizeof(priv->ht40_1s_tx_power_index_A));
+       memcpy(priv->ht40_1s_tx_power_index_B,
+              priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_B,
+              sizeof(priv->ht40_1s_tx_power_index_B));
+       memcpy(priv->ht40_2s_tx_power_index_diff,
+              priv->efuse_wifi.efuse8192.ht40_2s_tx_power_index_diff,
+              sizeof(priv->ht40_2s_tx_power_index_diff));
+
+       memcpy(priv->ht20_tx_power_index_diff,
+              priv->efuse_wifi.efuse8192.ht20_tx_power_index_diff,
+              sizeof(priv->ht20_tx_power_index_diff));
+       memcpy(priv->ofdm_tx_power_index_diff,
+              priv->efuse_wifi.efuse8192.ofdm_tx_power_index_diff,
+              sizeof(priv->ofdm_tx_power_index_diff));
+
+       memcpy(priv->ht40_max_power_offset,
+              priv->efuse_wifi.efuse8192.ht40_max_power_offset,
+              sizeof(priv->ht40_max_power_offset));
+       memcpy(priv->ht20_max_power_offset,
+              priv->efuse_wifi.efuse8192.ht20_max_power_offset,
+              sizeof(priv->ht20_max_power_offset));
+
+       dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+                priv->efuse_wifi.efuse8192.vendor_name);
+       dev_info(&priv->udev->dev, "Product: %.20s\n",
+                priv->efuse_wifi.efuse8192.device_name);
+
+       if (priv->efuse_wifi.efuse8192.rf_regulatory & 0x20) {
+               sprintf(priv->chip_name, "8188RU");
+               priv->hi_pa = 1;
+       }
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+               unsigned char *raw = priv->efuse_wifi.raw;
+
+               dev_info(&priv->udev->dev,
+                        "%s: dumping efuse (0x%02zx bytes):\n",
+                        __func__, sizeof(struct rtl8192cu_efuse));
+               for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) {
+                       dev_info(&priv->udev->dev, "%02x: "
+                                "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+                                raw[i], raw[i + 1], raw[i + 2],
+                                raw[i + 3], raw[i + 4], raw[i + 5],
+                                raw[i + 6], raw[i + 7]);
+               }
+       }
+       return 0;
+}
+
+#endif
+
+static int
+rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data)
+{
+       int i;
+       u8 val8;
+       u32 val32;
+
+       /* Write Address */
+       rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 1, offset & 0xff);
+       val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 2);
+       val8 &= 0xfc;
+       val8 |= (offset >> 8) & 0x03;
+       rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 2, val8);
+
+       val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 3);
+       rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 3, val8 & 0x7f);
+
+       /* Poll for data read */
+       val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+       for (i = 0; i < RTL8XXXU_MAX_REG_POLL; i++) {
+               val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+               if (val32 & BIT(31))
+                       break;
+       }
+
+       if (i == RTL8XXXU_MAX_REG_POLL)
+               return -EIO;
+
+       udelay(50);
+       val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+
+       *data = val32 & 0xff;
+       return 0;
+}
+
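+/*
+ * Parse the efuse PG packets into the logical efuse map. Each packet
+ * starts with a header byte: bits [7:4] give the section offset and
+ * bits [3:0] the word disable mask. A header whose low five bits are
+ * 0x0f is an extended header with the offset spread over both header
+ * bytes. A cleared mask bit means the corresponding two byte word is
+ * present in the efuse stream.
+ */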
+static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
+{
+       struct device *dev = &priv->udev->dev;
+       int i, ret = 0;
+       u8 val8, word_mask, header, extheader;
+       u16 val16, efuse_addr, offset;
+       u32 val32;
+
+       val16 = rtl8xxxu_read16(priv, REG_9346CR);
+       if (val16 & EEPROM_ENABLE)
+               priv->has_eeprom = 1;
+       if (val16 & EEPROM_BOOT)
+               priv->boot_eeprom = 1;
+
+       val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST);
+       val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT;
+       rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32);
+
+       dev_dbg(dev, "Booting from %s\n",
+               priv->boot_eeprom ? "EEPROM" : "EFUSE");
+
+       rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_ENABLE);
+
+       /*  1.2V Power: From VDDON with Power Cut(0x0000[15]), default valid */
+       val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+       if (!(val16 & SYS_ISO_PWC_EV12V)) {
+               val16 |= SYS_ISO_PWC_EV12V;
+               rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+       }
+       /*  Reset: 0x0000[28], default valid */
+       val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+       if (!(val16 & SYS_FUNC_ELDR)) {
+               val16 |= SYS_FUNC_ELDR;
+               rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+       }
+
+       /*
+        * Clock: Gated(0x0008[5]) 8M(0x0008[1]) clock from ANA, default valid
+        */
+       val16 = rtl8xxxu_read16(priv, REG_SYS_CLKR);
+       if (!(val16 & SYS_CLK_LOADER_ENABLE) || !(val16 & SYS_CLK_ANA8M)) {
+               val16 |= (SYS_CLK_LOADER_ENABLE | SYS_CLK_ANA8M);
+               rtl8xxxu_write16(priv, REG_SYS_CLKR, val16);
+       }
+
+       /* Default value is 0xff */
+       memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN_8723A);
+
+       efuse_addr = 0;
+       while (efuse_addr < EFUSE_REAL_CONTENT_LEN_8723A) {
+               ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &header);
+               if (ret || header == 0xff)
+                       goto exit;
+
+               if ((header & 0x1f) == 0x0f) {  /* extended header */
+                       offset = (header & 0xe0) >> 5;
+
+                       ret = rtl8xxxu_read_efuse8(priv, efuse_addr++,
+                                                  &extheader);
+                       if (ret)
+                               goto exit;
+                       /* All words disabled */
+                       if ((extheader & 0x0f) == 0x0f)
+                               continue;
+
+                       offset |= ((extheader & 0xf0) >> 1);
+                       word_mask = extheader & 0x0f;
+               } else {
+                       offset = (header >> 4) & 0x0f;
+                       word_mask = header & 0x0f;
+               }
+
+               if (offset < EFUSE_MAX_SECTION_8723A) {
+                       u16 map_addr;
+                       /* Get word enable value from PG header */
+
+                       /* We have 8 bits to indicate validity */
+                       map_addr = offset * 8;
+                       if (map_addr >= EFUSE_MAP_LEN_8723A) {
+                               dev_warn(dev, "%s: Illegal map_addr (%04x), "
+                                        "efuse corrupt!\n",
+                                        __func__, map_addr);
+                               ret = -EINVAL;
+                               goto exit;
+                       }
+                       for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+                               /* Check word enable condition in the section */
+                               if (!(word_mask & BIT(i))) {
+                                       ret = rtl8xxxu_read_efuse8(priv,
+                                                                  efuse_addr++,
+                                                                  &val8);
+                                       if (ret)
+                                               goto exit;
+                                       priv->efuse_wifi.raw[map_addr++] = val8;
+
+                                       ret = rtl8xxxu_read_efuse8(priv,
+                                                                  efuse_addr++,
+                                                                  &val8);
+                                       if (ret)
+                                               goto exit;
+                                       priv->efuse_wifi.raw[map_addr++] = val8;
+                               } else
+                                       map_addr += 2;
+                       }
+               } else {
+                       dev_warn(dev,
+                                "%s: Illegal offset (%04x), efuse corrupt!\n",
+                                __func__, offset);
+                       ret = -EINVAL;
+                       goto exit;
+               }
+       }
+
+exit:
+       rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_DISABLE);
+
+       return ret;
+}
+
+static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
+{
+       struct device *dev = &priv->udev->dev;
+       int ret = 0, i;
+       u32 val32;
+
+       /* Poll checksum report */
+       for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
+               val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+               if (val32 & MCU_FW_DL_CSUM_REPORT)
+                       break;
+       }
+
+       if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
+               dev_warn(dev, "Firmware checksum poll timed out\n");
+               ret = -EAGAIN;
+               goto exit;
+       }
+
+       val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+       val32 |= MCU_FW_DL_READY;
+       val32 &= ~MCU_WINT_INIT_READY;
+       rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32);
+
+       /* Wait for firmware to become ready */
+       for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
+               val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+               if (val32 & MCU_WINT_INIT_READY)
+                       break;
+
+               udelay(100);
+       }
+
+       if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
+               dev_warn(dev, "Firmware failed to start\n");
+               ret = -EAGAIN;
+               goto exit;
+       }
+
+exit:
+       return ret;
+}
+
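+/*
+ * Download the firmware image to the 8051 in RTL_FW_PAGE_SIZE chunks,
+ * selecting the destination page in the low bits of REG_MCU_FW_DL + 2
+ * before each write. Any remainder is sent as a final partial page.
+ */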
+static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv)
+{
+       int pages, remainder, i, ret;
+       u8 val8;
+       u16 val16;
+       u32 val32;
+       u8 *fwptr;
+
+       val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC + 1);
+       val8 |= 4;
+       rtl8xxxu_write8(priv, REG_SYS_FUNC + 1, val8);
+
+       /* 8051 enable */
+       val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+       rtl8xxxu_write16(priv, REG_SYS_FUNC, val16 | SYS_FUNC_CPU_ENABLE);
+
+       /* MCU firmware download enable */
+       val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
+       rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_ENABLE);
+
+       /* 8051 reset */
+       val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+       rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32 & ~BIT(19));
+
+       /* Reset firmware download checksum */
+       val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
+       rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_CSUM_REPORT);
+
+       pages = priv->fw_size / RTL_FW_PAGE_SIZE;
+       remainder = priv->fw_size % RTL_FW_PAGE_SIZE;
+
+       fwptr = priv->fw_data->data;
+
+       for (i = 0; i < pages; i++) {
+               val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
+               rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+
+               ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
+                                     fwptr, RTL_FW_PAGE_SIZE);
+               if (ret != RTL_FW_PAGE_SIZE) {
+                       ret = -EAGAIN;
+                       goto fw_abort;
+               }
+
+               fwptr += RTL_FW_PAGE_SIZE;
+       }
+
+       if (remainder) {
+               val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
+               rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+               ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
+                                     fwptr, remainder);
+               if (ret != remainder) {
+                       ret = -EAGAIN;
+                       goto fw_abort;
+               }
+       }
+
+       ret = 0;
+fw_abort:
+       /* MCU firmware download disable */
+       val16 = rtl8xxxu_read16(priv, REG_MCU_FW_DL);
+       rtl8xxxu_write16(priv, REG_MCU_FW_DL,
+                        val16 & (~MCU_FW_DL_ENABLE & 0xff));
+
+       return ret;
+}
+
+static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
+{
+       struct device *dev = &priv->udev->dev;
+       const struct firmware *fw;
+       int ret = 0;
+       u16 signature;
+
+       dev_info(dev, "%s: Loading firmware %s\n", DRIVER_NAME, fw_name);
+       if (request_firmware(&fw, fw_name, &priv->udev->dev)) {
+               dev_warn(dev, "request_firmware(%s) failed\n", fw_name);
+               ret = -EAGAIN;
+               goto exit;
+       }
+       if (!fw) {
+               dev_warn(dev, "Firmware data not available\n");
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       priv->fw_data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+       if (!priv->fw_data) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+       priv->fw_size = fw->size - sizeof(struct rtl8xxxu_firmware_header);
+
+       signature = le16_to_cpu(priv->fw_data->signature);
+       switch (signature & 0xfff0) {
+       case 0x92c0:
+       case 0x88c0:
+       case 0x2300:
+               break;
+       default:
+               ret = -EINVAL;
+               dev_warn(dev, "%s: Invalid firmware signature: 0x%04x\n",
+                        __func__, signature);
+       }
+
+       dev_info(dev, "Firmware revision %i.%i (signature 0x%04x)\n",
+                le16_to_cpu(priv->fw_data->major_version),
+                priv->fw_data->minor_version, signature);
+
+exit:
+       release_firmware(fw);
+       return ret;
+}
+
+static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
+{
+       char *fw_name;
+       int ret;
+
+       switch (priv->chip_cut) {
+       case 0:
+               fw_name = "rtlwifi/rtl8723aufw_A.bin";
+               break;
+       case 1:
+               if (priv->enable_bluetooth)
+                       fw_name = "rtlwifi/rtl8723aufw_B.bin";
+               else
+                       fw_name = "rtlwifi/rtl8723aufw_B_NoBT.bin";
+
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = rtl8xxxu_load_firmware(priv, fw_name);
+       return ret;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+       char *fw_name;
+       int ret;
+
+       if (!priv->vendor_umc)
+               fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+       else if (priv->chip_cut || priv->rtlchip == 0x8192c)
+               fw_name = "rtlwifi/rtl8192cufw_B.bin";
+       else
+               fw_name = "rtlwifi/rtl8192cufw_A.bin";
+
+       ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+       return ret;
+}
+
+#endif
+
+static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
+{
+       u16 val16;
+       int i;
+
+       /* Inform 8051 to perform reset */
+       rtl8xxxu_write8(priv, REG_HMTFR + 3, 0x20);
+
+       for (i = 100; i > 0; i--) {
+               val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+
+               if (!(val16 & SYS_FUNC_CPU_ENABLE)) {
+                       dev_dbg(&priv->udev->dev,
+                               "%s: Firmware self reset success!\n", __func__);
+                       break;
+               }
+               udelay(50);
+       }
+
+       if (!i) {
+               /* Force firmware reset */
+               val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+               val16 &= ~SYS_FUNC_CPU_ENABLE;
+               rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+       }
+}
+
+static int
+rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
+{
+       int i, ret;
+       u16 reg;
+       u8 val;
+
+       for (i = 0; ; i++) {
+               reg = array[i].reg;
+               val = array[i].val;
+
+               if (reg == 0xffff && val == 0xff)
+                       break;
+
+               ret = rtl8xxxu_write8(priv, reg, val);
+               if (ret != 1) {
+                       dev_warn(&priv->udev->dev,
+                                "Failed to initialize MAC\n");
+                       return -EAGAIN;
+               }
+       }
+
+       rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
+
+       return 0;
+}
+
+static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
+                                 struct rtl8xxxu_reg32val *array)
+{
+       int i, ret;
+       u16 reg;
+       u32 val;
+
+       for (i = 0; ; i++) {
+               reg = array[i].reg;
+               val = array[i].val;
+
+               if (reg == 0xffff && val == 0xffffffff)
+                       break;
+
+               ret = rtl8xxxu_write32(priv, reg, val);
+               if (ret != sizeof(val)) {
+                       dev_warn(&priv->udev->dev,
+                                "Failed to initialize PHY\n");
+                       return -EAGAIN;
+               }
+               udelay(1);
+       }
+
+       return 0;
+}
+
+/*
+ * Most of this is black magic retrieved from the old rtl8723au driver
+ */
+static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+       u8 val8, ldoa15, ldov12d, lpldo, ldohci12;
+       u32 val32;
+
+       /*
+        * Todo: The vendor driver maintains a table of PHY register
+        *       addresses, which is initialized here. Do we need this?
+        */
+
+       val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+       udelay(2);
+       val8 |= AFE_PLL_320_ENABLE;
+       rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+       udelay(2);
+
+       rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
+       udelay(2);
+
+       val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+       val8 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+       rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+       /* AFE_XTAL_RF_GATE (bit 14) when addressed as a 32 bit register */
+       val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+       val32 &= ~AFE_XTAL_RF_GATE;
+       if (priv->has_bluetooth)
+               val32 &= ~AFE_XTAL_BT_GATE;
+       rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+
+       /* 6. 0x1f[7:0] = 0x07 */
+       val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+       rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+       if (priv->hi_pa)
+               rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
+       else if (priv->tx_paths == 2)
+               rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
+       else
+               rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
+
+
+       if (priv->rtlchip == 0x8188c && priv->hi_pa &&
+           priv->vendor_umc && priv->chip_cut == 1)
+               rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50);
+
+       if (priv->tx_paths == 1 && priv->rx_paths == 2) {
+               /*
+                * For 1T2R boards, patch the registers.
+                *
+                * It looks like 8191/2 1T2R boards use path B for TX
+                */
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_TX_INFO);
+               val32 &= ~(BIT(0) | BIT(1));
+               val32 |= BIT(1);
+               rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_FPGA1_TX_INFO);
+               val32 &= ~0x300033;
+               val32 |= 0x200022;
+               rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+               val32 &= 0xff000000;
+               val32 |= 0x45000000;
+               rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+               val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK);
+               val32 |= (OFDM_RF_PATH_RX_A | OFDM_RF_PATH_RX_B |
+                         OFDM_RF_PATH_TX_B);
+               rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGC_PARM1);
+               val32 &= ~(BIT(4) | BIT(5));
+               val32 |= BIT(4);
+               rtl8xxxu_write32(priv, REG_OFDM0_AGC_PARM1, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_TX_CCK_RFON);
+               val32 &= ~(BIT(27) | BIT(26));
+               val32 |= BIT(27);
+               rtl8xxxu_write32(priv, REG_TX_CCK_RFON, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_TX_CCK_BBON);
+               val32 &= ~(BIT(27) | BIT(26));
+               val32 |= BIT(27);
+               rtl8xxxu_write32(priv, REG_TX_CCK_BBON, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_RFON);
+               val32 &= ~(BIT(27) | BIT(26));
+               val32 |= BIT(27);
+               rtl8xxxu_write32(priv, REG_TX_OFDM_RFON, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_BBON);
+               val32 &= ~(BIT(27) | BIT(26));
+               val32 |= BIT(27);
+               rtl8xxxu_write32(priv, REG_TX_OFDM_BBON, val32);
+
+               val32 = rtl8xxxu_read32(priv, REG_TX_TO_TX);
+               val32 &= ~(BIT(27) | BIT(26));
+               val32 |= BIT(27);
+               rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
+       }
+
+       if (priv->hi_pa)
+               rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
+       else
+               rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
+
+       if (priv->rtlchip == 0x8723a &&
+           priv->efuse_wifi.efuse8723.version >= 0x01) {
+               val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
+
+               val8 = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
+               val32 &= 0xff000fff;
+               val32 |= ((val8 | (val8 << 6)) << 12);
+
+               rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
+       }
+
+       ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
+       ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
+       ldohci12 = 0x57;
+       lpldo = 1;
+       val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15;
+
+       rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+
+       return 0;
+}
+
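+/*
+ * Write an RF register init table. Entries with a register address of
+ * 0xf9 - 0xfe encode delays (1us up to 50ms) rather than register
+ * writes, and reg 0xff with val 0xffffffff terminates the table.
+ */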
+static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv,
+                                struct rtl8xxxu_rfregval *array,
+                                enum rtl8xxxu_rfpath path)
+{
+       int i, ret;
+       u8 reg;
+       u32 val;
+
+       for (i = 0; ; i++) {
+               reg = array[i].reg;
+               val = array[i].val;
+
+               if (reg == 0xff && val == 0xffffffff)
+                       break;
+
+               switch (reg) {
+               case 0xfe:
+                       msleep(50);
+                       continue;
+               case 0xfd:
+                       mdelay(5);
+                       continue;
+               case 0xfc:
+                       mdelay(1);
+                       continue;
+               case 0xfb:
+                       udelay(50);
+                       continue;
+               case 0xfa:
+                       udelay(5);
+                       continue;
+               case 0xf9:
+                       udelay(1);
+                       continue;
+               }
+
+               reg &= 0x3f;
+
+               ret = rtl8xxxu_write_rfreg(priv, path, reg, val);
+               if (ret) {
+                       dev_warn(&priv->udev->dev,
+                                "Failed to initialize RF\n");
+                       return -EAGAIN;
+               }
+               udelay(1);
+       }
+
+       return 0;
+}
+
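+/*
+ * Program an RF path through the FPGA0 3-wire interface. The RFENV bit
+ * of the software control register is saved up front and restored once
+ * the table has been written.
+ */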
+static int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
+                               struct rtl8xxxu_rfregval *table,
+                               enum rtl8xxxu_rfpath path)
+{
+       u32 val32;
+       u16 val16, rfsi_rfenv;
+       u16 reg_sw_ctrl, reg_int_oe, reg_hssi_parm2;
+
+       switch (path) {
+       case RF_A:
+               reg_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL;
+               reg_int_oe = REG_FPGA0_XA_RF_INT_OE;
+               reg_hssi_parm2 = REG_FPGA0_XA_HSSI_PARM2;
+               break;
+       case RF_B:
+               reg_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL;
+               reg_int_oe = REG_FPGA0_XB_RF_INT_OE;
+               reg_hssi_parm2 = REG_FPGA0_XB_HSSI_PARM2;
+               break;
+       default:
+               dev_err(&priv->udev->dev, "%s: Unsupported RF path %c\n",
+                       __func__, path + 'A');
+               return -EINVAL;
+       }
+       /* For path B, use XB */
+       rfsi_rfenv = rtl8xxxu_read16(priv, reg_sw_ctrl);
+       rfsi_rfenv &= FPGA0_RF_RFENV;
+
+       /*
+        * These two we might be able to optimize into one
+        */
+       val32 = rtl8xxxu_read32(priv, reg_int_oe);
+       val32 |= BIT(20);       /* 0x10 << 16 */
+       rtl8xxxu_write32(priv, reg_int_oe, val32);
+       udelay(1);
+
+       val32 = rtl8xxxu_read32(priv, reg_int_oe);
+       val32 |= BIT(4);
+       rtl8xxxu_write32(priv, reg_int_oe, val32);
+       udelay(1);
+
+       /*
+        * These two we might be able to optimize into one
+        */
+       val32 = rtl8xxxu_read32(priv, reg_hssi_parm2);
+       val32 &= ~FPGA0_HSSI_3WIRE_ADDR_LEN;
+       rtl8xxxu_write32(priv, reg_hssi_parm2, val32);
+       udelay(1);
+
+       val32 = rtl8xxxu_read32(priv, reg_hssi_parm2);
+       val32 &= ~FPGA0_HSSI_3WIRE_DATA_LEN;
+       rtl8xxxu_write32(priv, reg_hssi_parm2, val32);
+       udelay(1);
+
+       rtl8xxxu_init_rf_regs(priv, table, path);
+
+       /* For path B, use XB */
+       val16 = rtl8xxxu_read16(priv, reg_sw_ctrl);
+       val16 &= ~FPGA0_RF_RFENV;
+       val16 |= rfsi_rfenv;
+       rtl8xxxu_write16(priv, reg_sw_ctrl, val16);
+
+       return 0;
+}
+
+static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
+{
+       int ret = -EBUSY;
+       int count = 0;
+       u32 value;
+
+       value = LLT_OP_WRITE | address << 8 | data;
+
+       rtl8xxxu_write32(priv, REG_LLT_INIT, value);
+
+       do {
+               value = rtl8xxxu_read32(priv, REG_LLT_INIT);
+               if ((value & LLT_OP_MASK) == LLT_OP_INACTIVE) {
+                       ret = 0;
+                       break;
+               }
+       } while (count++ < 20);
+
+       return ret;
+}
+
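+/*
+ * Initialize the LLT (link list table). Pages 0 to last_tx_page are
+ * chained together for TX buffering, terminated by 0xff; the remaining
+ * pages form a ring buffer whose last entry points back to
+ * last_tx_page + 1.
+ */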
+static int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+{
+       int ret;
+       int i;
+
+       for (i = 0; i < last_tx_page; i++) {
+               ret = rtl8xxxu_llt_write(priv, i, i + 1);
+               if (ret)
+                       goto exit;
+       }
+
+       ret = rtl8xxxu_llt_write(priv, last_tx_page, 0xff);
+       if (ret)
+               goto exit;
+
+       /* Mark remaining pages as a ring buffer */
+       for (i = last_tx_page + 1; i < 0xff; i++) {
+               ret = rtl8xxxu_llt_write(priv, i, (i + 1));
+               if (ret)
+                       goto exit;
+       }
+
+       /*  Let last entry point to the start entry of ring buffer */
+       ret = rtl8xxxu_llt_write(priv, 0xff, last_tx_page + 1);
+       if (ret)
+               goto exit;
+
+exit:
+       return ret;
+}
+
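+/*
+ * Map the six TX DMA queues (VO, VI, BE, BK, management, high) onto
+ * the one to three available USB TX endpoints. With two endpoints,
+ * for example, VO/VI/management/high share the first endpoint while
+ * BE/BK use the second.
+ */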
+static int rtl8xxxu_init_queue_priority(struct rtl8xxxu_priv *priv)
+{
+       u16 val16, hi, lo;
+       u16 hiq, mgq, bkq, beq, viq, voq;
+       int hip, mgp, bkp, bep, vip, vop;
+       int ret = 0;
+
+       switch (priv->ep_tx_count) {
+       case 1:
+               if (priv->ep_tx_high_queue) {
+                       hi = TRXDMA_QUEUE_HIGH;
+               } else if (priv->ep_tx_low_queue) {
+                       hi = TRXDMA_QUEUE_LOW;
+               } else if (priv->ep_tx_normal_queue) {
+                       hi = TRXDMA_QUEUE_NORMAL;
+               } else {
+                       hi = 0;
+                       ret = -EINVAL;
+               }
+
+               hiq = hi;
+               mgq = hi;
+               bkq = hi;
+               beq = hi;
+               viq = hi;
+               voq = hi;
+
+               hip = 0;
+               mgp = 0;
+               bkp = 0;
+               bep = 0;
+               vip = 0;
+               vop = 0;
+               break;
+       case 2:
+               if (priv->ep_tx_high_queue && priv->ep_tx_low_queue) {
+                       hi = TRXDMA_QUEUE_HIGH;
+                       lo = TRXDMA_QUEUE_LOW;
+               } else if (priv->ep_tx_normal_queue && priv->ep_tx_low_queue) {
+                       hi = TRXDMA_QUEUE_NORMAL;
+                       lo = TRXDMA_QUEUE_LOW;
+               } else if (priv->ep_tx_high_queue && priv->ep_tx_normal_queue) {
+                       hi = TRXDMA_QUEUE_HIGH;
+                       lo = TRXDMA_QUEUE_NORMAL;
+               } else {
+                       ret = -EINVAL;
+                       hi = 0;
+                       lo = 0;
+               }
+
+               hiq = hi;
+               mgq = hi;
+               bkq = lo;
+               beq = lo;
+               viq = hi;
+               voq = hi;
+
+               hip = 0;
+               mgp = 0;
+               bkp = 1;
+               bep = 1;
+               vip = 0;
+               vop = 0;
+               break;
+       case 3:
+               beq = TRXDMA_QUEUE_LOW;
+               bkq = TRXDMA_QUEUE_LOW;
+               viq = TRXDMA_QUEUE_NORMAL;
+               voq = TRXDMA_QUEUE_HIGH;
+               mgq = TRXDMA_QUEUE_HIGH;
+               hiq = TRXDMA_QUEUE_HIGH;
+
+               hip = hiq ^ 3;
+               mgp = mgq ^ 3;
+               bkp = bkq ^ 3;
+               bep = beq ^ 3;
+               vip = viq ^ 3;
+               vop = voq ^ 3;
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       /*
+        * None of the vendor drivers are configuring the beacon
+        * queue here .... why?
+        */
+       if (!ret) {
+               val16 = rtl8xxxu_read16(priv, REG_TRXDMA_CTRL);
+               val16 &= 0x7;
+               val16 |= (voq << TRXDMA_CTRL_VOQ_SHIFT) |
+                       (viq << TRXDMA_CTRL_VIQ_SHIFT) |
+                       (beq << TRXDMA_CTRL_BEQ_SHIFT) |
+                       (bkq << TRXDMA_CTRL_BKQ_SHIFT) |
+                       (mgq << TRXDMA_CTRL_MGQ_SHIFT) |
+                       (hiq << TRXDMA_CTRL_HIQ_SHIFT);
+               rtl8xxxu_write16(priv, REG_TRXDMA_CTRL, val16);
+
+               priv->pipe_out[TXDESC_QUEUE_VO] =
+                       usb_sndbulkpipe(priv->udev, priv->out_ep[vop]);
+               priv->pipe_out[TXDESC_QUEUE_VI] =
+                       usb_sndbulkpipe(priv->udev, priv->out_ep[vip]);
+               priv->pipe_out[TXDESC_QUEUE_BE] =
+                       usb_sndbulkpipe(priv->udev, priv->out_ep[bep]);
+               priv->pipe_out[TXDESC_QUEUE_BK] =
+                       usb_sndbulkpipe(priv->udev, priv->out_ep[bkp]);
+               priv->pipe_out[TXDESC_QUEUE_BEACON] =
+                       usb_sndbulkpipe(priv->udev, priv->out_ep[0]);
+               priv->pipe_out[TXDESC_QUEUE_MGNT] =
+                       usb_sndbulkpipe(priv->udev, priv->out_ep[mgp]);
+               priv->pipe_out[TXDESC_QUEUE_HIGH] =
+                       usb_sndbulkpipe(priv->udev, priv->out_ep[hip]);
+               priv->pipe_out[TXDESC_QUEUE_CMD] =
+                       usb_sndbulkpipe(priv->udev, priv->out_ep[0]);
+       }
+
+       return ret;
+}
+
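+/*
+ * Write the path A IQK results into the TX/RX IQ imbalance registers.
+ * The x/y values produced by the calibration are 10-bit signed
+ * quantities, hence the sign extension when bit 9 (0x200) is set.
+ */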
+static void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv,
+                                      bool iqk_ok, int result[][8],
+                                      int candidate, bool tx_only)
+{
+       u32 oldval, x, tx0_a, reg;
+       int y, tx0_c;
+       u32 val32;
+
+       if (!iqk_ok)
+               return;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+       oldval = val32 >> 22;
+
+       x = result[candidate][0];
+       if ((x & 0x00000200) != 0)
+               x = x | 0xfffffc00;
+       tx0_a = (x * oldval) >> 8;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+       val32 &= ~0x3ff;
+       val32 |= tx0_a;
+       rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+       val32 &= ~BIT(31);
+       if ((x * oldval >> 7) & 0x1)
+               val32 |= BIT(31);
+       rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+       y = result[candidate][1];
+       if ((y & 0x00000200) != 0)
+               y = y | 0xfffffc00;
+       tx0_c = (y * oldval) >> 8;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XC_TX_AFE);
+       val32 &= ~0xf0000000;
+       val32 |= (((tx0_c & 0x3c0) >> 6) << 28);
+       rtl8xxxu_write32(priv, REG_OFDM0_XC_TX_AFE, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+       val32 &= ~0x003f0000;
+       val32 |= ((tx0_c & 0x3f) << 16);
+       rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+       val32 &= ~BIT(29);
+       if ((y * oldval >> 7) & 0x1)
+               val32 |= BIT(29);
+       rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+       if (tx_only) {
+               dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__);
+               return;
+       }
+
+       reg = result[candidate][2];
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
+       val32 &= ~0x3ff;
+       val32 |= (reg & 0x3ff);
+       rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
+
+       reg = result[candidate][3] & 0x3F;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
+       val32 &= ~0xfc00;
+       val32 |= ((reg << 10) & 0xfc00);
+       rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
+
+       reg = (result[candidate][3] >> 6) & 0xF;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_IQ_EXT_ANTA);
+       val32 &= ~0xf0000000;
+       val32 |= (reg << 28);
+       rtl8xxxu_write32(priv, REG_OFDM0_RX_IQ_EXT_ANTA, val32);
+}
+
+static void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv,
+                                      bool iqk_ok, int result[][8],
+                                      int candidate, bool tx_only)
+{
+       u32 oldval, x, tx1_a, reg;
+       int y, tx1_c;
+       u32 val32;
+
+       if (!iqk_ok)
+               return;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+       oldval = val32 >> 22;
+
+       x = result[candidate][4];
+       if ((x & 0x00000200) != 0)
+               x = x | 0xfffffc00;
+       tx1_a = (x * oldval) >> 8;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+       val32 &= ~0x3ff;
+       val32 |= tx1_a;
+       rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+       val32 &= ~BIT(27);
+       if ((x * oldval >> 7) & 0x1)
+               val32 |= BIT(27);
+       rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+       y = result[candidate][5];
+       if ((y & 0x00000200) != 0)
+               y = y | 0xfffffc00;
+       tx1_c = (y * oldval) >> 8;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XD_TX_AFE);
+       val32 &= ~0xf0000000;
+       val32 |= (((tx1_c & 0x3c0) >> 6) << 28);
+       rtl8xxxu_write32(priv, REG_OFDM0_XD_TX_AFE, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+       val32 &= ~0x003f0000;
+       val32 |= ((tx1_c & 0x3f) << 16);
+       rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+       val32 &= ~BIT(25);
+       if ((y * oldval >> 7) & 0x1)
+               val32 |= BIT(25);
+       rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+       if (tx_only) {
+               dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__);
+               return;
+       }
+
+       reg = result[candidate][6];
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE);
+       val32 &= ~0x3ff;
+       val32 |= (reg & 0x3ff);
+       rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32);
+
+       reg = result[candidate][7] & 0x3f;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE);
+       val32 &= ~0xfc00;
+       val32 |= ((reg << 10) & 0xfc00);
+       rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32);
+
+       reg = (result[candidate][7] >> 6) & 0xf;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGCR_SSI_TABLE);
+       val32 &= ~0x0000f000;
+       val32 |= (reg << 12);
+       rtl8xxxu_write32(priv, REG_OFDM0_AGCR_SSI_TABLE, val32);
+}
+
+#define MAX_TOLERANCE          5
+
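+/*
+ * Compare two IQK runs element by element. Results are considered
+ * similar when they differ by no more than MAX_TOLERANCE, and the
+ * best candidate per path is copied into result[3].
+ */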
+static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
+                                       int result[][8], int c1, int c2)
+{
+       u32 i, j, diff, simubitmap, bound = 0;
+       int candidate[2] = {-1, -1};    /* for path A and path B */
+       bool retval = true;
+
+       if (priv->tx_paths > 1)
+               bound = 8;
+       else
+               bound = 4;
+
+       simubitmap = 0;
+
+       for (i = 0; i < bound; i++) {
+               diff = (result[c1][i] > result[c2][i]) ?
+                       (result[c1][i] - result[c2][i]) :
+                       (result[c2][i] - result[c1][i]);
+               if (diff > MAX_TOLERANCE) {
+                       if ((i == 2 || i == 6) && !simubitmap) {
+                               if (result[c1][i] + result[c1][i + 1] == 0)
+                                       candidate[(i / 4)] = c2;
+                               else if (result[c2][i] + result[c2][i + 1] == 0)
+                                       candidate[(i / 4)] = c1;
+                               else
+                                       simubitmap = simubitmap | (1 << i);
+                       } else {
+                               simubitmap = simubitmap | (1 << i);
+                       }
+               }
+       }
+
+       if (simubitmap == 0) {
+               for (i = 0; i < (bound / 4); i++) {
+                       if (candidate[i] >= 0) {
+                               for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+                                       result[3][j] = result[candidate[i]][j];
+                               retval = false;
+                       }
+               }
+               return retval;
+       } else if (!(simubitmap & 0x0f)) {
+               /* path A OK */
+               for (i = 0; i < 4; i++)
+                       result[3][i] = result[c1][i];
+       } else if (!(simubitmap & 0xf0) && priv->tx_paths > 1) {
+               /* path B OK */
+               for (i = 4; i < 8; i++)
+                       result[3][i] = result[c1][i];
+       }
+
+       return false;
+}
+
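+/*
+ * Save/restore helpers for the IQK MAC register set: all entries but
+ * the last are accessed as 8 bit registers, while the final entry is
+ * saved and restored as a full 32 bit register.
+ */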
+static void
+rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup)
+{
+       int i;
+
+       for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++)
+               backup[i] = rtl8xxxu_read8(priv, reg[i]);
+
+       backup[i] = rtl8xxxu_read32(priv, reg[i]);
+}
+
+static void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
+                                     const u32 *reg, u32 *backup)
+{
+       int i;
+
+       for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++)
+               rtl8xxxu_write8(priv, reg[i], backup[i]);
+
+       rtl8xxxu_write32(priv, reg[i], backup[i]);
+}
+
+static void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+                              u32 *backup, int count)
+{
+       int i;
+
+       for (i = 0; i < count; i++)
+               backup[i] = rtl8xxxu_read32(priv, regs[i]);
+}
+
+static void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+                                 u32 *backup, int count)
+{
+       int i;
+
+       for (i = 0; i < count; i++)
+               rtl8xxxu_write32(priv, regs[i], backup[i]);
+}
+
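+/*
+ * Switch the ADDA register set on for calibration. The first register
+ * takes a value that depends on which path is being calibrated (with
+ * a dedicated value for single TX path parts); the remaining
+ * registers all receive the common path_on value.
+ */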
+static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
+                                 bool path_a_on)
+{
+       u32 path_on;
+       int i;
+
+       path_on = path_a_on ? 0x04db25a4 : 0x0b1b25a4;
+       if (priv->tx_paths == 1) {
+               path_on = 0x0bdb25a0;
+               rtl8xxxu_write32(priv, regs[0], 0x0b1b25a0);
+       } else {
+               rtl8xxxu_write32(priv, regs[0], path_on);
+       }
+
+       for (i = 1; i < RTL8XXXU_ADDA_REGS; i++)
+               rtl8xxxu_write32(priv, regs[i], path_on);
+}
+
+static void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
+                                    const u32 *regs, u32 *backup)
+{
+       int i = 0;
+
+       rtl8xxxu_write8(priv, regs[i], 0x3f);
+
+       for (i = 1; i < (RTL8XXXU_MAC_REGS - 1); i++)
+               rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(3)));
+
+       rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(5)));
+}
+
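+/*
+ * Run LO and IQ calibration for path A. Returns a bitmask with 0x01
+ * set if TX calibration passed and 0x02 set if RX passed as well;
+ * RX is only checked once TX has succeeded.
+ */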
+static int rtl8xxxu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+       u32 reg_eac, reg_e94, reg_e9c, reg_ea4, val32;
+       int result = 0;
+
+       /* path-A IQK setting */
+       rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1f);
+       rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x10008c1f);
+       rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140102);
+
+       val32 = (priv->rf_paths > 1) ? 0x28160202 :
+               /*IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID)?0x28160202: */
+               0x28160502;
+       rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, val32);
+
+       /* path-B IQK setting */
+       if (priv->rf_paths > 1) {
+               rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x10008c22);
+               rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x10008c22);
+               rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82140102);
+               rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28160202);
+       }
+
+       /* LO calibration setting */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x001028d1);
+
+       /* One shot, path A LOK & IQK */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+       rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+       mdelay(1);
+
+       /* Check failed */
+       reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+       reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+       reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+       reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+       if (!(reg_eac & BIT(28)) &&
+           ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+           ((reg_e9c & 0x03ff0000) != 0x00420000))
+               result |= 0x01;
+       else    /* If TX not OK, ignore RX */
+               goto out;
+
+       /* If TX is OK, check whether RX is OK */
+       if (!(reg_eac & BIT(27)) &&
+           ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+           ((reg_eac & 0x03ff0000) != 0x00360000))
+               result |= 0x02;
+       else
+               dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+                        __func__);
+out:
+       return result;
+}
+
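+/*
+ * Run LO and IQ calibration for path B, using the same 0x01 (TX) /
+ * 0x02 (RX) result bitmask convention as the path A variant.
+ */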
+static int rtl8xxxu_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+       u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+       int result = 0;
+
+       /* One shot, path B LOK & IQK */
+       rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002);
+       rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000);
+
+       mdelay(1);
+
+       /* Check failed */
+       reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+       reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+       reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+       reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+       reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+
+       if (!(reg_eac & BIT(31)) &&
+           ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+           ((reg_ebc & 0x03ff0000) != 0x00420000))
+               result |= 0x01;
+       else
+               goto out;
+
+       if (!(reg_eac & BIT(30)) &&
+           (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) &&
+           (((reg_ecc & 0x03ff0000) >> 16) != 0x36))
+               result |= 0x02;
+       else
+               dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
+                        __func__);
+out:
+       return result;
+}
+
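+/*
+ * Perform a single IQ calibration pass t. On the first pass (t == 0)
+ * the ADDA, MAC and BB register state is saved; subsequent passes
+ * restore it when done. The measured TX/RX values for both paths are
+ * written into row t of result[][8].
+ */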
+static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+                                    int result[][8], int t)
+{
+       struct device *dev = &priv->udev->dev;
+       u32 i, val32;
+       int path_a_ok, path_b_ok;
+       int retry = 2;
+       const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+               REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+               REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+               REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+               REG_TX_OFDM_BBON, REG_TX_TO_RX,
+               REG_TX_TO_TX, REG_RX_CCK,
+               REG_RX_OFDM, REG_RX_WAIT_RIFS,
+               REG_RX_TO_RX, REG_STANDBY,
+               REG_SLEEP, REG_PMPD_ANAEN
+       };
+       const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+               REG_TXPAUSE, REG_BEACON_CTRL,
+               REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+       };
+       const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+               REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+               REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+               REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+               REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
+       };
+
+       /*
+        * Note: IQ calibration must be performed after loading
+        *       PHY_REG.txt and radio_a.txt / radio_b.txt.
+        */
+
+       if (t == 0) {
+               /* Save ADDA parameters, turn Path A ADDA on */
+               rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+                                  RTL8XXXU_ADDA_REGS);
+               rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+               rtl8xxxu_save_regs(priv, iqk_bb_regs,
+                                  priv->bb_backup, RTL8XXXU_BB_REGS);
+       }
+
+       rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+       if (t == 0) {
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
+               if (val32 & FPGA0_HSSI_PARM1_PI)
+                       priv->pi_enabled = 1;
+       }
+
+       if (!priv->pi_enabled) {
+               /* Switch BB to PI mode to do IQ Calibration. */
+               rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100);
+               rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100);
+       }
+
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+       val32 &= ~FPGA_RF_MODE_CCK;
+       rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+       rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+       rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+       rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+       val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+       rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+       val32 &= ~BIT(10);
+       rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+       val32 &= ~BIT(10);
+       rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+       if (priv->tx_paths > 1) {
+               rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
+               rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM, 0x00010000);
+       }
+
+       /* MAC settings */
+       rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+       /* Page B init */
+       rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x00080000);
+
+       if (priv->tx_paths > 1)
+               rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x00080000);
+
+       /* IQ calibration setting */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+       rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+       rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+       for (i = 0; i < retry; i++) {
+               path_a_ok = rtl8xxxu_iqk_path_a(priv);
+               if (path_a_ok == 0x03) {
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_TX_POWER_BEFORE_IQK_A);
+                       result[t][0] = (val32 >> 16) & 0x3ff;
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_TX_POWER_AFTER_IQK_A);
+                       result[t][1] = (val32 >> 16) & 0x3ff;
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_RX_POWER_BEFORE_IQK_A_2);
+                       result[t][2] = (val32 >> 16) & 0x3ff;
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_RX_POWER_AFTER_IQK_A_2);
+                       result[t][3] = (val32 >> 16) & 0x3ff;
+                       break;
+               } else if (i == (retry - 1) && path_a_ok == 0x01) {
+                       /* TX IQK OK */
+                       dev_dbg(dev, "%s: Path A IQK Only Tx Success!!\n",
+                               __func__);
+
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_TX_POWER_BEFORE_IQK_A);
+                       result[t][0] = (val32 >> 16) & 0x3ff;
+                       val32 = rtl8xxxu_read32(priv,
+                                               REG_TX_POWER_AFTER_IQK_A);
+                       result[t][1] = (val32 >> 16) & 0x3ff;
+               }
+       }
+
+       if (!path_a_ok)
+               dev_dbg(dev, "%s: Path A IQK failed!\n", __func__);
+
+       if (priv->tx_paths > 1) {
+               /*
+                * Path A into standby
+                */
+               rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x0);
+               rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
+               rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+               /* Turn Path B ADDA on */
+               rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+               for (i = 0; i < retry; i++) {
+                       path_b_ok = rtl8xxxu_iqk_path_b(priv);
+                       if (path_b_ok == 0x03) {
+                               val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+                               result[t][4] = (val32 >> 16) & 0x3ff;
+                               val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+                               result[t][5] = (val32 >> 16) & 0x3ff;
+                               val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+                               result[t][6] = (val32 >> 16) & 0x3ff;
+                               val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+                               result[t][7] = (val32 >> 16) & 0x3ff;
+                               break;
+                       } else if (i == (retry - 1) && path_b_ok == 0x01) {
+                               /* TX IQK OK */
+                               val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+                               result[t][4] = (val32 >> 16) & 0x3ff;
+                               val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+                               result[t][5] = (val32 >> 16) & 0x3ff;
+                       }
+               }
+
+               if (!path_b_ok)
+                       dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+       }
+
+       /* Back to BB mode, load original value */
+       rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0);
+
+       if (t) {
+               if (!priv->pi_enabled) {
+                       /*
+                        * Switch back BB to SI mode after finishing
+                        * IQ Calibration
+                        */
+                       val32 = 0x01000000;
+                       rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, val32);
+                       rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, val32);
+               }
+
+               /* Reload ADDA power saving parameters */
+               rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+                                     RTL8XXXU_ADDA_REGS);
+
+               /* Reload MAC parameters */
+               rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+               /* Reload BB parameters */
+               rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+                                     priv->bb_backup, RTL8XXXU_BB_REGS);
+
+               /* Restore RX initial gain */
+               rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00032ed3);
+
+               if (priv->tx_paths > 1) {
+                       rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM,
+                                        0x00032ed3);
+               }
+
+               /* Load 0xe30 IQC default value */
+               rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+               rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+       }
+}
+
+static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+       struct device *dev = &priv->udev->dev;
+       int result[4][8];       /* last row is the final result */
+       int i, candidate;
+       bool path_a_ok, path_b_ok;
+       u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+       u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+       s32 reg_tmp = 0;
+       bool simu;
+
+       memset(result, 0, sizeof(result));
+       candidate = -1;
+
+       path_a_ok = false;
+       path_b_ok = false;
+
+       rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+
+       for (i = 0; i < 3; i++) {
+               rtl8xxxu_phy_iqcalibrate(priv, result, i);
+
+               if (i == 1) {
+                       simu = rtl8xxxu_simularity_compare(priv, result, 0, 1);
+                       if (simu) {
+                               candidate = 0;
+                               break;
+                       }
+               }
+
+               if (i == 2) {
+                       simu = rtl8xxxu_simularity_compare(priv, result, 0, 2);
+                       if (simu) {
+                               candidate = 0;
+                               break;
+                       }
+
+                       simu = rtl8xxxu_simularity_compare(priv, result, 1, 2);
+                       if (simu) {
+                               candidate = 1;
+                       } else {
+                               for (i = 0; i < 8; i++)
+                                       reg_tmp += result[3][i];
+
+                               if (reg_tmp)
+                                       candidate = 3;
+                               else
+                                       candidate = -1;
+                       }
+               }
+       }
+
+       /*
+        * Start from the merged values in the final row, result[3];
+        * a valid candidate row below overrides them.
+        */
+       reg_e94 = result[3][0];
+       reg_e9c = result[3][1];
+       reg_ea4 = result[3][2];
+       reg_eac = result[3][3];
+       reg_eb4 = result[3][4];
+       reg_ebc = result[3][5];
+       reg_ec4 = result[3][6];
+       reg_ecc = result[3][7];
+
+       if (candidate >= 0) {
+               reg_e94 = result[candidate][0];
+               priv->rege94 = reg_e94;
+               reg_e9c = result[candidate][1];
+               priv->rege9c = reg_e9c;
+               reg_ea4 = result[candidate][2];
+               reg_eac = result[candidate][3];
+               reg_eb4 = result[candidate][4];
+               priv->regeb4 = reg_eb4;
+               reg_ebc = result[candidate][5];
+               priv->regebc = reg_ebc;
+               reg_ec4 = result[candidate][6];
+               reg_ecc = result[candidate][7];
+               dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+               dev_dbg(dev,
+                       "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+                       "ecc=%x\n ", __func__, reg_e94, reg_e9c,
+                       reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+               path_a_ok = true;
+               path_b_ok = true;
+       } else {
+               reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+               reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+       }
+
+       if (reg_e94 && candidate >= 0)
+               rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+                                          candidate, (reg_ea4 == 0));
+
+       if (priv->tx_paths > 1 && reg_eb4)
+               rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+                                          candidate, (reg_ec4 == 0));
+
+       rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+                          priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
+static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
+{
+       u32 val32;
+       u32 rf_amode, rf_bmode = 0, lstf;
+
+       /* Check continuous TX and Packet TX */
+       lstf = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+
+       if (lstf & OFDM_LSTF_MASK) {
+               /* Disable all continuous TX */
+               val32 = lstf & ~OFDM_LSTF_MASK;
+               rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+               /* Read original RF mode Path A */
+               rf_amode = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_AC);
+
+               /* Set RF mode to standby Path A */
+               rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC,
+                                    (rf_amode & 0x8ffff) | 0x10000);
+
+               /* Path-B */
+               if (priv->tx_paths > 1) {
+                       rf_bmode = rtl8xxxu_read_rfreg(priv, RF_B,
+                                                      RF6052_REG_AC);
+
+                       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC,
+                                            (rf_bmode & 0x8ffff) | 0x10000);
+               }
+       } else {
+               /*  Deal with Packet TX case */
+               /*  block all queues */
+               rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+       }
+
+       /* Start LC calibration */
+       val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG);
+       val32 |= 0x08000;
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32);
+
+       msleep(100);
+
+       /* Restore original parameters */
+       if (lstf & OFDM_LSTF_MASK) {
+               /* Path-A */
+               rtl8xxxu_write32(priv, REG_OFDM1_LSTF, lstf);
+               rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, rf_amode);
+
+               /* Path-B */
+               if (priv->tx_paths > 1)
+                       rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC,
+                                            rf_bmode);
+       } else /*  Deal with Packet TX case */
+               rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv)
+{
+       int i;
+       u16 reg;
+
+       reg = REG_MACID;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               rtl8xxxu_write8(priv, reg + i, priv->mac_addr[i]);
+
+       return 0;
+}
+
+static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid)
+{
+       int i;
+       u16 reg;
+
+       dev_dbg(&priv->udev->dev, "%s: (%pM)\n", __func__, bssid);
+
+       reg = REG_BSSID;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               rtl8xxxu_write8(priv, reg + i, bssid[i]);
+
+       return 0;
+}
+
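+/*
+ * Clamp both nibbles of each REG_AGGLEN_LMT byte so that neither
+ * exceeds the requested A-MPDU aggregation factor (converted from
+ * the 802.11 exponent and capped at 0xf).
+ */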
+static void
+rtl8xxxu_set_ampdu_factor(struct rtl8xxxu_priv *priv, u8 ampdu_factor)
+{
+       u8 vals[4] = { 0x41, 0xa8, 0x72, 0xb9 };
+       u8 max_agg = 0xf;
+       int i;
+
+       ampdu_factor = 1 << (ampdu_factor + 2);
+       if (ampdu_factor > max_agg)
+               ampdu_factor = max_agg;
+
+       for (i = 0; i < 4; i++) {
+               if ((vals[i] & 0xf0) > (ampdu_factor << 4))
+                       vals[i] = (vals[i] & 0x0f) | (ampdu_factor << 4);
+
+               if ((vals[i] & 0x0f) > ampdu_factor)
+                       vals[i] = (vals[i] & 0xf0) | ampdu_factor;
+
+               rtl8xxxu_write8(priv, REG_AGGLEN_LMT + i, vals[i]);
+       }
+}
+
+static void rtl8xxxu_set_ampdu_min_space(struct rtl8xxxu_priv *priv, u8 density)
+{
+       u8 val8;
+
+       val8 = rtl8xxxu_read8(priv, REG_AMPDU_MIN_SPACE);
+       val8 &= 0xf8;
+       val8 |= density;
+       rtl8xxxu_write8(priv, REG_AMPDU_MIN_SPACE, val8);
+}
+
+static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       int count, ret = 0;
+
+       /* Start of rtl8723AU_card_enable_flow */
+       /* Act to Cardemu sequence */
+       /* Turn off RF */
+       rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+       /* 0x004E[7] = 0, switch DPDT_SEL_P output from register 0x0065[2] */
+       val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+       val8 &= ~LEDCFG2_DPDT_SELECT;
+       rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+       /* 0x0005[1] = 1 turn off MAC by HW state machine */
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+       val8 |= BIT(1);
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+       for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+               val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+               if ((val8 & BIT(1)) == 0)
+                       break;
+               udelay(10);
+       }
+
+       if (!count) {
+               dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+                        __func__);
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       /* 0x0000[5] = 1 analog Ips to digital, 1:isolation */
+       val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+       val8 |= SYS_ISO_ANALOG_IPS;
+       rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+       /* 0x0020[0] = 0 disable LDOA12 MACRO block */
+       val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+       val8 &= ~LDOA15_ENABLE;
+       rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+exit:
+       return ret;
+}
+
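+/*
+ * Move the device from active state into leisure power save: pause
+ * TX, wait for any RX in flight to complete, then gate the BB clock,
+ * reset the baseband and trim the MAC TRX settings down.
+ */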
+static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u32 val32;
+       int count, ret = 0;
+
+       rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+       /*
+        * Poll - wait for RX packet to complete
+        */
+       for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+               val32 = rtl8xxxu_read32(priv, 0x5f8);
+               if (!val32)
+                       break;
+               udelay(10);
+       }
+
+       if (!count) {
+               dev_warn(&priv->udev->dev,
+                        "%s: RX poll timed out (0x05f8)\n", __func__);
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       /* Disable CCK and OFDM, clock gated */
+       val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+       val8 &= ~SYS_FUNC_BBRSTB;
+       rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+       udelay(2);
+
+       /* Reset baseband */
+       val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+       val8 &= ~SYS_FUNC_BB_GLB_RSTN;
+       rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+       /* Reset MAC TRX */
+       val8 = CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE;
+       rtl8xxxu_write8(priv, REG_CR, val8);
+
+       /* Disable the security engine */
+       val8 = rtl8xxxu_read8(priv, REG_CR + 1);
+       val8 &= ~BIT(1); /* CR_SECURITY_ENABLE */
+       rtl8xxxu_write8(priv, REG_CR + 1, val8);
+
+       /* Respond TX OK to scheduler */
+       val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+       val8 |= DUAL_TSF_TX_OK;
+       rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+exit:
+       return ret;
+}
+
+static void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+
+       /* Clear suspend enable and power down enable */
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+       val8 &= ~(BIT(3) | BIT(7));
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+       /* 0x48[16] = 0 to disable GPIO9 as EXT WAKEUP */
+       val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+       val8 &= ~BIT(0);
+       rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+
+       /* 0x04[12:11] = 00 disable WL suspend */
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+       val8 &= ~(BIT(3) | BIT(4));
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+}
+
+static int rtl8xxxu_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u32 val32;
+       int count, ret = 0;
+
+       /* 0x20[0] = 1 enable LDOA12 MACRO block for all interfaces */
+       val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+       val8 |= LDOA15_ENABLE;
+       rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+       /* 0x67[0] = 0 to disable BT_GPS_SEL pins */
+       val8 = rtl8xxxu_read8(priv, 0x0067);
+       val8 &= ~BIT(4);
+       rtl8xxxu_write8(priv, 0x0067, val8);
+
+       mdelay(1);
+
+       /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */
+       val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+       val8 &= ~SYS_ISO_ANALOG_IPS;
+       rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+       /* disable SW LPS 0x04[10] = 0 */
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+       val8 &= ~BIT(2);
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+       /* wait till 0x04[17] = 1 power ready */
+       for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+               val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+               if (val32 & BIT(17))
+                       break;
+
+               udelay(10);
+       }
+
+       if (!count) {
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       /* We should be able to optimize the following three entries into one */
+
+       /* release WLON reset 0x04[16] = 1 */
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+       val8 |= BIT(0);
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+       /* disable HWPDN 0x04[15] = 0 */
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+       val8 &= ~BIT(7);
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+       /* disable WL suspend */
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+       val8 &= ~(BIT(3) | BIT(4));
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+       /* set, then poll until 0 */
+       val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+       val32 |= APS_FSMCO_MAC_ENABLE;
+       rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+       for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+               val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+               if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+                       ret = 0;
+                       break;
+               }
+               udelay(10);
+       }
+
+       if (!count) {
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       /* 0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */
+       /*
+        * Note: the vendor driver actually clears this bit, despite the
+        * documentation claiming it's being set!
+        */
+       val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+       val8 |= LEDCFG2_DPDT_SELECT;
+       val8 &= ~LEDCFG2_DPDT_SELECT;
+       rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+exit:
+       return ret;
+}
+
+static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+
+       /* 0x0007[7:0] = 0x20 SOP option to disable BG/MB */
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 3, 0x20);
+
+       /* 0x04[12:11] = 01 enable WL suspend */
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+       val8 &= ~BIT(4);
+       val8 |= BIT(3);
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+       val8 |= BIT(7);
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+       /* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */
+       val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+       val8 |= BIT(0);
+       rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+
+       return 0;
+}
+
+static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u16 val16;
+       u32 val32;
+       int ret;
+
+       /*
+        * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+        */
+       rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+
+       rtl8xxxu_disabled_to_emu(priv);
+
+       ret = rtl8xxxu_emu_to_active(priv);
+       if (ret)
+               goto exit;
+
+       /*
+        * 0x0004[19] = 1, reset 8051
+        */
+       val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+       val8 |= BIT(3);
+       rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+       /*
+        * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+        * Set CR bit10 to enable 32k calibration.
+        */
+       val16 = rtl8xxxu_read16(priv, REG_CR);
+       val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+                 CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+                 CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+                 CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+                 CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+       rtl8xxxu_write16(priv, REG_CR, val16);
+
+       /* For EFuse PG */
+       val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+       val32 &= ~(BIT(28) | BIT(29) | BIT(30));
+       val32 |= (0x06 << 28);
+       rtl8xxxu_write32(priv, REG_EFUSE_CTRL, val32);
+exit:
+       return ret;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u16 val16;
+       u32 val32;
+       int i;
+
+       for (i = 100; i; i--) {
+               val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO);
+               if (val8 & APS_FSMCO_PFM_ALDN)
+                       break;
+       }
+
+       if (!i) {
+               pr_info("%s: Poll failed\n", __func__);
+               return -ENODEV;
+       }
+
+       /*
+        * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+        */
+       rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+       rtl8xxxu_write8(priv, REG_SPS0_CTRL, 0x2b);
+       udelay(100);
+
+       val8 = rtl8xxxu_read8(priv, REG_LDOV12D_CTRL);
+       if (!(val8 & LDOV12D_ENABLE)) {
+               pr_info("%s: Enabling LDOV12D (%02x)\n", __func__, val8);
+               val8 |= LDOV12D_ENABLE;
+               rtl8xxxu_write8(priv, REG_LDOV12D_CTRL, val8);
+
+               udelay(100);
+
+               val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+               val8 &= ~SYS_ISO_MD2PP;
+               rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+       }
+
+       /*
+        * Auto enable WLAN
+        */
+       val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+       val16 |= APS_FSMCO_MAC_ENABLE;
+       rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+       for (i = 1000; i; i--) {
+               val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+               if (!(val16 & APS_FSMCO_MAC_ENABLE))
+                       break;
+       }
+       if (!i) {
+               pr_info("%s: FSMCO_MAC_ENABLE poll failed\n", __func__);
+               return -EBUSY;
+       }
+
+       /*
+        * Enable radio, GPIO, LED
+        */
+       val16 = APS_FSMCO_HW_SUSPEND | APS_FSMCO_ENABLE_POWERDOWN |
+               APS_FSMCO_PFM_ALDN;
+       rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+       /*
+        * Release RF digital isolation
+        */
+       val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+       val16 &= ~SYS_ISO_DIOR;
+       rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+
+       val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+       val8 &= ~APSD_CTRL_OFF;
+       rtl8xxxu_write8(priv, REG_APSD_CTRL, val8);
+       for (i = 200; i; i--) {
+               val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+               if (!(val8 & APSD_CTRL_OFF_STATUS))
+                       break;
+       }
+
+       if (!i) {
+               pr_info("%s: APSD_CTRL poll failed\n", __func__);
+               return -EBUSY;
+       }
+
+       /*
+        * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+        */
+       val16 = rtl8xxxu_read16(priv, REG_CR);
+       val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+               CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE |
+               CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
+       rtl8xxxu_write16(priv, REG_CR, val16);
+
+       /*
+        * Workaround for 8188RU LNA power leakage problem.
+        */
+       if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+               val32 &= ~BIT(1);
+               rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+       }
+       return 0;
+}
+
+#endif
+
+static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
+{
+       u8 val8;
+       u16 val16;
+       u32 val32;
+
+       /*
+        * Workaround for 8188RU LNA power leakage problem.
+        */
+       if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+               val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+               val32 |= BIT(1);
+               rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+       }
+
+       rtl8xxxu_active_to_lps(priv);
+
+       /* Turn off RF */
+       rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+       /* Reset Firmware if running in RAM */
+       if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+               rtl8xxxu_firmware_self_reset(priv);
+
+       /* Reset MCU */
+       val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+       val16 &= ~SYS_FUNC_CPU_ENABLE;
+       rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+       /* Reset MCU ready status */
+       rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+       rtl8xxxu_active_to_emu(priv);
+       rtl8xxxu_emu_to_disabled(priv);
+
+       /* Reset MCU IO Wrapper */
+       val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+       val8 &= ~BIT(0);
+       rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+       val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+       val8 |= BIT(0);
+       rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+       /* RSV_CTRL 0x1C[7:0] = 0x0e lock ISO/CLK/Power control register */
+       rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
+}
+
+static void rtl8xxxu_init_bt(struct rtl8xxxu_priv *priv)
+{
+       if (!priv->has_bluetooth)
+               return;
+}
+
+static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct device *dev = &priv->udev->dev;
+       struct rtl8xxxu_rfregval *rftable;
+       bool macpower;
+       int ret;
+       u8 val8;
+       u16 val16;
+       u32 val32;
+
+       /* Check if MAC is already powered on */
+       val8 = rtl8xxxu_read8(priv, REG_CR);
+
+       /*
+        * Fix 92DU-VC S3 hang: the secondary MAC is not initialized.
+        * The first MAC returns 0xea, the second MAC returns 0x00.
+        */
+       if (val8 == 0xea)
+               macpower = false;
+       else
+               macpower = true;
+
+       ret = priv->fops->power_on(priv);
+       if (ret < 0) {
+               dev_warn(dev, "%s: Failed to power on\n", __func__);
+               goto exit;
+       }
+
+       dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+       if (!macpower) {
+               ret = rtl8xxxu_init_llt_table(priv, TX_TOTAL_PAGE_NUM);
+               if (ret) {
+                       dev_warn(dev, "%s: LLT table init failed\n", __func__);
+                       goto exit;
+               }
+       }
+
+       ret = rtl8xxxu_download_firmware(priv);
+       dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
+       if (ret)
+               goto exit;
+       ret = rtl8xxxu_start_firmware(priv);
+       dev_dbg(dev, "%s: start_firmware %i\n", __func__, ret);
+       if (ret)
+               goto exit;
+
+       ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
+       dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
+       if (ret)
+               goto exit;
+
+       ret = rtl8xxxu_init_phy_bb(priv);
+       dev_dbg(dev, "%s: init_phy_bb %i\n", __func__, ret);
+       if (ret)
+               goto exit;
+
+       switch (priv->rtlchip) {
+       case 0x8723a:
+               rftable = rtl8723au_radioa_1t_init_table;
+               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+               break;
+       case 0x8188c:
+               if (priv->hi_pa)
+                       rftable = rtl8188ru_radioa_1t_highpa_table;
+               else
+                       rftable = rtl8192cu_radioa_1t_init_table;
+               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+               break;
+       case 0x8191c:
+               rftable = rtl8192cu_radioa_1t_init_table;
+               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+               break;
+       case 0x8192c:
+               rftable = rtl8192cu_radioa_2t_init_table;
+               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+               if (ret)
+                       break;
+               rftable = rtl8192cu_radiob_2t_init_table;
+               ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       if (ret)
+               goto exit;
+
+       /* Reduce 80M spur */
+       rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
+       rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+       rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
+       rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+
+       /* RFSW Control - clear bit 14 ?? */
+       rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
+       /* 0x07000760 */
+       val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
+               FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
+               ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
+                FPGA0_RF_BD_CTRL_SHIFT);
+       rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+       /* 0x860[6:5] = 00 - why? - this sets antenna B */
+       rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
+
+       priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
+                                                 RF6052_REG_MODE_AG);
+
+       dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+       if (!macpower) {
+               if (priv->ep_tx_normal_queue)
+                       val8 = TX_PAGE_NUM_NORM_PQ;
+               else
+                       val8 = 0;
+
+               rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
+
+               val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD;
+
+               if (priv->ep_tx_high_queue)
+                       val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
+               if (priv->ep_tx_low_queue)
+                       val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
+
+               rtl8xxxu_write32(priv, REG_RQPN, val32);
+
+               /*
+                * Set TX buffer boundary
+                */
+               val8 = TX_TOTAL_PAGE_NUM + 1;
+               rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
+               rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
+               rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8);
+               rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8);
+               rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8);
+       }
+
+       ret = rtl8xxxu_init_queue_priority(priv);
+       dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
+       if (ret)
+               goto exit;
+
+       /*
+        * Set RX page boundary
+        */
+       rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
+       /*
+        * Transfer page size is always 128
+        */
+       val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
+               (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
+       rtl8xxxu_write8(priv, REG_PBP, val8);
+
+       /*
+        * Unit in 8 bytes, not obvious what it is used for
+        */
+       rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4);
+
+       /*
+        * Enable all interrupts - not obvious USB needs to do this
+        */
+       rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
+       rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+
+       rtl8xxxu_set_mac(priv);
+       rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
+
+       /*
+        * Configure initial WMAC settings
+        */
+       val32 = RCR_ACCEPT_PHYS_MATCH | RCR_ACCEPT_MCAST | RCR_ACCEPT_BCAST |
+               /* RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON | */
+               RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
+               RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
+       rtl8xxxu_write32(priv, REG_RCR, val32);
+
+       /*
+        * Accept all multicast
+        */
+       rtl8xxxu_write32(priv, REG_MAR, 0xffffffff);
+       rtl8xxxu_write32(priv, REG_MAR + 4, 0xffffffff);
+
+       /*
+        * Init adaptive controls
+        */
+       val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+       val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+       val32 |= RESPONSE_RATE_RRSR_CCK_ONLY_1M;
+       rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+
+       /* CCK = 0x0a, OFDM = 0x10 */
+       rtl8xxxu_set_spec_sifs(priv, 0x10, 0x10);
+       rtl8xxxu_set_retry(priv, 0x30, 0x30);
+       rtl8xxxu_set_spec_sifs(priv, 0x0a, 0x10);
+
+       /*
+        * Init EDCA
+        */
+       rtl8xxxu_write16(priv, REG_MAC_SPEC_SIFS, 0x100a);
+
+       /* Set CCK SIFS */
+       rtl8xxxu_write16(priv, REG_SIFS_CCK, 0x100a);
+
+       /* Set OFDM SIFS */
+       rtl8xxxu_write16(priv, REG_SIFS_OFDM, 0x100a);
+
+       /* TXOP */
+       rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, 0x005ea42b);
+       rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, 0x0000a44f);
+       rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, 0x005ea324);
+       rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, 0x002fa226);
+
+       /* Set data auto rate fallback retry count */
+       rtl8xxxu_write32(priv, REG_DARFRC, 0x00000000);
+       rtl8xxxu_write32(priv, REG_DARFRC + 4, 0x10080404);
+       rtl8xxxu_write32(priv, REG_RARFRC, 0x04030201);
+       rtl8xxxu_write32(priv, REG_RARFRC + 4, 0x08070605);
+
+       val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL);
+       val8 |= FWHW_TXQ_CTRL_AMPDU_RETRY;
+       rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL, val8);
+
+       /*  Set ACK timeout */
+       rtl8xxxu_write8(priv, REG_ACKTO, 0x40);
+
+       /*
+        * Initialize beacon parameters
+        */
+       val16 = BEACON_DISABLE_TSF_UPDATE | (BEACON_DISABLE_TSF_UPDATE << 8);
+       rtl8xxxu_write16(priv, REG_BEACON_CTRL, val16);
+       rtl8xxxu_write16(priv, REG_TBTT_PROHIBIT, 0x6404);
+       rtl8xxxu_write8(priv, REG_DRIVER_EARLY_INT, DRIVER_EARLY_INT_TIME);
+       rtl8xxxu_write8(priv, REG_BEACON_DMA_TIME, BEACON_DMA_ATIME_INT_TIME);
+       rtl8xxxu_write16(priv, REG_BEACON_TCFG, 0x660F);
+
+       /*
+        * Enable CCK and OFDM block
+        */
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+       val32 |= (FPGA_RF_MODE_CCK | FPGA_RF_MODE_OFDM);
+       rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+       /*
+        * Invalidate all CAM entries - bit 30 is undocumented
+        */
+       rtl8xxxu_write32(priv, REG_CAM_CMD, CAM_CMD_POLLING | BIT(30));
+
+       /*
+        * Start out with default power levels for channel 6, 20MHz
+        */
+       rtl8723a_set_tx_power(priv, 1, false);
+
+       /* Let the 8051 take control of antenna setting */
+       val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+       val8 |= LEDCFG2_DPDT_SELECT;
+       rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+       rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff);
+
+       /* Disable BAR - not sure if this has any effect on USB */
+       rtl8xxxu_write32(priv, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+       rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
+
+       /*
+        * Not sure if we should get into this at all
+        */
+       if (priv->iqk_initialized) {
+               rtl8xxxu_restore_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+                                     priv->bb_recovery_backup,
+                                     RTL8XXXU_BB_REGS);
+       } else {
+               rtl8723a_phy_iq_calibrate(priv);
+               priv->iqk_initialized = true;
+       }
+
+       /*
+        * This should enable thermal meter
+        */
+       rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60);
+
+       rtl8723a_phy_lc_calibrate(priv);
+
+       /* fix USB interface interference issue */
+       rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+       rtl8xxxu_write8(priv, 0xfe41, 0x8d);
+       rtl8xxxu_write8(priv, 0xfe42, 0x80);
+       rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+
+       /* Solve too many protocol error on USB bus */
+       /* Can't do this for 8188/8192 UMC A cut parts */
+       rtl8xxxu_write8(priv, 0xfe40, 0xe6);
+       rtl8xxxu_write8(priv, 0xfe41, 0x94);
+       rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+       rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+       rtl8xxxu_write8(priv, 0xfe41, 0x19);
+       rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+       rtl8xxxu_write8(priv, 0xfe40, 0xe5);
+       rtl8xxxu_write8(priv, 0xfe41, 0x91);
+       rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+       rtl8xxxu_write8(priv, 0xfe40, 0xe2);
+       rtl8xxxu_write8(priv, 0xfe41, 0x81);
+       rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+       /* Init BT hw config. */
+       rtl8xxxu_init_bt(priv);
+
+       /*
+        * Not sure if we really need to save these parameters, but the
+        * vendor driver does
+        */
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+       if (val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR)
+               priv->path_a_hi_power = 1;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+       priv->path_a_rf_paths = val32 & OFDM_RF_PATH_RX_MASK;
+
+       val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+       priv->path_a_ig_value = val32 & OFDM0_X_AGC_CORE1_IGI_MASK;
+
+       /* Set NAV_UPPER to 30000us */
+       val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
+       rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
+
+       /*
+        * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
+        * but we still need to find the root cause.
+        */
+       val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+       if ((val32 & 0xff000000) != 0x83000000) {
+               val32 |= FPGA_RF_MODE_CCK;
+               rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+       }
+
+       val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL);
+       val32 |= FWHW_TXQ_CTRL_XMIT_MGMT_ACK;
+       /* ack for xmit mgmt frames. */
+       rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32);
+
+exit:
+       return ret;
+}
+
+static void rtl8xxxu_disable_device(struct ieee80211_hw *hw)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+
+       rtl8xxxu_power_off(priv);
+}
+
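+/*
+ * Program one CAM (security key) entry. An entry is written as six
+ * 32 bit words: word 0 carries the control bits plus the first two
+ * MAC address octets, word 1 the remaining four octets, and words
+ * 2-5 the key material. Each word is latched by issuing a polled
+ * write command to REG_CAM_CMD.
+ */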
+static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
+                              struct ieee80211_key_conf *key, const u8 *mac)
+{
+       u32 cmd, val32, addr, ctrl;
+       int j, i, tmp_debug;
+
+       tmp_debug = rtl8xxxu_debug;
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_KEY)
+               rtl8xxxu_debug |= RTL8XXXU_DEBUG_REG_WRITE;
+
+       /*
+        * This is a bit of a hack - the lower bits of the cipher
+        * suite selector happens to match the cipher index in the CAM
+        */
+       addr = key->keyidx << CAM_CMD_KEY_SHIFT;
+       ctrl = (key->cipher & 0x0f) << 2 | key->keyidx | CAM_WRITE_VALID;
+
+       for (j = 5; j >= 0; j--) {
+               switch (j) {
+               case 0:
+                       val32 = ctrl | (mac[0] << 16) | (mac[1] << 24);
+                       break;
+               case 1:
+                       val32 = mac[2] | (mac[3] << 8) |
+                               (mac[4] << 16) | (mac[5] << 24);
+                       break;
+               default:
+                       i = (j - 2) << 2;
+                       val32 = key->key[i] | (key->key[i + 1] << 8) |
+                               key->key[i + 2] << 16 | key->key[i + 3] << 24;
+                       break;
+               }
+
+               rtl8xxxu_write32(priv, REG_CAM_WRITE, val32);
+               cmd = CAM_CMD_POLLING | CAM_CMD_WRITE | (addr + j);
+               rtl8xxxu_write32(priv, REG_CAM_CMD, cmd);
+               udelay(100);
+       }
+
+       rtl8xxxu_debug = tmp_debug;
+}
+
+static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif, const u8 *mac)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       u8 val8;
+
+       val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+       val8 |= BEACON_DISABLE_TSF_UPDATE;
+       rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+}
+
+static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       u8 val8;
+
+       val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+       val8 &= ~BEACON_DISABLE_TSF_UPDATE;
+       rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+}
+
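+/*
+ * Hand the updated rate mask to the firmware via an H2C command.
+ * Setting 0x20 in the arg byte requests short GI.
+ */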
+static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+                                     u32 ramask, int sgi)
+{
+       struct h2c_cmd h2c;
+
+       memset(&h2c, 0, sizeof(struct h2c_cmd));
+       h2c.ramask.cmd = H2C_SET_RATE_MASK;
+       h2c.ramask.mask_lo = cpu_to_le16(ramask & 0xffff);
+       h2c.ramask.mask_hi = cpu_to_le16(ramask >> 16);
+
+       h2c.ramask.arg = 0x80;
+       if (sgi)
+               h2c.ramask.arg |= 0x20;
+
+       dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x\n", __func__,
+               ramask, h2c.ramask.arg);
+       rtl8723a_h2c_cmd(priv, &h2c);
+}
+
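+/*
+ * Program the response rate set and derive the initial RTS rate
+ * index from the highest rate present in the configured bitmap.
+ */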
+static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
+{
+       u32 val32;
+       u8 rate_idx = 0;
+
+       rate_cfg &= RESPONSE_RATE_BITMAP_ALL;
+
+       val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+       val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+       val32 |= rate_cfg;
+       rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+
+       dev_dbg(&priv->udev->dev, "%s: rates %08x\n", __func__, rate_cfg);
+
+       while (rate_cfg) {
+               rate_cfg = (rate_cfg >> 1);
+               rate_idx++;
+       }
+       rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
+}
+
+static void
+rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *bss_conf, u32 changed)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct device *dev = &priv->udev->dev;
+       struct ieee80211_sta *sta;
+       u32 val32;
+       u8 val8;
+
+       if (changed & BSS_CHANGED_ASSOC) {
+               struct h2c_cmd h2c;
+
+               dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc);
+
+               memset(&h2c, 0, sizeof(struct h2c_cmd));
+               rtl8xxxu_set_linktype(priv, vif->type);
+
+               if (bss_conf->assoc) {
+                       u32 ramask;
+                       int sgi = 0;
+
+                       rcu_read_lock();
+                       sta = ieee80211_find_sta(vif, bss_conf->bssid);
+                       if (!sta) {
+                               dev_info(dev, "%s: ASSOC no sta found\n",
+                                        __func__);
+                               rcu_read_unlock();
+                               goto error;
+                       }
+
+                       if (sta->ht_cap.ht_supported)
+                               dev_info(dev, "%s: HT supported\n", __func__);
+                       if (sta->vht_cap.vht_supported)
+                               dev_info(dev, "%s: VHT supported\n", __func__);
+
+                       /* TODO: Set bits 28-31 for rate adaptive id */
+                       ramask = (sta->supp_rates[0] & 0xfff) |
+                               sta->ht_cap.mcs.rx_mask[0] << 12 |
+                               sta->ht_cap.mcs.rx_mask[1] << 20;
+                       if (sta->ht_cap.cap &
+                           (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
+                               sgi = 1;
+                       rcu_read_unlock();
+
+                       rtl8xxxu_update_rate_mask(priv, ramask, sgi);
+
+                       val32 = rtl8xxxu_read32(priv, REG_RCR);
+                       val32 |= RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON;
+                       rtl8xxxu_write32(priv, REG_RCR, val32);
+
+                       /* Enable RX of data frames */
+                       rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
+
+                       rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
+
+                       rtl8723a_stop_tx_beacon(priv);
+
+                       /* joinbss sequence */
+                       rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
+                                        0xc000 | bss_conf->aid);
+
+                       h2c.joinbss.data = H2C_JOIN_BSS_CONNECT;
+               } else {
+                       val32 = rtl8xxxu_read32(priv, REG_RCR);
+                       val32 &= ~(RCR_CHECK_BSSID_MATCH |
+                                  RCR_CHECK_BSSID_BEACON);
+                       rtl8xxxu_write32(priv, REG_RCR, val32);
+
+                       val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+                       val8 |= BEACON_DISABLE_TSF_UPDATE;
+                       rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+
+                       /* Disable RX of data frames */
+                       rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+                       h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT;
+               }
+               h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT;
+               rtl8723a_h2c_cmd(priv, &h2c);
+       }
+
+       if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+               dev_dbg(dev, "Changed ERP_PREAMBLE: Use short preamble %i\n",
+                       bss_conf->use_short_preamble);
+               val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+               if (bss_conf->use_short_preamble)
+                       val32 |= RSR_ACK_SHORT_PREAMBLE;
+               else
+                       val32 &= ~RSR_ACK_SHORT_PREAMBLE;
+               rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+       }
+
+       if (changed & BSS_CHANGED_ERP_SLOT) {
+               dev_dbg(dev, "Changed ERP_SLOT: short_slot_time %i\n",
+                       bss_conf->use_short_slot);
+
+               if (bss_conf->use_short_slot)
+                       val8 = 9;
+               else
+                       val8 = 20;
+               rtl8xxxu_write8(priv, REG_SLOT, val8);
+       }
+
+       if (changed & BSS_CHANGED_BSSID) {
+               dev_dbg(dev, "Changed BSSID!\n");
+               rtl8xxxu_set_bssid(priv, bss_conf->bssid);
+       }
+
+       if (changed & BSS_CHANGED_BASIC_RATES) {
+               dev_dbg(dev, "Changed BASIC_RATES!\n");
+               rtl8xxxu_set_basic_rates(priv, bss_conf->basic_rates);
+       }
+error:
+       return;
+}
+
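+/* Map a mac80211 AC number onto the corresponding RTL TX queue. */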
+static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue)
+{
+       u32 rtlqueue;
+
+       switch (queue) {
+       case IEEE80211_AC_VO:
+               rtlqueue = TXDESC_QUEUE_VO;
+               break;
+       case IEEE80211_AC_VI:
+               rtlqueue = TXDESC_QUEUE_VI;
+               break;
+       case IEEE80211_AC_BE:
+               rtlqueue = TXDESC_QUEUE_BE;
+               break;
+       case IEEE80211_AC_BK:
+               rtlqueue = TXDESC_QUEUE_BK;
+               break;
+       default:
+               rtlqueue = TXDESC_QUEUE_BE;
+               break;
+       }
+
+       return rtlqueue;
+}
+
+static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       u32 queue;
+
+       if (ieee80211_is_mgmt(hdr->frame_control))
+               queue = TXDESC_QUEUE_MGNT;
+       else
+               queue = rtl8xxxu_80211_to_rtl_queue(skb_get_queue_mapping(skb));
+
+       return queue;
+}
+
+static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_tx_desc *tx_desc)
+{
+       __le16 *ptr = (__le16 *)tx_desc;
+       u16 csum = 0;
+       int i;
+
+       /*
+        * Clear csum field before calculation, as the csum field is
+        * in the middle of the struct.
+        */
+       tx_desc->csum = cpu_to_le16(0);
+
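+       /* XOR all 16-bit words of the descriptor to form the checksum */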
+       for (i = 0; i < (sizeof(struct rtl8xxxu_tx_desc) / sizeof(u16)); i++)
+               csum = csum ^ le16_to_cpu(ptr[i]);
+
+       tx_desc->csum = cpu_to_le16(csum);
+}
+
+static void rtl8xxxu_free_tx_resources(struct rtl8xxxu_priv *priv)
+{
+       struct rtl8xxxu_tx_urb *tx_urb, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->tx_urb_lock, flags);
+       list_for_each_entry_safe(tx_urb, tmp, &priv->tx_urb_free_list, list) {
+               list_del(&tx_urb->list);
+               priv->tx_urb_free_count--;
+               usb_free_urb(&tx_urb->urb);
+       }
+       spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+}
+
+static struct rtl8xxxu_tx_urb *
+rtl8xxxu_alloc_tx_urb(struct rtl8xxxu_priv *priv)
+{
+       struct rtl8xxxu_tx_urb *tx_urb;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->tx_urb_lock, flags);
+       tx_urb = list_first_entry_or_null(&priv->tx_urb_free_list,
+                                         struct rtl8xxxu_tx_urb, list);
+       if (tx_urb) {
+               list_del(&tx_urb->list);
+               priv->tx_urb_free_count--;
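+               /* Throttle mac80211 when the free TX URB pool runs low */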
+               if (priv->tx_urb_free_count < RTL8XXXU_TX_URB_LOW_WATER &&
+                   !priv->tx_stopped) {
+                       priv->tx_stopped = true;
+                       ieee80211_stop_queues(priv->hw);
+               }
+       }
+
+       spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+
+       return tx_urb;
+}
+
+static void rtl8xxxu_free_tx_urb(struct rtl8xxxu_priv *priv,
+                                struct rtl8xxxu_tx_urb *tx_urb)
+{
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&tx_urb->list);
+
+       spin_lock_irqsave(&priv->tx_urb_lock, flags);
+
+       list_add(&tx_urb->list, &priv->tx_urb_free_list);
+       priv->tx_urb_free_count++;
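+       /* Wake the queues once enough TX URBs have been returned */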
+       if (priv->tx_urb_free_count > RTL8XXXU_TX_URB_HIGH_WATER &&
+           priv->tx_stopped) {
+               priv->tx_stopped = false;
+               ieee80211_wake_queues(priv->hw);
+       }
+
+       spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+}
+
+static void rtl8xxxu_tx_complete(struct urb *urb)
+{
+       struct sk_buff *skb = (struct sk_buff *)urb->context;
+       struct ieee80211_tx_info *tx_info;
+       struct ieee80211_hw *hw;
+       struct rtl8xxxu_tx_urb *tx_urb =
+               container_of(urb, struct rtl8xxxu_tx_urb, urb);
+
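+       /* rtl8xxxu_tx() stashed the hw pointer in rate_driver_data[0] */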
+       tx_info = IEEE80211_SKB_CB(skb);
+       hw = tx_info->rate_driver_data[0];
+
+       skb_pull(skb, sizeof(struct rtl8xxxu_tx_desc));
+
+       ieee80211_tx_info_clear_status(tx_info);
+       tx_info->status.rates[0].idx = -1;
+       tx_info->status.rates[0].count = 0;
+
+       if (!urb->status)
+               tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+       ieee80211_tx_status_irqsafe(hw, skb);
+
+       rtl8xxxu_free_tx_urb(hw->priv, tx_urb);
+}
+
+static void rtl8xxxu_dump_action(struct device *dev,
+                                struct ieee80211_hdr *hdr)
+{
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+       u16 cap, timeout;
+
+       if (!(rtl8xxxu_debug & RTL8XXXU_DEBUG_ACTION))
+               return;
+
+       switch (mgmt->u.action.u.addba_resp.action_code) {
+       case WLAN_ACTION_ADDBA_RESP:
+               cap = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
+               timeout = le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
+               dev_info(dev, "WLAN_ACTION_ADDBA_RESP: "
+                        "timeout %i, tid %02x, buf_size %02x, policy %02x, "
+                        "status %02x\n",
+                        timeout,
+                        (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2,
+                        (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6,
+                        (cap >> 1) & 0x1,
+                        le16_to_cpu(mgmt->u.action.u.addba_resp.status));
+               break;
+       case WLAN_ACTION_ADDBA_REQ:
+               cap = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+               timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
+               dev_info(dev, "WLAN_ACTION_ADDBA_REQ: "
+                        "timeout %i, tid %02x, buf_size %02x, policy %02x\n",
+                        timeout,
+                        (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2,
+                        (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6,
+                        (cap >> 1) & 0x1);
+               break;
+       default:
+               dev_info(dev, "action frame %02x\n",
+                        mgmt->u.action.u.addba_resp.action_code);
+               break;
+       }
+}
+
+static void rtl8xxxu_tx(struct ieee80211_hw *hw,
+                       struct ieee80211_tx_control *control,
+                       struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct rtl8xxxu_tx_desc *tx_desc;
+       struct rtl8xxxu_tx_urb *tx_urb;
+       struct ieee80211_sta *sta = NULL;
+       struct ieee80211_vif *vif = tx_info->control.vif;
+       struct device *dev = &priv->udev->dev;
+       u32 queue, rate;
+       u16 pktlen = skb->len;
+       u16 seq_number;
+       u16 rate_flag = tx_info->control.rates[0].flags;
+       int ret;
+
+       if (skb_headroom(skb) < sizeof(struct rtl8xxxu_tx_desc)) {
+               dev_warn(dev,
+                        "%s: Not enough headroom (%i) for tx descriptor\n",
+                        __func__, skb_headroom(skb));
+               goto error;
+       }
+
+       if (unlikely(skb->len > (65535 - sizeof(struct rtl8xxxu_tx_desc)))) {
+               dev_warn(dev, "%s: Trying to send over-sized skb (%i)\n",
+                        __func__, skb->len);
+               goto error;
+       }
+
+       tx_urb = rtl8xxxu_alloc_tx_urb(priv);
+       if (!tx_urb) {
+               dev_warn(dev, "%s: Unable to allocate tx urb\n", __func__);
+               goto error;
+       }
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
+               dev_info(dev, "%s: TX rate: %d (%d), pkt size %d\n",
+                        __func__, tx_rate->bitrate, tx_rate->hw_value, pktlen);
+
+       if (ieee80211_is_action(hdr->frame_control))
+               rtl8xxxu_dump_action(dev, hdr);
+
+       tx_info->rate_driver_data[0] = hw;
+
+       if (control && control->sta)
+               sta = control->sta;
+
+       tx_desc = (struct rtl8xxxu_tx_desc *)
+               skb_push(skb, sizeof(struct rtl8xxxu_tx_desc));
+
+       memset(tx_desc, 0, sizeof(struct rtl8xxxu_tx_desc));
+       tx_desc->pkt_size = cpu_to_le16(pktlen);
+       tx_desc->pkt_offset = sizeof(struct rtl8xxxu_tx_desc);
+
+       tx_desc->txdw0 =
+               TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT;
+       if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+           is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+               tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
+
+       queue = rtl8xxxu_queue_select(hw, skb);
+       tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
+
+       if (tx_info->control.hw_key) {
+               switch (tx_info->control.hw_key->cipher) {
+               case WLAN_CIPHER_SUITE_WEP40:
+               case WLAN_CIPHER_SUITE_WEP104:
+               case WLAN_CIPHER_SUITE_TKIP:
+                       tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_RC4);
+                       break;
+               case WLAN_CIPHER_SUITE_CCMP:
+                       tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_AES);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+       tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT);
+
+       if (rate_flag & IEEE80211_TX_RC_MCS)
+               rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
+       else
+               rate = tx_rate->hw_value;
+       tx_desc->txdw5 = cpu_to_le32(rate);
+
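+       /* Magic from the vendor driver, presumably the rate fallback bitmap */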
+       if (ieee80211_is_data(hdr->frame_control))
+               tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+
+       /* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
+       if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
+               if (sta->ht_cap.ht_supported) {
+                       u32 ampdu, val32;
+
+                       ampdu = (u32)sta->ht_cap.ampdu_density;
+                       val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
+                       tx_desc->txdw2 |= cpu_to_le32(val32);
+                       tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE);
+               } else {
+                       tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
+               }
+       } else {
+               tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
+       }
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS);
+       if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
+           (sta && vif && vif->bss_conf.use_short_preamble))
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC_SHORT_PREAMBLE);
+       if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
+           (ieee80211_is_data_qos(hdr->frame_control) &&
+            sta && sta->ht_cap.cap &
+            (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
+               tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
+       }
+       if (ieee80211_is_mgmt(hdr->frame_control)) {
+               tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC_USE_DRIVER_RATE);
+               tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC_RETRY_LIMIT_SHIFT);
+               tx_desc->txdw5 |= cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE);
+       }
+
+       if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+               /* Use RTS rate 24M - does mac80211 tell us which rate to use? */
+               tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M);
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE);
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE);
+       }
+
+       rtl8xxxu_calc_tx_desc_csum(tx_desc);
+
+       usb_fill_bulk_urb(&tx_urb->urb, priv->udev, priv->pipe_out[queue],
+                         skb->data, skb->len, rtl8xxxu_tx_complete, skb);
+
+       usb_anchor_urb(&tx_urb->urb, &priv->tx_anchor);
+       ret = usb_submit_urb(&tx_urb->urb, GFP_ATOMIC);
+       if (ret) {
+               usb_unanchor_urb(&tx_urb->urb);
+               rtl8xxxu_free_tx_urb(priv, tx_urb);
+               goto error;
+       }
+       return;
+error:
+       dev_kfree_skb(skb);
+}
+
+static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv,
+                                      struct ieee80211_rx_status *rx_status,
+                                      struct rtl8xxxu_rx_desc *rx_desc,
+                                      struct rtl8723au_phy_stats *phy_stats)
+{
+       if (phy_stats->sgi_en)
+               rx_status->flag |= RX_FLAG_SHORT_GI;
+
+       if (rx_desc->rxmcs < DESC_RATE_6M) {
+               /*
+                * Handle PHY stats for CCK rates
+                */
+               u8 cck_agc_rpt = phy_stats->cck_agc_rpt_ofdm_cfosho_a;
+
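+               /*
+                * The top two bits of the AGC report appear to select the
+                * gain range; the offsets are taken from the vendor driver.
+                */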
+               switch (cck_agc_rpt & 0xc0) {
+               case 0xc0:
+                       rx_status->signal = -46 - (cck_agc_rpt & 0x3e);
+                       break;
+               case 0x80:
+                       rx_status->signal = -26 - (cck_agc_rpt & 0x3e);
+                       break;
+               case 0x40:
+                       rx_status->signal = -12 - (cck_agc_rpt & 0x3e);
+                       break;
+               case 0x00:
+                       rx_status->signal = 16 - (cck_agc_rpt & 0x3e);
+                       break;
+               }
+       } else {
+               rx_status->signal =
+                       (phy_stats->cck_sig_qual_ofdm_pwdb_all >> 1) - 110;
+       }
+}
+
+static void rtl8xxxu_free_rx_resources(struct rtl8xxxu_priv *priv)
+{
+       struct rtl8xxxu_rx_urb *rx_urb, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+       list_for_each_entry_safe(rx_urb, tmp,
+                                &priv->rx_urb_pending_list, list) {
+               list_del(&rx_urb->list);
+               priv->rx_urb_pending_count--;
+               usb_free_urb(&rx_urb->urb);
+       }
+
+       spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+}
+
+static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv,
+                                 struct rtl8xxxu_rx_urb *rx_urb)
+{
+       struct sk_buff *skb;
+       unsigned long flags;
+       int pending = 0;
+
+       spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
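+       /* On shutdown, drop the URB and its skb instead of requeueing */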
+       if (!priv->shutdown) {
+               list_add_tail(&rx_urb->list, &priv->rx_urb_pending_list);
+               priv->rx_urb_pending_count++;
+               pending = priv->rx_urb_pending_count;
+       } else {
+               skb = (struct sk_buff *)rx_urb->urb.context;
+               dev_kfree_skb(skb);
+               usb_free_urb(&rx_urb->urb);
+       }
+
+       spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+       if (pending > RTL8XXXU_RX_URB_PENDING_WATER)
+               schedule_work(&priv->rx_urb_wq);
+}
+
+static void rtl8xxxu_rx_urb_work(struct work_struct *work)
+{
+       struct rtl8xxxu_priv *priv;
+       struct rtl8xxxu_rx_urb *rx_urb, *tmp;
+       struct list_head local;
+       struct sk_buff *skb;
+       unsigned long flags;
+       int ret;
+
+       priv = container_of(work, struct rtl8xxxu_priv, rx_urb_wq);
+       INIT_LIST_HEAD(&local);
+
+       spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+       list_splice_init(&priv->rx_urb_pending_list, &local);
+       priv->rx_urb_pending_count = 0;
+
+       spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+       list_for_each_entry_safe(rx_urb, tmp, &local, list) {
+               list_del_init(&rx_urb->list);
+               ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+               /*
+                * If out of memory or temporary error, put it back on the
+                * queue and try again. Otherwise the device is dead/gone
+                * and we should drop it.
+                */
+               switch (ret) {
+               case 0:
+                       break;
+               case -ENOMEM:
+               case -EAGAIN:
+                       rtl8xxxu_queue_rx_urb(priv, rx_urb);
+                       break;
+               default:
+                       pr_info("failed to requeue urb %i\n", ret);
+                       skb = (struct sk_buff *)rx_urb->urb.context;
+                       dev_kfree_skb(skb);
+                       usb_free_urb(&rx_urb->urb);
+               }
+       }
+}
+
+static void rtl8xxxu_rx_complete(struct urb *urb)
+{
+       struct rtl8xxxu_rx_urb *rx_urb =
+               container_of(urb, struct rtl8xxxu_rx_urb, urb);
+       struct ieee80211_hw *hw = rx_urb->hw;
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct sk_buff *skb = (struct sk_buff *)urb->context;
+       struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
+       struct rtl8723au_phy_stats *phy_stats;
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_mgmt *mgmt;
+       struct device *dev = &priv->udev->dev;
+       __le32 *_rx_desc_le = (__le32 *)skb->data;
+       u32 *_rx_desc = (u32 *)skb->data;
+       int cnt, len, drvinfo_sz, desc_shift, i;
+
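+       /* Byteswap the RX descriptor to host order in place */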
+       for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++)
+               _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+
+       cnt = rx_desc->frag;
+       len = rx_desc->pktlen;
+       drvinfo_sz = rx_desc->drvinfo_sz * 8;
+       desc_shift = rx_desc->shift;
+       skb_put(skb, urb->actual_length);
+
+       if (urb->status == 0) {
+               skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
+               phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+
+               skb_pull(skb, drvinfo_sz + desc_shift);
+
+               mgmt = (struct ieee80211_mgmt *)skb->data;
+
+               memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+               if (rx_desc->phy_stats)
+                       rtl8xxxu_rx_parse_phystats(priv, rx_status,
+                                                  rx_desc, phy_stats);
+
+               rx_status->freq = hw->conf.chandef.chan->center_freq;
+               rx_status->band = hw->conf.chandef.chan->band;
+
+               rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+               rx_status->flag |= RX_FLAG_MACTIME_START;
+
+               if (!rx_desc->swdec)
+                       rx_status->flag |= RX_FLAG_DECRYPTED;
+               if (rx_desc->crc32)
+                       rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+               if (rx_desc->bw)
+                       rx_status->flag |= RX_FLAG_40MHZ;
+
+               if (rx_desc->rxht) {
+                       rx_status->flag |= RX_FLAG_HT;
+                       rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+               } else {
+                       rx_status->rate_idx = rx_desc->rxmcs;
+               }
+
+               ieee80211_rx_irqsafe(hw, skb);
+               skb = NULL;
+               rx_urb->urb.context = NULL;
+               rtl8xxxu_queue_rx_urb(priv, rx_urb);
+       } else {
+               dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
+               goto cleanup;
+       }
+       return;
+
+cleanup:
+       usb_free_urb(urb);
+       dev_kfree_skb(skb);
+       return;
+}
+
+static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
+                                 struct rtl8xxxu_rx_urb *rx_urb)
+{
+       struct sk_buff *skb;
+       int skb_size;
+       int ret;
+
+       skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE;
+       skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc));
+       usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data,
+                         skb_size, rtl8xxxu_rx_complete, skb);
+       usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor);
+       ret = usb_submit_urb(&rx_urb->urb, GFP_ATOMIC);
+       if (ret)
+               usb_unanchor_urb(&rx_urb->urb);
+       return ret;
+}
+
+static void rtl8xxxu_int_complete(struct urb *urb)
+{
+       struct rtl8xxxu_priv *priv = (struct rtl8xxxu_priv *)urb->context;
+       struct device *dev = &priv->udev->dev;
+       int ret;
+
+       dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
+       if (urb->status == 0) {
+               usb_anchor_urb(urb, &priv->int_anchor);
+               ret = usb_submit_urb(urb, GFP_ATOMIC);
+               if (ret)
+                       usb_unanchor_urb(urb);
+       } else {
+               dev_info(dev, "%s: Error %i\n", __func__, urb->status);
+       }
+}
+
+static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct urb *urb;
+       u32 val32;
+       int ret;
+
+       urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (!urb)
+               return -ENOMEM;
+
+       usb_fill_int_urb(urb, priv->udev, priv->pipe_interrupt,
+                        priv->int_buf, USB_INTR_CONTENT_LENGTH,
+                        rtl8xxxu_int_complete, priv, 1);
+       usb_anchor_urb(urb, &priv->int_anchor);
+       ret = usb_submit_urb(urb, GFP_KERNEL);
+       if (ret) {
+               usb_unanchor_urb(urb);
+               goto error;
+       }
+
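+       /* Unmask the CPWM interrupt in the USB host interrupt mask */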
+       val32 = rtl8xxxu_read32(priv, REG_USB_HIMR);
+       val32 |= USB_HIMR_CPWM;
+       rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
+
+error:
+       return ret;
+}
+
+static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       int ret;
+       u8 val8;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               rtl8723a_stop_tx_beacon(priv);
+
+               val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+               val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
+                       BEACON_DISABLE_TSF_UPDATE;
+               rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+               ret = 0;
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+       }
+
+       rtl8xxxu_set_linktype(priv, vif->type);
+
+       return ret;
+}
+
+static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+
+       dev_dbg(&priv->udev->dev, "%s\n", __func__);
+}
+
+static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct device *dev = &priv->udev->dev;
+       u16 val16;
+       int ret = 0, channel;
+       bool ht40;
+
+       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL)
+               dev_info(dev,
+                        "%s: channel: %i (changed %08x chandef.width %02x)\n",
+                        __func__, hw->conf.chandef.chan->hw_value,
+                        changed, hw->conf.chandef.width);
+
+       if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+               val16 = ((hw->conf.long_frame_max_tx_count <<
+                         RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) |
+                       ((hw->conf.short_frame_max_tx_count <<
+                         RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK);
+               rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               switch (hw->conf.chandef.width) {
+               case NL80211_CHAN_WIDTH_20_NOHT:
+               case NL80211_CHAN_WIDTH_20:
+                       ht40 = false;
+                       break;
+               case NL80211_CHAN_WIDTH_40:
+                       ht40 = true;
+                       break;
+               default:
+                       ret = -EOPNOTSUPP;
+                       goto exit;
+               }
+
+               channel = hw->conf.chandef.chan->hw_value;
+
+               rtl8723a_set_tx_power(priv, channel, ht40);
+
+               rtl8723au_config_channel(hw);
+       }
+
+exit:
+       return ret;
+}
+
+static int rtl8xxxu_conf_tx(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif, u16 queue,
+                           const struct ieee80211_tx_queue_params *param)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct device *dev = &priv->udev->dev;
+       u32 val32;
+       u8 aifs, acm_ctrl, acm_bit;
+
+       aifs = param->aifs;
+
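+       /* Pack AIFS, ECWmin/ECWmax and TXOP into the EDCA register format */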
+       val32 = aifs |
+               fls(param->cw_min) << EDCA_PARAM_ECW_MIN_SHIFT |
+               fls(param->cw_max) << EDCA_PARAM_ECW_MAX_SHIFT |
+               (u32)param->txop << EDCA_PARAM_TXOP_SHIFT;
+
+       acm_ctrl = rtl8xxxu_read8(priv, REG_ACM_HW_CTRL);
+       dev_dbg(dev,
+               "%s: IEEE80211 queue %02x val %08x, acm %i, acm_ctrl %02x\n",
+               __func__, queue, val32, param->acm, acm_ctrl);
+
+       switch (queue) {
+       case IEEE80211_AC_VO:
+               acm_bit = ACM_HW_CTRL_VO;
+               rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, val32);
+               break;
+       case IEEE80211_AC_VI:
+               acm_bit = ACM_HW_CTRL_VI;
+               rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, val32);
+               break;
+       case IEEE80211_AC_BE:
+               acm_bit = ACM_HW_CTRL_BE;
+               rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, val32);
+               break;
+       case IEEE80211_AC_BK:
+               acm_bit = ACM_HW_CTRL_BK;
+               rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, val32);
+               break;
+       default:
+               acm_bit = 0;
+               break;
+       }
+
+       if (param->acm)
+               acm_ctrl |= acm_bit;
+       else
+               acm_ctrl &= ~acm_bit;
+       rtl8xxxu_write8(priv, REG_ACM_HW_CTRL, acm_ctrl);
+
+       return 0;
+}
+
+static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
+                                     unsigned int changed_flags,
+                                     unsigned int *total_flags, u64 multicast)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+
+       dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
+               __func__, changed_flags, *total_flags);
+
+       *total_flags &= (FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC);
+}
+
+static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
+{
+       if (rts > 2347)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta,
+                           struct ieee80211_key_conf *key)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct device *dev = &priv->udev->dev;
+       u8 mac_addr[ETH_ALEN];
+       u8 val8;
+       u16 val16;
+       u32 val32;
+       int retval = -EOPNOTSUPP;
+
+       dev_dbg(dev, "%s: cmd %02x, cipher %08x, index %i\n",
+               __func__, cmd, key->cipher, key->keyidx);
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return -EOPNOTSUPP;
+
+       if (key->keyidx > 3)
+               return -EOPNOTSUPP;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+               /* Fall through - TKIP is not offloaded to the hardware */
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+               dev_dbg(dev, "%s: pairwise key\n", __func__);
+               ether_addr_copy(mac_addr, sta->addr);
+       } else {
+               dev_dbg(dev, "%s: group key\n", __func__);
+               eth_broadcast_addr(mac_addr);
+       }
+
+       val16 = rtl8xxxu_read16(priv, REG_CR);
+       val16 |= CR_SECURITY_ENABLE;
+       rtl8xxxu_write16(priv, REG_CR, val16);
+
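+       /* Enable TX/RX security using the default (CAM) keys */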
+       val8 = SEC_CFG_TX_SEC_ENABLE | SEC_CFG_TXBC_USE_DEFKEY |
+               SEC_CFG_RX_SEC_ENABLE | SEC_CFG_RXBC_USE_DEFKEY;
+       val8 |= SEC_CFG_TX_USE_DEFKEY | SEC_CFG_RX_USE_DEFKEY;
+       rtl8xxxu_write8(priv, REG_SECURITY_CFG, val8);
+
+       switch (cmd) {
+       case SET_KEY:
+               key->hw_key_idx = key->keyidx;
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+               rtl8xxxu_cam_write(priv, key, mac_addr);
+               retval = 0;
+               break;
+       case DISABLE_KEY:
+               rtl8xxxu_write32(priv, REG_CAM_WRITE, 0x00000000);
+               val32 = CAM_CMD_POLLING | CAM_CMD_WRITE |
+                       key->keyidx << CAM_CMD_KEY_SHIFT;
+               rtl8xxxu_write32(priv, REG_CAM_CMD, val32);
+               retval = 0;
+               break;
+       default:
+               dev_warn(dev, "%s: Unsupported command %02x\n", __func__, cmd);
+       }
+
+       return retval;
+}
+
+static int
+rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     enum ieee80211_ampdu_mlme_action action,
+                     struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
+                     bool amsdu)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct device *dev = &priv->udev->dev;
+       u8 ampdu_factor, ampdu_density;
+
+       switch (action) {
+       case IEEE80211_AMPDU_TX_START:
+               dev_info(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
+               ampdu_factor = sta->ht_cap.ampdu_factor;
+               ampdu_density = sta->ht_cap.ampdu_density;
+               rtl8xxxu_set_ampdu_factor(priv, ampdu_factor);
+               rtl8xxxu_set_ampdu_min_space(priv, ampdu_density);
+               dev_dbg(dev,
+                       "Changed HT: ampdu_factor %02x, ampdu_density %02x\n",
+                       ampdu_factor, ampdu_density);
+               break;
+       case IEEE80211_AMPDU_TX_STOP_FLUSH:
+               dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
+               rtl8xxxu_set_ampdu_factor(priv, 0);
+               rtl8xxxu_set_ampdu_min_space(priv, 0);
+               break;
+       case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+               dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
+                        __func__);
+               rtl8xxxu_set_ampdu_factor(priv, 0);
+               rtl8xxxu_set_ampdu_min_space(priv, 0);
+               break;
+       case IEEE80211_AMPDU_RX_START:
+               dev_info(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               dev_info(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int rtl8xxxu_start(struct ieee80211_hw *hw)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       struct rtl8xxxu_rx_urb *rx_urb;
+       struct rtl8xxxu_tx_urb *tx_urb;
+       unsigned long flags;
+       int ret, i;
+
+       init_usb_anchor(&priv->rx_anchor);
+       init_usb_anchor(&priv->tx_anchor);
+       init_usb_anchor(&priv->int_anchor);
+
+       rtl8723a_enable_rf(priv);
+       ret = rtl8xxxu_submit_int_urb(hw);
+       if (ret)
+               goto exit;
+
+       for (i = 0; i < RTL8XXXU_TX_URBS; i++) {
+               tx_urb = kmalloc(sizeof(struct rtl8xxxu_tx_urb), GFP_KERNEL);
+               if (!tx_urb) {
+                       if (!i)
+                               ret = -ENOMEM;
+
+                       goto error_out;
+               }
+               usb_init_urb(&tx_urb->urb);
+               INIT_LIST_HEAD(&tx_urb->list);
+               tx_urb->hw = hw;
+               list_add(&tx_urb->list, &priv->tx_urb_free_list);
+               priv->tx_urb_free_count++;
+       }
+
+       priv->tx_stopped = false;
+
+       spin_lock_irqsave(&priv->rx_urb_lock, flags);
+       priv->shutdown = false;
+       spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+       for (i = 0; i < RTL8XXXU_RX_URBS; i++) {
+               rx_urb = kmalloc(sizeof(struct rtl8xxxu_rx_urb), GFP_KERNEL);
+               if (!rx_urb) {
+                       if (!i)
+                               ret = -ENOMEM;
+
+                       goto error_out;
+               }
+               usb_init_urb(&rx_urb->urb);
+               INIT_LIST_HEAD(&rx_urb->list);
+               rx_urb->hw = hw;
+
+               ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+       }
+exit:
+       /*
+        * Disable all data frames
+        */
+       rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+       /*
+        * Accept all mgmt frames
+        */
+       rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff);
+
+       rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);
+
+       return ret;
+
+error_out:
+       rtl8xxxu_free_tx_resources(priv);
+       /*
+        * Disable all data and mgmt frames
+        */
+       rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+       rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000);
+
+       return ret;
+}
+
+static void rtl8xxxu_stop(struct ieee80211_hw *hw)
+{
+       struct rtl8xxxu_priv *priv = hw->priv;
+       unsigned long flags;
+
+       rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+       rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000);
+       rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+
+       spin_lock_irqsave(&priv->rx_urb_lock, flags);
+       priv->shutdown = true;
+       spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+       usb_kill_anchored_urbs(&priv->rx_anchor);
+       usb_kill_anchored_urbs(&priv->tx_anchor);
+       usb_kill_anchored_urbs(&priv->int_anchor);
+
+       rtl8723a_disable_rf(priv);
+
+       /*
+        * Disable interrupts
+        */
+       rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+
+       rtl8xxxu_free_rx_resources(priv);
+       rtl8xxxu_free_tx_resources(priv);
+}
+
+static const struct ieee80211_ops rtl8xxxu_ops = {
+       .tx = rtl8xxxu_tx,
+       .add_interface = rtl8xxxu_add_interface,
+       .remove_interface = rtl8xxxu_remove_interface,
+       .config = rtl8xxxu_config,
+       .conf_tx = rtl8xxxu_conf_tx,
+       .bss_info_changed = rtl8xxxu_bss_info_changed,
+       .configure_filter = rtl8xxxu_configure_filter,
+       .set_rts_threshold = rtl8xxxu_set_rts_threshold,
+       .start = rtl8xxxu_start,
+       .stop = rtl8xxxu_stop,
+       .sw_scan_start = rtl8xxxu_sw_scan_start,
+       .sw_scan_complete = rtl8xxxu_sw_scan_complete,
+       .set_key = rtl8xxxu_set_key,
+       .ampdu_action = rtl8xxxu_ampdu_action,
+};
+
+static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
+                             struct usb_interface *interface)
+{
+       struct usb_interface_descriptor *interface_desc;
+       struct usb_host_interface *host_interface;
+       struct usb_endpoint_descriptor *endpoint;
+       struct device *dev = &priv->udev->dev;
+       int i, j = 0, endpoints;
+       u8 dir, xtype, num;
+       int ret = 0;
+
+       host_interface = &interface->altsetting[0];
+       interface_desc = &host_interface->desc;
+       endpoints = interface_desc->bNumEndpoints;
+
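+       /*
+        * Expect one bulk IN, one interrupt IN, and up to
+        * RTL8XXXU_OUT_ENDPOINTS bulk OUT endpoints.
+        */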
+       for (i = 0; i < endpoints; i++) {
+               endpoint = &host_interface->endpoint[i].desc;
+
+               dir = endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+               num = usb_endpoint_num(endpoint);
+               xtype = usb_endpoint_type(endpoint);
+               if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+                       dev_dbg(dev,
+                               "%s: endpoint: dir %02x, # %02x, type %02x\n",
+                               __func__, dir, num, xtype);
+               if (usb_endpoint_dir_in(endpoint) &&
+                   usb_endpoint_xfer_bulk(endpoint)) {
+                       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+                               dev_dbg(dev, "%s: in endpoint num %i\n",
+                                       __func__, num);
+
+                       if (priv->pipe_in) {
+                               dev_warn(dev,
+                                        "%s: Too many IN pipes\n", __func__);
+                               ret = -EINVAL;
+                               goto exit;
+                       }
+
+                       priv->pipe_in = usb_rcvbulkpipe(priv->udev, num);
+               }
+
+               if (usb_endpoint_dir_in(endpoint) &&
+                   usb_endpoint_xfer_int(endpoint)) {
+                       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+                               dev_dbg(dev, "%s: interrupt endpoint num %i\n",
+                                       __func__, num);
+
+                       if (priv->pipe_interrupt) {
+                               dev_warn(dev, "%s: Too many INTERRUPT pipes\n",
+                                        __func__);
+                               ret = -EINVAL;
+                               goto exit;
+                       }
+
+                       priv->pipe_interrupt = usb_rcvintpipe(priv->udev, num);
+               }
+
+               if (usb_endpoint_dir_out(endpoint) &&
+                   usb_endpoint_xfer_bulk(endpoint)) {
+                       if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+                               dev_dbg(dev, "%s: out endpoint num %i\n",
+                                       __func__, num);
+                       if (j >= RTL8XXXU_OUT_ENDPOINTS) {
+                               dev_warn(dev,
+                                        "%s: Too many OUT pipes\n", __func__);
+                               ret = -EINVAL;
+                               goto exit;
+                       }
+                       priv->out_ep[j++] = num;
+               }
+       }
+exit:
+       priv->nr_out_eps = j;
+       return ret;
+}
+
+static int rtl8xxxu_probe(struct usb_interface *interface,
+                         const struct usb_device_id *id)
+{
+       struct rtl8xxxu_priv *priv;
+       struct ieee80211_hw *hw;
+       struct usb_device *udev;
+       struct ieee80211_supported_band *sband;
+       int ret = 0;
+       int untested = 1;
+
+       udev = usb_get_dev(interface_to_usbdev(interface));
+
+       switch (id->idVendor) {
+       case USB_VENDOR_ID_REALTEK:
+               switch (id->idProduct) {
+               case 0x1724:
+               case 0x8176:
+               case 0x8178:
+               case 0x817f:
+                       untested = 0;
+                       break;
+               }
+               break;
+       case 0x7392:
+               if (id->idProduct == 0x7811)
+                       untested = 0;
+               break;
+       default:
+               break;
+       }
+
+       if (untested) {
+               rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
+               dev_info(&udev->dev,
+                        "This Realtek USB WiFi dongle (0x%04x:0x%04x) is untested!\n",
+                        id->idVendor, id->idProduct);
+               dev_info(&udev->dev,
+                        "Please report results to Jes.Sorensen@gmail.com\n");
+       }
+
+       hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
+       if (!hw) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       priv = hw->priv;
+       priv->hw = hw;
+       priv->udev = udev;
+       priv->fops = (struct rtl8xxxu_fileops *)id->driver_info;
+       mutex_init(&priv->usb_buf_mutex);
+       mutex_init(&priv->h2c_mutex);
+       INIT_LIST_HEAD(&priv->tx_urb_free_list);
+       spin_lock_init(&priv->tx_urb_lock);
+       INIT_LIST_HEAD(&priv->rx_urb_pending_list);
+       spin_lock_init(&priv->rx_urb_lock);
+       INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
+
+       usb_set_intfdata(interface, hw);
+
+       ret = rtl8xxxu_parse_usb(priv, interface);
+       if (ret)
+               goto exit;
+
+       ret = rtl8xxxu_identify_chip(priv);
+       if (ret) {
+               dev_err(&udev->dev, "Fatal - failed to identify chip\n");
+               goto exit;
+       }
+
+       ret = rtl8xxxu_read_efuse(priv);
+       if (ret) {
+               dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
+               goto exit;
+       }
+
+       ret = priv->fops->parse_efuse(priv);
+       if (ret) {
+               dev_err(&udev->dev, "Fatal - failed to parse EFuse\n");
+               goto exit;
+       }
+
+       rtl8xxxu_print_chipinfo(priv);
+
+       ret = priv->fops->load_firmware(priv);
+       if (ret) {
+               dev_err(&udev->dev, "Fatal - failed to load firmware\n");
+               goto exit;
+       }
+
+       ret = rtl8xxxu_init_device(hw);
+
+       hw->wiphy->max_scan_ssids = 1;
+       hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+       hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+       hw->queues = 4;
+
+       sband = &rtl8xxxu_supported_band;
+       sband->ht_cap.ht_supported = true;
+       sband->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+       sband->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+       sband->ht_cap.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40;
+       memset(&sband->ht_cap.mcs, 0, sizeof(sband->ht_cap.mcs));
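+       /* One spatial stream (MCS 0-7); rx_mask[4] bit 0 is MCS 32 */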
+       sband->ht_cap.mcs.rx_mask[0] = 0xff;
+       sband->ht_cap.mcs.rx_mask[4] = 0x01;
+       if (priv->rf_paths > 1) {
+               sband->ht_cap.mcs.rx_mask[1] = 0xff;
+               sband->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+       }
+       sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+       /*
+        * Some APs will negotiate HT20_40 in a noisy environment leading
+        * to miserable performance. Rather than defaulting to this, only
+        * enable it if explicitly requested at module load time.
+        */
+       if (rtl8xxxu_ht40_2g) {
+               dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
+               sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+       }
+       hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+
+       hw->wiphy->rts_threshold = 2347;
+
+       SET_IEEE80211_DEV(priv->hw, &interface->dev);
+       SET_IEEE80211_PERM_ADDR(hw, priv->mac_addr);
+
+       hw->extra_tx_headroom = sizeof(struct rtl8xxxu_tx_desc);
+       ieee80211_hw_set(hw, SIGNAL_DBM);
+       /*
+        * The firmware handles rate control
+        */
+       ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+       ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+
+       ret = ieee80211_register_hw(priv->hw);
+       if (ret) {
+               dev_err(&udev->dev, "%s: Failed to register: %i\n",
+                       __func__, ret);
+               goto exit;
+       }
+
+exit:
+       if (ret < 0)
+               usb_put_dev(udev);
+       return ret;
+}
+
+static void rtl8xxxu_disconnect(struct usb_interface *interface)
+{
+       struct rtl8xxxu_priv *priv;
+       struct ieee80211_hw *hw;
+
+       hw = usb_get_intfdata(interface);
+       priv = hw->priv;
+
+       rtl8xxxu_disable_device(hw);
+       usb_set_intfdata(interface, NULL);
+
+       dev_info(&priv->udev->dev, "disconnecting\n");
+
+       ieee80211_unregister_hw(hw);
+
+       kfree(priv->fw_data);
+       mutex_destroy(&priv->usb_buf_mutex);
+       mutex_destroy(&priv->h2c_mutex);
+
+       usb_put_dev(priv->udev);
+       ieee80211_free_hw(hw);
+}
+
+static struct rtl8xxxu_fileops rtl8723au_fops = {
+       .parse_efuse = rtl8723au_parse_efuse,
+       .load_firmware = rtl8723au_load_firmware,
+       .power_on = rtl8723au_power_on,
+       .writeN_block_size = 1024,
+};
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static struct rtl8xxxu_fileops rtl8192cu_fops = {
+       .parse_efuse = rtl8192cu_parse_efuse,
+       .load_firmware = rtl8192cu_load_firmware,
+       .power_on = rtl8192cu_power_on,
+       .writeN_block_size = 128,
+};
+
+#endif
+
+static struct usb_device_id dev_table[] = {
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1724, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x0724, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8723au_fops},
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+/* Still supported by rtlwifi */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8178, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817f, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Larry Finger */
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Currently untested 8188 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8170, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8177, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817a, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817b, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817d, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0631, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x094c, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x5088, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0052, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x005c, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0eb0, 0x9071, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x103c, 0x1629, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2a, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2e, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xed17, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0090, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops}, /* Netcore 8188RU */
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff9, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffa, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff8, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffb, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffc, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x1201, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Currently untested 8192 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0061, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8178, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9021, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0xf001, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x2e2e, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0019, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0020, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3307, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3309, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0100, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0091, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+#endif
+{ }
+};
+
+static struct usb_driver rtl8xxxu_driver = {
+       .name = DRIVER_NAME,
+       .probe = rtl8xxxu_probe,
+       .disconnect = rtl8xxxu_disconnect,
+       .id_table = dev_table,
+       .disable_hub_initiated_lpm = 1,
+};
+
+static int __init rtl8xxxu_module_init(void)
+{
+       int res;
+
+       res = usb_register(&rtl8xxxu_driver);
+       if (res < 0)
+               pr_err(DRIVER_NAME ": usb_register() failed (%i)\n", res);
+
+       return res;
+}
+
+static void __exit rtl8xxxu_module_exit(void)
+{
+       usb_deregister(&rtl8xxxu_driver);
+}
+
+MODULE_DEVICE_TABLE(usb, dev_table);
+
+module_init(rtl8xxxu_module_init);
+module_exit(rtl8xxxu_module_exit);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
new file mode 100644 (file)
index 0000000..f2a1bac
--- /dev/null
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Register definitions taken from original Realtek rtl8723au driver
+ */
+
+#include <asm/byteorder.h>
+
+#define RTL8XXXU_DEBUG_REG_WRITE       0x01
+#define RTL8XXXU_DEBUG_REG_READ                0x02
+#define RTL8XXXU_DEBUG_RFREG_WRITE     0x04
+#define RTL8XXXU_DEBUG_RFREG_READ      0x08
+#define RTL8XXXU_DEBUG_CHANNEL         0x10
+#define RTL8XXXU_DEBUG_TX              0x20
+#define RTL8XXXU_DEBUG_TX_DUMP         0x40
+#define RTL8XXXU_DEBUG_RX              0x80
+#define RTL8XXXU_DEBUG_RX_DUMP         0x100
+#define RTL8XXXU_DEBUG_USB             0x200
+#define RTL8XXXU_DEBUG_KEY             0x400
+#define RTL8XXXU_DEBUG_H2C             0x800
+#define RTL8XXXU_DEBUG_ACTION          0x1000
+#define RTL8XXXU_DEBUG_EFUSE           0x2000
+
+#define RTW_USB_CONTROL_MSG_TIMEOUT    500
+#define RTL8XXXU_MAX_REG_POLL          500
+#define USB_INTR_CONTENT_LENGTH                56
+
+#define RTL8XXXU_OUT_ENDPOINTS         3
+
+#define REALTEK_USB_READ               0xc0
+#define REALTEK_USB_WRITE              0x40
+#define REALTEK_USB_CMD_REQ            0x05
+#define REALTEK_USB_CMD_IDX            0x00
+
+#define TX_TOTAL_PAGE_NUM              0xf8
+/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
+#define TX_PAGE_NUM_PUBQ               0xe7
+#define TX_PAGE_NUM_HI_PQ              0x0c
+#define TX_PAGE_NUM_LO_PQ              0x02
+#define TX_PAGE_NUM_NORM_PQ            0x02
+
+#define RTL_FW_PAGE_SIZE               4096
+#define RTL8XXXU_FIRMWARE_POLL_MAX     1000
+
+#define RTL8723A_CHANNEL_GROUPS                3
+#define RTL8723A_MAX_RF_PATHS          2
+#define RF6052_MAX_TX_PWR              0x3f
+
+#define EFUSE_MAP_LEN_8723A            256
+#define EFUSE_MAX_SECTION_8723A                32
+#define EFUSE_REAL_CONTENT_LEN_8723A   512
+#define EFUSE_BT_MAP_LEN_8723A         1024
+#define EFUSE_MAX_WORD_UNIT            4
+
+struct rtl8xxxu_rx_desc {
+#ifdef __LITTLE_ENDIAN
+       u32 pktlen:14;
+       u32 crc32:1;
+       u32 icverr:1;
+       u32 drvinfo_sz:4;
+       u32 security:3;
+       u32 qos:1;
+       u32 shift:2;
+       u32 phy_stats:1;
+       u32 swdec:1;
+       u32 ls:1;
+       u32 fs:1;
+       u32 eor:1;
+       u32 own:1;
+
+       u32 macid:5;
+       u32 tid:4;
+       u32 hwrsvd:4;
+       u32 amsdu:1;
+       u32 paggr:1;
+       u32 faggr:1;
+       u32 a1fit:4;
+       u32 a2fit:4;
+       u32 pam:1;
+       u32 pwr:1;
+       u32 md:1;
+       u32 mf:1;
+       u32 type:2;
+       u32 mc:1;
+       u32 bc:1;
+
+       u32 seq:12;
+       u32 frag:4;
+       u32 nextpktlen:14;
+       u32 nextind:1;
+       u32 reserved0:1;
+
+       u32 rxmcs:6;
+       u32 rxht:1;
+       u32 gf:1;
+       u32 splcp:1;
+       u32 bw:1;
+       u32 htc:1;
+       u32 eosp:1;
+       u32 bssidfit:2;
+       u32 reserved1:16;
+       u32 unicastwake:1;
+       u32 magicwake:1;
+
+       u32 pattern0match:1;
+       u32 pattern1match:1;
+       u32 pattern2match:1;
+       u32 pattern3match:1;
+       u32 pattern4match:1;
+       u32 pattern5match:1;
+       u32 pattern6match:1;
+       u32 pattern7match:1;
+       u32 pattern8match:1;
+       u32 pattern9match:1;
+       u32 patternamatch:1;
+       u32 patternbmatch:1;
+       u32 patterncmatch:1;
+       u32 reserved2:19;
+#else
+       u32 own:1;
+       u32 eor:1;
+       u32 fs:1;
+       u32 ls:1;
+       u32 swdec:1;
+       u32 phy_stats:1;
+       u32 shift:2;
+       u32 qos:1;
+       u32 security:3;
+       u32 drvinfo_sz:4;
+       u32 icverr:1;
+       u32 crc32:1;
+       u32 pktlen:14;
+
+       u32 bc:1;
+       u32 mc:1;
+       u32 type:2;
+       u32 mf:1;
+       u32 md:1;
+       u32 pwr:1;
+       u32 pam:1;
+       u32 a2fit:4;
+       u32 a1fit:4;
+       u32 faggr:1;
+       u32 paggr:1;
+       u32 amsdu:1;
+       u32 hwrsvd:4;
+       u32 tid:4;
+       u32 macid:5;
+
+       u32 reserved0:1;
+       u32 nextind:1;
+       u32 nextpktlen:14;
+       u32 frag:4;
+       u32 seq:12;
+
+       u32 magicwake:1;
+       u32 unicastwake:1;
+       u32 reserved1:16;
+       u32 bssidfit:2;
+       u32 eosp:1;
+       u32 htc:1;
+       u32 bw:1;
+       u32 splcp:1;
+       u32 gf:1;
+       u32 rxht:1;
+       u32 rxmcs:6;
+
+       u32 reserved2:19;
+       u32 patterncmatch:1;
+       u32 patternbmatch:1;
+       u32 patternamatch:1;
+       u32 pattern9match:1;
+       u32 pattern8match:1;
+       u32 pattern7match:1;
+       u32 pattern6match:1;
+       u32 pattern5match:1;
+       u32 pattern4match:1;
+       u32 pattern3match:1;
+       u32 pattern2match:1;
+       u32 pattern1match:1;
+       u32 pattern0match:1;
+#endif
+       __le32 tsfl;
+#if 0
+       u32 bassn:12;
+       u32 bavld:1;
+       u32 reserved3:19;
+#endif
+};
+
+struct rtl8xxxu_tx_desc {
+       __le16 pkt_size;
+       u8 pkt_offset;
+       u8 txdw0;
+       __le32 txdw1;
+       __le32 txdw2;
+       __le32 txdw3;
+       __le32 txdw4;
+       __le32 txdw5;
+       __le32 txdw6;
+       __le16 csum;
+       __le16 txdw7;
+};
+
+/*  CCK Rates, TxHT = 0 */
+#define DESC_RATE_1M                   0x00
+#define DESC_RATE_2M                   0x01
+#define DESC_RATE_5_5M                 0x02
+#define DESC_RATE_11M                  0x03
+
+/*  OFDM Rates, TxHT = 0 */
+#define DESC_RATE_6M                   0x04
+#define DESC_RATE_9M                   0x05
+#define DESC_RATE_12M                  0x06
+#define DESC_RATE_18M                  0x07
+#define DESC_RATE_24M                  0x08
+#define DESC_RATE_36M                  0x09
+#define DESC_RATE_48M                  0x0a
+#define DESC_RATE_54M                  0x0b
+
+/*  MCS Rates, TxHT = 1 */
+#define DESC_RATE_MCS0                 0x0c
+#define DESC_RATE_MCS1                 0x0d
+#define DESC_RATE_MCS2                 0x0e
+#define DESC_RATE_MCS3                 0x0f
+#define DESC_RATE_MCS4                 0x10
+#define DESC_RATE_MCS5                 0x11
+#define DESC_RATE_MCS6                 0x12
+#define DESC_RATE_MCS7                 0x13
+#define DESC_RATE_MCS8                 0x14
+#define DESC_RATE_MCS9                 0x15
+#define DESC_RATE_MCS10                        0x16
+#define DESC_RATE_MCS11                        0x17
+#define DESC_RATE_MCS12                        0x18
+#define DESC_RATE_MCS13                        0x19
+#define DESC_RATE_MCS14                        0x1a
+#define DESC_RATE_MCS15                        0x1b
+#define DESC_RATE_MCS15_SG             0x1c
+#define DESC_RATE_MCS32                        0x20
+
+#define TXDESC_OFFSET_SZ               0
+#define TXDESC_OFFSET_SHT              16
+#if 0
+#define TXDESC_BMC                     BIT(24)
+#define TXDESC_LSG                     BIT(26)
+#define TXDESC_FSG                     BIT(27)
+#define TXDESC_OWN                     BIT(31)
+#else
+#define TXDESC_BROADMULTICAST          BIT(0)
+#define TXDESC_LAST_SEGMENT            BIT(2)
+#define TXDESC_FIRST_SEGMENT           BIT(3)
+#define TXDESC_OWN                     BIT(7)
+#endif
+
+/* Word 1 */
+#define TXDESC_PKT_OFFSET_SZ           0
+#define TXDESC_AGG_ENABLE              BIT(5)
+#define TXDESC_BK                      BIT(6)
+#define TXDESC_QUEUE_SHIFT             8
+#define TXDESC_QUEUE_MASK              0x1f00
+#define TXDESC_QUEUE_BK                        0x2
+#define TXDESC_QUEUE_BE                        0x0
+#define TXDESC_QUEUE_VI                        0x5
+#define TXDESC_QUEUE_VO                        0x7
+#define TXDESC_QUEUE_BEACON            0x10
+#define TXDESC_QUEUE_HIGH              0x11
+#define TXDESC_QUEUE_MGNT              0x12
+#define TXDESC_QUEUE_CMD               0x13
+#define TXDESC_QUEUE_MAX               (TXDESC_QUEUE_CMD + 1)
+
+#define DESC_RATE_ID_SHIFT             16
+#define DESC_RATE_ID_MASK              0xf
+#define TXDESC_NAVUSEHDR               BIT(20)
+#define TXDESC_SEC_RC4                 0x00400000
+#define TXDESC_SEC_AES                 0x00c00000
+#define TXDESC_PKT_OFFSET_SHIFT                26
+#define TXDESC_AGG_EN                  BIT(29)
+#define TXDESC_HWPC                    BIT(31)
+
+/* Word 2 */
+#define TXDESC_ACK_REPORT              BIT(19)
+#define TXDESC_AMPDU_DENSITY_SHIFT     20
+
+/* Word 3 */
+#define TXDESC_SEQ_SHIFT               16
+#define TXDESC_SEQ_MASK                        0x0fff0000
+
+/* Word 4 */
+#define TXDESC_QOS                     BIT(6)
+#define TXDESC_HW_SEQ_ENABLE           BIT(7)
+#define TXDESC_USE_DRIVER_RATE         BIT(8)
+#define TXDESC_DISABLE_DATA_FB         BIT(10)
+#define TXDESC_CTS_SELF_ENABLE         BIT(11)
+#define TXDESC_RTS_CTS_ENABLE          BIT(12)
+#define TXDESC_HW_RTS_ENABLE           BIT(13)
+#define TXDESC_PRIME_CH_OFF_LOWER      BIT(20)
+#define TXDESC_PRIME_CH_OFF_UPPER      BIT(21)
+#define TXDESC_SHORT_PREAMBLE          BIT(24)
+#define TXDESC_DATA_BW                 BIT(25)
+#define TXDESC_RTS_DATA_BW             BIT(27)
+#define TXDESC_RTS_PRIME_CH_OFF_LOWER  BIT(28)
+#define TXDESC_RTS_PRIME_CH_OFF_UPPER  BIT(29)
+
+/* Word 5 */
+#define TXDESC_RTS_RATE_SHIFT          0
+#define TXDESC_RTS_RATE_MASK           0x3f
+#define TXDESC_SHORT_GI                        BIT(6)
+#define TXDESC_CCX_TAG                 BIT(7)
+#define TXDESC_RETRY_LIMIT_ENABLE      BIT(17)
+#define TXDESC_RETRY_LIMIT_SHIFT       18
+#define TXDESC_RETRY_LIMIT_MASK                0x00fc0000
+
+/* Word 6 */
+#define TXDESC_MAX_AGG_SHIFT           11
+
+struct phy_rx_agc_info {
+#ifdef __LITTLE_ENDIAN
+       u8      gain:7, trsw:1;
+#else
+       u8      trsw:1, gain:7;
+#endif
+};
+
+struct rtl8723au_phy_stats {
+       struct phy_rx_agc_info path_agc[RTL8723A_MAX_RF_PATHS];
+       u8      ch_corr[RTL8723A_MAX_RF_PATHS];
+       u8      cck_sig_qual_ofdm_pwdb_all;
+       u8      cck_agc_rpt_ofdm_cfosho_a;
+       u8      cck_rpt_b_ofdm_cfosho_b;
+       u8      reserved_1;
+       u8      noise_power_db_msb;
+       u8      path_cfotail[RTL8723A_MAX_RF_PATHS];
+       u8      pcts_mask[RTL8723A_MAX_RF_PATHS];
+       s8      stream_rxevm[RTL8723A_MAX_RF_PATHS];
+       u8      path_rxsnr[RTL8723A_MAX_RF_PATHS];
+       u8      noise_power_db_lsb;
+       u8      reserved_2[3];
+       u8      stream_csi[RTL8723A_MAX_RF_PATHS];
+       u8      stream_target_csi[RTL8723A_MAX_RF_PATHS];
+       s8      sig_evm;
+       u8      reserved_3;
+
+#ifdef __LITTLE_ENDIAN
+       u8      antsel_rx_keep_2:1;     /* ex_intf_flg:1; */
+       u8      sgi_en:1;
+       u8      rxsc:2;
+       u8      idle_long:1;
+       u8      r_ant_train_en:1;
+       u8      antenna_select_b:1;
+       u8      antenna_select:1;
+#else  /*  _BIG_ENDIAN_ */
+       u8      antenna_select:1;
+       u8      antenna_select_b:1;
+       u8      r_ant_train_en:1;
+       u8      idle_long:1;
+       u8      rxsc:2;
+       u8      sgi_en:1;
+       u8      antsel_rx_keep_2:1;     /* ex_intf_flg:1; */
+#endif
+};
+
+/*
+ * Registers to back up
+ */
+#define RTL8XXXU_ADDA_REGS             16
+#define RTL8XXXU_MAC_REGS              4
+#define RTL8XXXU_BB_REGS               9
+
+struct rtl8xxxu_firmware_header {
+       __le16  signature;              /*  92C0: test chip;
+                                           92C, 88C0: test chip;
+                                           88C1: MP A-cut;
+                                           92C1: MP A-cut */
+       u8      category;               /*  AP/NIC and USB/PCI */
+       u8      function;
+
+       __le16  major_version;          /*  FW Version */
+       u8      minor_version;          /*  FW Subversion, default 0x00 */
+       u8      reserved1;
+
+       u8      month;                  /*  Release time Month field */
+       u8      date;                   /*  Release time Date field */
+       u8      hour;                   /*  Release time Hour field */
+       u8      minute;                 /*  Release time Minute field */
+
+       __le16  ramcodesize;            /*  Size of RAM code */
+       u16     reserved2;
+
+       __le32  svn_idx;                /*  SVN entry index */
+       u32     reserved3;
+
+       u32     reserved4;
+       u32     reserved5;
+
+       u8      data[0];
+};
+
+/*
+ * The 8723au has 3 channel groups: 1-3, 4-9, and 10-14
+ */
+struct rtl8723au_idx {
+#ifdef __LITTLE_ENDIAN
+       int     a:4;
+       int     b:4;
+#else
+       int     b:4;
+       int     a:4;
+#endif
+} __attribute__((packed));
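[Editor's note: because a and b are 4-bit *signed* bitfields, the compiler
sign-extends them on read, so each EFUSE byte decodes directly into two signed
path offsets. A sketch; bitfield layout is compiler-dependent, and the driver
relies on the endian-selected ordering above:

    static inline void rtl8723au_idx_demo(u8 raw)
    {
            struct rtl8723au_idx idx;

            /* e.g. raw = 0xf2 decodes to a = +2 (path A), b = -1 (path B) */
            memcpy(&idx, &raw, sizeof(idx));
            pr_info("path A diff %d, path B diff %d\n", idx.a, idx.b);
    }
]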
+
+struct rtl8723au_efuse {
+       __le16 rtl_id;
+       u8 res0[0xe];
+       u8 cck_tx_power_index_A[3];     /* 0x10 */
+       u8 cck_tx_power_index_B[3];
+       u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */
+       u8 ht40_1s_tx_power_index_B[3];
+       /*
+        * The following entries are half-bytes split as:
+        * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+        */
+       struct rtl8723au_idx ht20_tx_power_index_diff[3];
+       struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+       struct rtl8723au_idx ht40_max_power_offset[3];
+       struct rtl8723au_idx ht20_max_power_offset[3];
+       u8 channel_plan;                /* 0x28 */
+       u8 tssi_a;
+       u8 thermal_meter;
+       u8 rf_regulatory;
+       u8 rf_option_2;
+       u8 rf_option_3;
+       u8 rf_option_4;
+       u8 res7;
+       u8 version;                     /* 0x30 */
+       u8 customer_id_major;
+       u8 customer_id_minor;
+       u8 xtal_k;
+       u8 chipset;                     /* 0x34 */
+       u8 res8[0x82];
+       u8 vid;                         /* 0xb7 */
+       u8 res9;
+       u8 pid;                         /* 0xb9 */
+       u8 res10[0x0c];
+       u8 mac_addr[ETH_ALEN];          /* 0xc6 */
+       u8 res11[2];
+       u8 vendor_name[7];
+       u8 res12[2];
+       u8 device_name[0x29];           /* 0xd7 */
+};
+
+struct rtl8192cu_efuse {
+       __le16 rtl_id;
+       __le16 hpon;
+       u8 res0[2];
+       __le16 clk;
+       __le16 testr;
+       __le16 vid;
+       __le16 did;
+       __le16 svid;
+       __le16 smid;                                            /* 0x10 */
+       u8 res1[4];
+       u8 mac_addr[ETH_ALEN];                                  /* 0x16 */
+       u8 res2[2];
+       u8 vendor_name[7];
+       u8 res3[3];
+       u8 device_name[0x14];                                   /* 0x28 */
+       u8 res4[0x1e];                                          /* 0x3c */
+       u8 cck_tx_power_index_A[3];                             /* 0x5a */
+       u8 cck_tx_power_index_B[3];
+       u8 ht40_1s_tx_power_index_A[3];                         /* 0x60 */
+       u8 ht40_1s_tx_power_index_B[3];
+       /*
+        * The following entries are half-bytes split as:
+        * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+        */
+       struct rtl8723au_idx ht40_2s_tx_power_index_diff[3];
+       struct rtl8723au_idx ht20_tx_power_index_diff[3];       /* 0x69 */
+       struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+       struct rtl8723au_idx ht40_max_power_offset[3];          /* 0x6f */
+       struct rtl8723au_idx ht20_max_power_offset[3];
+       u8 channel_plan;                                        /* 0x75 */
+       u8 tssi_a;
+       u8 tssi_b;
+       u8 thermal_meter;       /* xtal_k */                    /* 0x78 */
+       u8 rf_regulatory;
+       u8 rf_option_2;
+       u8 rf_option_3;
+       u8 rf_option_4;
+       u8 res5[1];                                             /* 0x7d */
+       u8 version;
+       u8 customer_id;
+};
+
+struct rtl8xxxu_reg8val {
+       u16 reg;
+       u8 val;
+};
+
+struct rtl8xxxu_reg32val {
+       u16 reg;
+       u32 val;
+};
+
+struct rtl8xxxu_rfregval {
+       u8 reg;
+       u32 val;
+};
+
+enum rtl8xxxu_rfpath {
+       RF_A = 0,
+       RF_B = 1,
+};
+
+struct rtl8xxxu_rfregs {
+       u16 hssiparm1;
+       u16 hssiparm2;
+       u16 lssiparm;
+       u16 hspiread;
+       u16 lssiread;
+       u16 rf_sw_ctrl;
+};
+
+#define H2C_MAX_MBOX                   4
+#define H2C_EXT                                BIT(7)
+#define H2C_SET_POWER_MODE             1
+#define H2C_JOIN_BSS_REPORT            2
+#define  H2C_JOIN_BSS_DISCONNECT       0
+#define  H2C_JOIN_BSS_CONNECT          1
+#define H2C_SET_RSSI                   5
+#define H2C_SET_RATE_MASK              (6 | H2C_EXT)
+
+struct h2c_cmd {
+       union {
+               struct {
+                       u8 cmd;
+                       u8 data[5];
+               } __packed cmd;
+               struct {
+                       __le32 data;
+                       __le16 ext;
+               } __packed raw;
+               struct {
+                       u8 cmd;
+                       u8 data;
+                       u8 pad[4];
+               } __packed joinbss;
+               struct {
+                       u8 cmd;
+                       __le16 mask_hi;
+                       u8 arg;
+                       __le16 mask_lo;
+               } __packed ramask;
+       };
+};
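[Editor's note: a sketch of filling the ramask variant for an
H2C_SET_RATE_MASK command; the 32-bit rate bitmap is split across
mask_lo/mask_hi, and the field values here are illustrative:

    static void h2c_ramask_demo(struct h2c_cmd *h2c, u32 ramask, u8 arg)
    {
            memset(h2c, 0, sizeof(*h2c));
            h2c->ramask.cmd = H2C_SET_RATE_MASK;
            h2c->ramask.mask_lo = cpu_to_le16(ramask & 0xffff);
            h2c->ramask.mask_hi = cpu_to_le16(ramask >> 16);
            h2c->ramask.arg = arg;          /* e.g. rate-adaptive mode bits */
    }
]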
+
+struct rtl8xxxu_fileops;
+
+struct rtl8xxxu_priv {
+       struct ieee80211_hw *hw;
+       struct usb_device *udev;
+       struct rtl8xxxu_fileops *fops;
+
+       spinlock_t tx_urb_lock;
+       struct list_head tx_urb_free_list;
+       int tx_urb_free_count;
+       bool tx_stopped;
+
+       spinlock_t rx_urb_lock;
+       struct list_head rx_urb_pending_list;
+       int rx_urb_pending_count;
+       bool shutdown;
+       struct work_struct rx_urb_wq;
+
+       u8 mac_addr[ETH_ALEN];
+       char chip_name[8];
+       u8 cck_tx_power_index_A[3];     /* 0x10 */
+       u8 cck_tx_power_index_B[3];
+       u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */
+       u8 ht40_1s_tx_power_index_B[3];
+       /*
+        * The following entries are half-bytes split as:
+        * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+        */
+       struct rtl8723au_idx ht40_2s_tx_power_index_diff[3];
+       struct rtl8723au_idx ht20_tx_power_index_diff[3];
+       struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+       struct rtl8723au_idx ht40_max_power_offset[3];
+       struct rtl8723au_idx ht20_max_power_offset[3];
+       u32 chip_cut:4;
+       u32 rom_rev:4;
+       u32 has_wifi:1;
+       u32 has_bluetooth:1;
+       u32 enable_bluetooth:1;
+       u32 has_gps:1;
+       u32 hi_pa:1;
+       u32 vendor_umc:1;
+       u32 has_polarity_ctrl:1;
+       u32 has_eeprom:1;
+       u32 boot_eeprom:1;
+       u32 ep_tx_high_queue:1;
+       u32 ep_tx_normal_queue:1;
+       u32 ep_tx_low_queue:1;
+       u32 path_a_hi_power:1;
+       u32 path_a_rf_paths:4;
+       unsigned int pipe_interrupt;
+       unsigned int pipe_in;
+       unsigned int pipe_out[TXDESC_QUEUE_MAX];
+       u8 out_ep[RTL8XXXU_OUT_ENDPOINTS];
+       u8 path_a_ig_value;
+       u8 ep_tx_count;
+       u8 rf_paths;
+       u8 rx_paths;
+       u8 tx_paths;
+       u32 rf_mode_ag[2];
+       u32 rege94;
+       u32 rege9c;
+       u32 regeb4;
+       u32 regebc;
+       int next_mbox;
+       int nr_out_eps;
+
+       struct mutex h2c_mutex;
+
+       struct usb_anchor rx_anchor;
+       struct usb_anchor tx_anchor;
+       struct usb_anchor int_anchor;
+       struct rtl8xxxu_firmware_header *fw_data;
+       size_t fw_size;
+       struct mutex usb_buf_mutex;
+       union {
+               __le32 val32;
+               __le16 val16;
+               u8 val8;
+       } usb_buf;
+       union {
+               u8 raw[EFUSE_MAP_LEN_8723A];
+               struct rtl8723au_efuse efuse8723;
+               struct rtl8192cu_efuse efuse8192;
+       } efuse_wifi;
+       u32 adda_backup[RTL8XXXU_ADDA_REGS];
+       u32 mac_backup[RTL8XXXU_MAC_REGS];
+       u32 bb_backup[RTL8XXXU_BB_REGS];
+       u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
+       u32 rtlchip;
+       u8 pi_enabled:1;
+       u8 iqk_initialized:1;
+       u8 int_buf[USB_INTR_CONTENT_LENGTH];
+};
+
+struct rtl8xxxu_rx_urb {
+       struct urb urb;
+       struct ieee80211_hw *hw;
+       struct list_head list;
+};
+
+struct rtl8xxxu_tx_urb {
+       struct urb urb;
+       struct ieee80211_hw *hw;
+       struct list_head list;
+};
+
+struct rtl8xxxu_fileops {
+       int (*parse_efuse) (struct rtl8xxxu_priv *priv);
+       int (*load_firmware) (struct rtl8xxxu_priv *priv);
+       int (*power_on) (struct rtl8xxxu_priv *priv);
+       int writeN_block_size;
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
new file mode 100644 (file)
index 0000000..23208f7
--- /dev/null
@@ -0,0 +1,981 @@
+/*
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Register definitions taken from original Realtek rtl8723au driver
+ */
+
+/* 0x0000 ~ 0x00FF     System Configuration */
+#define REG_SYS_ISO_CTRL               0x0000
+#define  SYS_ISO_MD2PP                 BIT(0)
+#define  SYS_ISO_ANALOG_IPS            BIT(5)
+#define  SYS_ISO_DIOR                  BIT(9)
+#define  SYS_ISO_PWC_EV25V             BIT(14)
+#define  SYS_ISO_PWC_EV12V             BIT(15)
+
+#define REG_SYS_FUNC                   0x0002
+#define  SYS_FUNC_BBRSTB               BIT(0)
+#define  SYS_FUNC_BB_GLB_RSTN          BIT(1)
+#define  SYS_FUNC_USBA                 BIT(2)
+#define  SYS_FUNC_UPLL                 BIT(3)
+#define  SYS_FUNC_USBD                 BIT(4)
+#define  SYS_FUNC_DIO_PCIE             BIT(5)
+#define  SYS_FUNC_PCIEA                        BIT(6)
+#define  SYS_FUNC_PPLL                 BIT(7)
+#define  SYS_FUNC_PCIED                        BIT(8)
+#define  SYS_FUNC_DIOE                 BIT(9)
+#define  SYS_FUNC_CPU_ENABLE           BIT(10)
+#define  SYS_FUNC_DCORE                        BIT(11)
+#define  SYS_FUNC_ELDR                 BIT(12)
+#define  SYS_FUNC_DIO_RF               BIT(13)
+#define  SYS_FUNC_HWPDN                        BIT(14)
+#define  SYS_FUNC_MREGEN               BIT(15)
+
+#define REG_APS_FSMCO                  0x0004
+#define  APS_FSMCO_PFM_ALDN            BIT(1)
+#define  APS_FSMCO_PFM_WOWL            BIT(3)
+#define  APS_FSMCO_ENABLE_POWERDOWN    BIT(4)
+#define  APS_FSMCO_MAC_ENABLE          BIT(8)
+#define  APS_FSMCO_MAC_OFF             BIT(9)
+#define  APS_FSMCO_HW_SUSPEND          BIT(11)
+#define  APS_FSMCO_PCIE                        BIT(12)
+#define  APS_FSMCO_HW_POWERDOWN                BIT(15)
+#define  APS_FSMCO_WLON_RESET          BIT(16)
+
+#define REG_SYS_CLKR                   0x0008
+#define  SYS_CLK_ANAD16V_ENABLE                BIT(0)
+#define  SYS_CLK_ANA8M                 BIT(1)
+#define  SYS_CLK_MACSLP                        BIT(4)
+#define  SYS_CLK_LOADER_ENABLE         BIT(5)
+#define  SYS_CLK_80M_SSC_DISABLE       BIT(7)
+#define  SYS_CLK_80M_SSC_ENABLE_HO     BIT(8)
+#define  SYS_CLK_PHY_SSC_RSTB          BIT(9)
+#define  SYS_CLK_SEC_CLK_ENABLE                BIT(10)
+#define  SYS_CLK_MAC_CLK_ENABLE                BIT(11)
+#define  SYS_CLK_ENABLE                        BIT(12)
+#define  SYS_CLK_RING_CLK_ENABLE       BIT(13)
+
+#define REG_9346CR                     0x000a
+#define  EEPROM_BOOT                   BIT(4)
+#define  EEPROM_ENABLE                 BIT(5)
+
+#define REG_EE_VPD                     0x000c
+#define REG_AFE_MISC                   0x0010
+#define REG_SPS0_CTRL                  0x0011
+#define REG_SPS_OCP_CFG                        0x0018
+#define REG_RSV_CTRL                   0x001c
+
+#define REG_RF_CTRL                    0x001f
+#define  RF_ENABLE                     BIT(0)
+#define  RF_RSTB                       BIT(1)
+#define  RF_SDMRSTB                    BIT(2)
+
+#define REG_LDOA15_CTRL                        0x0020
+#define  LDOA15_ENABLE                 BIT(0)
+#define  LDOA15_STANDBY                        BIT(1)
+#define  LDOA15_OBUF                   BIT(2)
+#define  LDOA15_REG_VOS                        BIT(3)
+#define  LDOA15_VOADJ_SHIFT            4
+
+#define REG_LDOV12D_CTRL               0x0021
+#define  LDOV12D_ENABLE                        BIT(0)
+#define  LDOV12D_STANDBY               BIT(1)
+#define  LDOV12D_VADJ_SHIFT            4
+
+#define REG_LDOHCI12_CTRL              0x0022
+
+#define REG_LPLDO_CTRL                 0x0023
+#define  LPLDO_HSM                     BIT(2)
+#define  LPLDO_LSM_DIS                 BIT(3)
+
+#define REG_AFE_XTAL_CTRL              0x0024
+#define  AFE_XTAL_ENABLE               BIT(0)
+#define  AFE_XTAL_B_SELECT             BIT(1)
+#define  AFE_XTAL_GATE_USB             BIT(8)
+#define  AFE_XTAL_GATE_AFE             BIT(11)
+#define  AFE_XTAL_RF_GATE              BIT(14)
+#define  AFE_XTAL_GATE_DIG             BIT(17)
+#define  AFE_XTAL_BT_GATE              BIT(20)
+
+#define REG_AFE_PLL_CTRL               0x0028
+#define  AFE_PLL_ENABLE                        BIT(0)
+#define  AFE_PLL_320_ENABLE            BIT(1)
+#define  APE_PLL_FREF_SELECT           BIT(2)
+#define  AFE_PLL_EDGE_SELECT           BIT(3)
+#define  AFE_PLL_WDOGB                 BIT(4)
+#define  AFE_PLL_LPF_ENABLE            BIT(5)
+
+#define REG_MAC_PHY_CTRL               0x002c
+
+#define REG_EFUSE_CTRL                 0x0030
+#define REG_EFUSE_TEST                 0x0034
+#define  EFUSE_TRPT                    BIT(7)
+       /*  00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
+#define  EFUSE_CELL_SEL                        (BIT(8) | BIT(9))
+#define  EFUSE_LDOE25_ENABLE           BIT(31)
+#define  EFUSE_SELECT_MASK             0x0300
+#define  EFUSE_WIFI_SELECT             0x0000
+#define  EFUSE_BT0_SELECT              0x0100
+#define  EFUSE_BT1_SELECT              0x0200
+#define  EFUSE_BT2_SELECT              0x0300
+
+#define  EFUSE_ACCESS_ENABLE           0x69    /* RTL8723 only */
+#define  EFUSE_ACCESS_DISABLE          0x00    /* RTL8723 only */
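[Editor's note: EFUSE_SELECT_MASK and the *_SELECT values choose which of the
four efuse cells subsequent accesses hit. A sketch of selecting the WiFi cell,
assuming the driver's 32-bit register accessors (those helpers live in the .c
file, not this header):

    static void rtl8xxxu_select_wifi_efuse(struct rtl8xxxu_priv *priv)
    {
            u32 val32;

            val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST);
            val32 &= ~EFUSE_SELECT_MASK;
            val32 |= EFUSE_WIFI_SELECT;
            rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32);
    }
]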
+
+#define REG_PWR_DATA                   0x0038
+#define REG_CAL_TIMER                  0x003c
+#define REG_ACLK_MON                   0x003e
+#define REG_GPIO_MUXCFG                        0x0040
+#define REG_GPIO_IO_SEL                        0x0042
+#define REG_MAC_PINMUX_CFG             0x0043
+#define REG_GPIO_PIN_CTRL              0x0044
+#define REG_GPIO_INTM                  0x0048
+#define REG_LEDCFG0                    0x004c
+#define REG_LEDCFG1                    0x004d
+#define REG_LEDCFG2                    0x004e
+#define  LEDCFG2_DPDT_SELECT           BIT(7)
+#define REG_LEDCFG3                    0x004f
+#define REG_LEDCFG                     REG_LEDCFG2
+#define REG_FSIMR                      0x0050
+#define REG_FSISR                      0x0054
+#define REG_HSIMR                      0x0058
+#define REG_HSISR                      0x005c
+/*  RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_PIN_CTRL_2            0x0060
+/*  RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_GPIO_IO_SEL_2              0x0062
+
+/*  RTL8723 only WIFI/BT/GPS Multi-Function control source. */
+#define REG_MULTI_FUNC_CTRL            0x0068
+
+#define  MULTI_FN_WIFI_HW_PWRDOWN_EN   BIT(0)  /* Enable GPIO[9] as WiFi HW
+                                                  powerdown source */
+#define  MULTI_FN_WIFI_HW_PWRDOWN_SL   BIT(1)  /* WiFi HW powerdown polarity
+                                                  control */
+#define  MULTI_WIFI_FUNC_EN            BIT(2)  /* WiFi function enable */
+
+#define  MULTI_WIFI_HW_ROF_EN          BIT(3)  /* Enable GPIO[9] as WiFi RF HW
+                                                  powerdown source */
+#define  MULTI_BT_HW_PWRDOWN_EN                BIT(16) /* Enable GPIO[11] as BT HW
+                                                  powerdown source */
+#define  MULTI_BT_HW_PWRDOWN_SL                BIT(17) /* BT HW powerdown polarity
+                                                  control */
+#define  MULTI_BT_FUNC_EN              BIT(18) /* BT function enable */
+#define  MULTI_BT_HW_ROF_EN            BIT(19) /* Enable GPIO[11] as BT/GPS
+                                                  RF HW powerdown source */
+#define  MULTI_GPS_HW_PWRDOWN_EN       BIT(20) /* Enable GPIO[10] as GPS HW
+                                                  powerdown source */
+#define  MULTI_GPS_HW_PWRDOWN_SL       BIT(21) /* GPS HW powerdown polarity
+                                                  control */
+#define  MULTI_GPS_FUNC_EN             BIT(22) /* GPS function enable */
+
+#define REG_MCU_FW_DL                  0x0080
+#define  MCU_FW_DL_ENABLE              BIT(0)
+#define  MCU_FW_DL_READY               BIT(1)
+#define  MCU_FW_DL_CSUM_REPORT         BIT(2)
+#define  MCU_MAC_INIT_READY            BIT(3)
+#define  MCU_BB_INIT_READY             BIT(4)
+#define  MCU_RF_INIT_READY             BIT(5)
+#define  MCU_WINT_INIT_READY           BIT(6)
+#define  MCU_FW_RAM_SEL                        BIT(7)  /* 1: RAM, 0:ROM */
+#define  MCU_CP_RESET                  BIT(23)
+
+#define REG_HMBOX_EXT_0                        0x0088
+#define REG_HMBOX_EXT_1                        0x008a
+#define REG_HMBOX_EXT_2                        0x008c
+#define REG_HMBOX_EXT_3                        0x008e
+/*  Host suspend counter on FPGA platform */
+#define REG_HOST_SUSP_CNT              0x00bc
+/*  Efuse access protection for RTL8723 */
+#define REG_EFUSE_ACCESS               0x00cf
+#define REG_BIST_SCAN                  0x00d0
+#define REG_BIST_RPT                   0x00d4
+#define REG_BIST_ROM_RPT               0x00d8
+#define REG_USB_SIE_INTF               0x00e0
+#define REG_PCIE_MIO_INTF              0x00e4
+#define REG_PCIE_MIO_INTD              0x00e8
+#define REG_HPON_FSM                   0x00ec
+#define  HPON_FSM_BONDING_MASK         (BIT(22) | BIT(23))
+#define  HPON_FSM_BONDING_1T2R         BIT(22)
+#define REG_SYS_CFG                    0x00f0
+#define  SYS_CFG_XCLK_VLD              BIT(0)
+#define  SYS_CFG_ACLK_VLD              BIT(1)
+#define  SYS_CFG_UCLK_VLD              BIT(2)
+#define  SYS_CFG_PCLK_VLD              BIT(3)
+#define  SYS_CFG_PCIRSTB               BIT(4)
+#define  SYS_CFG_V15_VLD               BIT(5)
+#define  SYS_CFG_TRP_B15V_EN           BIT(7)
+#define  SYS_CFG_SIC_IDLE              BIT(8)
+#define  SYS_CFG_BD_MAC2               BIT(9)
+#define  SYS_CFG_BD_MAC1               BIT(10)
+#define  SYS_CFG_IC_MACPHY_MODE                BIT(11)
+#define  SYS_CFG_CHIP_VER              (BIT(12) | BIT(13) | BIT(14) | BIT(15))
+#define  SYS_CFG_BT_FUNC               BIT(16)
+#define  SYS_CFG_VENDOR_ID             BIT(19)
+#define  SYS_CFG_PAD_HWPD_IDN          BIT(22)
+#define  SYS_CFG_TRP_VAUX_EN           BIT(23)
+#define  SYS_CFG_TRP_BT_EN             BIT(24)
+#define  SYS_CFG_BD_PKG_SEL            BIT(25)
+#define  SYS_CFG_BD_HCI_SEL            BIT(26)
+#define  SYS_CFG_TYPE_ID               BIT(27)
+#define  SYS_CFG_RTL_ID                        BIT(23) /*  TestChip ID,
+                                                   1:Test(RLE); 0:MP(RL) */
+#define  SYS_CFG_SPS_SEL               BIT(24) /*  1:LDO regulator mode;
+                                                   0:Switching regulator mode*/
+#define  SYS_CFG_CHIP_VERSION_MASK     0xf000  /* Bit 12 - 15 */
+#define  SYS_CFG_CHIP_VERSION_SHIFT    12
+
+#define REG_GPIO_OUTSTS                        0x00f4  /*  For RTL8723 only. */
+#define  GPIO_EFS_HCI_SEL              (BIT(0) | BIT(1))
+#define  GPIO_PAD_HCI_SEL              (BIT(2) | BIT(3))
+#define  GPIO_HCI_SEL                  (BIT(4) | BIT(5))
+#define  GPIO_PKG_SEL_HCI              BIT(6)
+#define  GPIO_FEN_GPS                  BIT(7)
+#define  GPIO_FEN_BT                   BIT(8)
+#define  GPIO_FEN_WL                   BIT(9)
+#define  GPIO_FEN_PCI                  BIT(10)
+#define  GPIO_FEN_USB                  BIT(11)
+#define  GPIO_BTRF_HWPDN_N             BIT(12)
+#define  GPIO_WLRF_HWPDN_N             BIT(13)
+#define  GPIO_PDN_BT_N                 BIT(14)
+#define  GPIO_PDN_GPS_N                        BIT(15)
+#define  GPIO_BT_CTL_HWPDN             BIT(16)
+#define  GPIO_GPS_CTL_HWPDN            BIT(17)
+#define  GPIO_PPHY_SUSB                        BIT(20)
+#define  GPIO_UPHY_SUSB                        BIT(21)
+#define  GPIO_PCI_SUSEN                        BIT(22)
+#define  GPIO_USB_SUSEN                        BIT(23)
+#define  GPIO_RF_RL_ID                 (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
+/* 0x0100 ~ 0x01FF     MACTOP General Configuration */
+#define REG_CR                         0x0100
+#define  CR_HCI_TXDMA_ENABLE           BIT(0)
+#define  CR_HCI_RXDMA_ENABLE           BIT(1)
+#define  CR_TXDMA_ENABLE               BIT(2)
+#define  CR_RXDMA_ENABLE               BIT(3)
+#define  CR_PROTOCOL_ENABLE            BIT(4)
+#define  CR_SCHEDULE_ENABLE            BIT(5)
+#define  CR_MAC_TX_ENABLE              BIT(6)
+#define  CR_MAC_RX_ENABLE              BIT(7)
+#define  CR_SW_BEACON_ENABLE           BIT(8)
+#define  CR_SECURITY_ENABLE            BIT(9)
+#define  CR_CALTIMER_ENABLE            BIT(10)
+
+/* Media Status Register */
+#define REG_MSR                                0x0102
+#define  MSR_LINKTYPE_MASK             0x3
+#define  MSR_LINKTYPE_NONE             0x0
+#define  MSR_LINKTYPE_ADHOC            0x1
+#define  MSR_LINKTYPE_STATION          0x2
+#define  MSR_LINKTYPE_AP               0x3
+
+#define REG_PBP                                0x0104
+#define  PBP_PAGE_SIZE_RX_SHIFT                0
+#define  PBP_PAGE_SIZE_TX_SHIFT                4
+#define  PBP_PAGE_SIZE_64              0x0
+#define  PBP_PAGE_SIZE_128             0x1
+#define  PBP_PAGE_SIZE_256             0x2
+#define  PBP_PAGE_SIZE_512             0x3
+#define  PBP_PAGE_SIZE_1024            0x4
+
+#define REG_TRXDMA_CTRL                        0x010c
+#define  TRXDMA_CTRL_VOQ_SHIFT         4
+#define  TRXDMA_CTRL_VIQ_SHIFT         6
+#define  TRXDMA_CTRL_BEQ_SHIFT         8
+#define  TRXDMA_CTRL_BKQ_SHIFT         10
+#define  TRXDMA_CTRL_MGQ_SHIFT         12
+#define  TRXDMA_CTRL_HIQ_SHIFT         14
+#define  TRXDMA_QUEUE_LOW              1
+#define  TRXDMA_QUEUE_NORMAL           2
+#define  TRXDMA_QUEUE_HIGH             3
+
+#define REG_TRXFF_BNDY                 0x0114
+#define REG_TRXFF_STATUS               0x0118
+#define REG_RXFF_PTR                   0x011c
+#define REG_HIMR                       0x0120
+#define REG_HISR                       0x0124
+#define REG_HIMRE                      0x0128
+#define REG_HISRE                      0x012c
+#define REG_CPWM                       0x012f
+#define REG_FWIMR                      0x0130
+#define REG_FWISR                      0x0134
+#define REG_PKTBUF_DBG_CTRL            0x0140
+#define REG_PKTBUF_DBG_DATA_L          0x0144
+#define REG_PKTBUF_DBG_DATA_H          0x0148
+
+#define REG_TC0_CTRL                   0x0150
+#define REG_TC1_CTRL                   0x0154
+#define REG_TC2_CTRL                   0x0158
+#define REG_TC3_CTRL                   0x015c
+#define REG_TC4_CTRL                   0x0160
+#define REG_TCUNIT_BASE                        0x0164
+#define REG_MBIST_START                        0x0174
+#define REG_MBIST_DONE                 0x0178
+#define REG_MBIST_FAIL                 0x017c
+#define REG_C2HEVT_MSG_NORMAL          0x01a0
+#define REG_C2HEVT_CLEAR               0x01af
+#define REG_C2HEVT_MSG_TEST            0x01b8
+#define REG_MCUTST_1                   0x01c0
+#define REG_FMTHR                      0x01c8
+#define REG_HMTFR                      0x01cc
+#define REG_HMBOX_0                    0x01d0
+#define REG_HMBOX_1                    0x01d4
+#define REG_HMBOX_2                    0x01d8
+#define REG_HMBOX_3                    0x01dc
+
+#define REG_LLT_INIT                   0x01e0
+#define  LLT_OP_INACTIVE               0x0
+#define  LLT_OP_WRITE                  (0x1 << 30)
+#define  LLT_OP_READ                   (0x2 << 30)
+#define  LLT_OP_MASK                   (0x3 << 30)
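[Editor's note: judging by the OP masks above, an LLT access encodes the
operation in bits 30-31; the entry address and data presumably occupy the low
bytes. A polling-write sketch under that assumption, bounded by the header's
RTL8XXXU_MAX_REG_POLL and using the same assumed accessors as above:

    static int rtl8xxxu_llt_write_demo(struct rtl8xxxu_priv *priv,
                                       u8 address, u8 data)
    {
            u32 val32 = LLT_OP_WRITE | (address << 8) | data;
            int count = 0;

            rtl8xxxu_write32(priv, REG_LLT_INIT, val32);
            do {
                    /* Hardware clears the OP field when the write completes */
                    val32 = rtl8xxxu_read32(priv, REG_LLT_INIT);
                    if ((val32 & LLT_OP_MASK) == LLT_OP_INACTIVE)
                            return 0;
            } while (count++ < RTL8XXXU_MAX_REG_POLL);

            return -EBUSY;
    }
]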
+
+#define REG_BB_ACCEESS_CTRL            0x01e8
+#define REG_BB_ACCESS_DATA             0x01ec
+
+/* 0x0200 ~ 0x027F     TXDMA Configuration */
+#define REG_RQPN                       0x0200
+#define  RQPN_HI_PQ_SHIFT              0
+#define  RQPN_LO_PQ_SHIFT              8
+#define  RQPN_NORM_PQ_SHIFT            16
+#define  RQPN_LOAD                     BIT(31)
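[Editor's note: a sketch of composing REG_RQPN from the TX page budget defined
near the top of rtl8xxxu.h and latching it with RQPN_LOAD (register accessors
assumed, as above):

    static void rtl8xxxu_init_queue_pages_demo(struct rtl8xxxu_priv *priv)
    {
            u32 val32;

            val32 = (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT) |
                    (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT) |
                    (TX_PAGE_NUM_NORM_PQ << RQPN_NORM_PQ_SHIFT) |
                    RQPN_LOAD;
            rtl8xxxu_write32(priv, REG_RQPN, val32);
    }
]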
+
+#define REG_FIFOPAGE                   0x0204
+#define REG_TDECTRL                    0x0208
+#define REG_TXDMA_OFFSET_CHK           0x020c
+#define REG_TXDMA_STATUS               0x0210
+#define REG_RQPN_NPQ                   0x0214
+
+/* 0x0280 ~ 0x02FF     RXDMA Configuration */
+#define REG_RXDMA_AGG_PG_TH            0x0280
+#define REG_RXPKT_NUM                  0x0284
+#define REG_RXDMA_STATUS               0x0288
+
+#define REG_RF_BB_CMD_ADDR             0x02c0
+#define REG_RF_BB_CMD_DATA             0x02c4
+
+/*  spec version 11 */
+/* 0x0400 ~ 0x047F     Protocol Configuration */
+#define REG_VOQ_INFORMATION            0x0400
+#define REG_VIQ_INFORMATION            0x0404
+#define REG_BEQ_INFORMATION            0x0408
+#define REG_BKQ_INFORMATION            0x040c
+#define REG_MGQ_INFORMATION            0x0410
+#define REG_HGQ_INFORMATION            0x0414
+#define REG_BCNQ_INFORMATION           0x0418
+
+#define REG_CPU_MGQ_INFORMATION                0x041c
+#define REG_FWHW_TXQ_CTRL              0x0420
+#define  FWHW_TXQ_CTRL_AMPDU_RETRY     BIT(7)
+#define  FWHW_TXQ_CTRL_XMIT_MGMT_ACK   BIT(12)
+
+#define REG_HWSEQ_CTRL                 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY         0x0424
+#define REG_TXPKTBUF_MGQ_BDNY          0x0425
+#define REG_LIFETIME_EN                        0x0426
+#define REG_MULTI_BCNQ_OFFSET          0x0427
+
+#define REG_SPEC_SIFS                  0x0428
+#define  SPEC_SIFS_CCK_MASK            0x00ff
+#define  SPEC_SIFS_CCK_SHIFT           0
+#define  SPEC_SIFS_OFDM_MASK           0xff00
+#define  SPEC_SIFS_OFDM_SHIFT          8
+
+#define REG_RETRY_LIMIT                        0x042a
+#define  RETRY_LIMIT_LONG_SHIFT                0
+#define  RETRY_LIMIT_LONG_MASK         0x003f
+#define  RETRY_LIMIT_SHORT_SHIFT       8
+#define  RETRY_LIMIT_SHORT_MASK                0x3f00
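[Editor's note: a sketch of packing both retry limits into REG_RETRY_LIMIT
with the shift/mask pairs above (a 16-bit register accessor is assumed):

    static void rtl8xxxu_set_retry_demo(struct rtl8xxxu_priv *priv,
                                        u16 short_retry, u16 long_retry)
    {
            u16 val16;

            val16 = ((short_retry << RETRY_LIMIT_SHORT_SHIFT) &
                     RETRY_LIMIT_SHORT_MASK) |
                    ((long_retry << RETRY_LIMIT_LONG_SHIFT) &
                     RETRY_LIMIT_LONG_MASK);
            rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
    }
]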
+
+#define REG_DARFRC                     0x0430
+#define REG_RARFRC                     0x0438
+#define REG_RESPONSE_RATE_SET          0x0440
+#define  RESPONSE_RATE_BITMAP_ALL      0xfffff
+#define  RESPONSE_RATE_RRSR_CCK_ONLY_1M        0xffff1
+#define  RSR_1M                                BIT(0)
+#define  RSR_2M                                BIT(1)
+#define  RSR_5_5M                      BIT(2)
+#define  RSR_11M                       BIT(3)
+#define  RSR_6M                                BIT(4)
+#define  RSR_9M                                BIT(5)
+#define  RSR_12M                       BIT(6)
+#define  RSR_18M                       BIT(7)
+#define  RSR_24M                       BIT(8)
+#define  RSR_36M                       BIT(9)
+#define  RSR_48M                       BIT(10)
+#define  RSR_54M                       BIT(11)
+#define  RSR_MCS0                      BIT(12)
+#define  RSR_MCS1                      BIT(13)
+#define  RSR_MCS2                      BIT(14)
+#define  RSR_MCS3                      BIT(15)
+#define  RSR_MCS4                      BIT(16)
+#define  RSR_MCS5                      BIT(17)
+#define  RSR_MCS6                      BIT(18)
+#define  RSR_MCS7                      BIT(19)
+#define  RSR_RSC_LOWER_SUB_CHANNEL     BIT(21) /* 0x200000 */
+#define  RSR_RSC_UPPER_SUB_CHANNEL     BIT(22) /* 0x400000 */
+#define  RSR_RSC_BANDWIDTH_40M         (RSR_RSC_UPPER_SUB_CHANNEL | \
+                                        RSR_RSC_LOWER_SUB_CHANNEL)
+#define  RSR_ACK_SHORT_PREAMBLE                BIT(23)
+
+#define REG_ARFR0                      0x0444
+#define REG_ARFR1                      0x0448
+#define REG_ARFR2                      0x044c
+#define REG_ARFR3                      0x0450
+#define REG_AGGLEN_LMT                 0x0458
+#define REG_AMPDU_MIN_SPACE            0x045c
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD    0x045d
+#define REG_FAST_EDCA_CTRL             0x0460
+#define REG_RD_RESP_PKT_TH             0x0463
+#define REG_INIRTS_RATE_SEL            0x0480
+#define REG_INIDATA_RATE_SEL           0x0484
+
+#define REG_POWER_STATUS               0x04a4
+#define REG_POWER_STAGE1               0x04b4
+#define REG_POWER_STAGE2               0x04b8
+#define REG_PKT_VO_VI_LIFE_TIME                0x04c0
+#define REG_PKT_BE_BK_LIFE_TIME                0x04c2
+#define REG_STBC_SETTING               0x04c4
+#define REG_PROT_MODE_CTRL             0x04c8
+#define REG_MAX_AGGR_NUM               0x04ca
+#define REG_RTS_MAX_AGGR_NUM           0x04cb
+#define REG_BAR_MODE_CTRL              0x04cc
+#define REG_RA_TRY_RATE_AGG_LMT                0x04cf
+#define REG_NQOS_SEQ                   0x04dc
+#define REG_QOS_SEQ                    0x04de
+#define REG_NEED_CPU_HANDLE            0x04e0
+#define REG_PKT_LOSE_RPT               0x04e1
+#define REG_PTCL_ERR_STATUS            0x04e2
+#define REG_DUMMY                      0x04fc
+
+/* 0x0500 ~ 0x05FF     EDCA Configuration */
+#define REG_EDCA_VO_PARAM              0x0500
+#define REG_EDCA_VI_PARAM              0x0504
+#define REG_EDCA_BE_PARAM              0x0508
+#define REG_EDCA_BK_PARAM              0x050c
+#define  EDCA_PARAM_ECW_MIN_SHIFT      8
+#define  EDCA_PARAM_ECW_MAX_SHIFT      12
+#define  EDCA_PARAM_TXOP_SHIFT         16
+#define REG_BEACON_TCFG                        0x0510
+#define REG_PIFS                       0x0512
+#define REG_RDG_PIFS                   0x0513
+#define REG_SIFS_CCK                   0x0514
+#define REG_SIFS_OFDM                  0x0516
+#define REG_TSFTR_SYN_OFFSET           0x0518
+#define REG_AGGR_BREAK_TIME            0x051a
+#define REG_SLOT                       0x051b
+#define REG_TX_PTCL_CTRL               0x0520
+#define REG_TXPAUSE                    0x0522
+#define REG_DIS_TXREQ_CLR              0x0523
+#define REG_RD_CTRL                    0x0524
+#define REG_TBTT_PROHIBIT              0x0540
+#define REG_RD_NAV_NXT                 0x0544
+#define REG_NAV_PROT_LEN               0x0546
+
+#define REG_BEACON_CTRL                        0x0550
+#define REG_BEACON_CTRL_1              0x0551
+#define  BEACON_ATIM                   BIT(0)
+#define  BEACON_CTRL_MBSSID            BIT(1)
+#define  BEACON_CTRL_TX_BEACON_RPT     BIT(2)
+#define  BEACON_FUNCTION_ENABLE                BIT(3)
+#define  BEACON_DISABLE_TSF_UPDATE     BIT(4)
+
+#define REG_MBID_NUM                   0x0552
+#define REG_DUAL_TSF_RST               0x0553
+#define  DUAL_TSF_RESET_TSF0           BIT(0)
+#define  DUAL_TSF_RESET_TSF1           BIT(1)
+#define  DUAL_TSF_RESET_P2P            BIT(4)
+#define  DUAL_TSF_TX_OK                        BIT(5)
+
+/*  The same as REG_MBSSID_BCN_SPACE */
+#define REG_BCN_INTERVAL               0x0554
+#define REG_MBSSID_BCN_SPACE           0x0554
+
+#define REG_DRIVER_EARLY_INT           0x0558
+#define  DRIVER_EARLY_INT_TIME         5
+
+#define REG_BEACON_DMA_TIME            0x0559
+#define  BEACON_DMA_ATIME_INT_TIME     2
+
+#define REG_ATIMWND                    0x055a
+#define REG_BCN_MAX_ERR                        0x055d
+#define REG_RXTSF_OFFSET_CCK           0x055e
+#define REG_RXTSF_OFFSET_OFDM          0x055f
+#define REG_TSFTR                      0x0560
+#define REG_TSFTR1                     0x0568
+#define REG_INIT_TSFTR                 0x0564
+#define REG_ATIMWND_1                  0x0570
+#define REG_PSTIMER                    0x0580
+#define REG_TIMER0                     0x0584
+#define REG_TIMER1                     0x0588
+#define REG_ACM_HW_CTRL                        0x05c0
+#define  ACM_HW_CTRL_BK                        BIT(0)
+#define  ACM_HW_CTRL_BE                        BIT(1)
+#define  ACM_HW_CTRL_VI                        BIT(2)
+#define  ACM_HW_CTRL_VO                        BIT(3)
+#define REG_ACM_RST_CTRL               0x05c1
+#define REG_ACMAVG                     0x05c2
+#define REG_VO_ADMTIME                 0x05c4
+#define REG_VI_ADMTIME                 0x05c6
+#define REG_BE_ADMTIME                 0x05c8
+#define REG_EDCA_RANDOM_GEN            0x05cc
+#define REG_SCH_TXCMD                  0x05d0
+
+/* define REG_FW_TSF_SYNC_CNT          0x04a0 */
+#define REG_FW_RESET_TSF_CNT_1         0x05fc
+#define REG_FW_RESET_TSF_CNT_0         0x05fd
+#define REG_FW_BCN_DIS_CNT             0x05fe
+
+/* 0x0600 ~ 0x07FF  WMAC Configuration */
+#define REG_APSD_CTRL                  0x0600
+#define  APSD_CTRL_OFF                 BIT(6)
+#define  APSD_CTRL_OFF_STATUS          BIT(7)
+#define REG_BW_OPMODE                  0x0603
+#define  BW_OPMODE_20MHZ               BIT(2)
+#define  BW_OPMODE_5G                  BIT(1)
+#define  BW_OPMODE_11J                 BIT(0)
+
+#define REG_TCR                                0x0604
+
+/* Receive Configuration Register */
+#define REG_RCR                                0x0608
+#define  RCR_ACCEPT_AP                 BIT(0)  /* Accept all unicast packet */
+#define  RCR_ACCEPT_PHYS_MATCH         BIT(1)  /* Accept phys match packet */
+#define  RCR_ACCEPT_MCAST              BIT(2)
+#define  RCR_ACCEPT_BCAST              BIT(3)
+#define  RCR_ACCEPT_ADDR3              BIT(4)  /* Accept address 3 match
+                                                packet */
+#define  RCR_ACCEPT_PM                 BIT(5)  /* Accept power management
+                                                packet */
+#define  RCR_CHECK_BSSID_MATCH         BIT(6)  /* Accept BSSID match packet */
+#define  RCR_CHECK_BSSID_BEACON                BIT(7)  /* Accept BSSID match packet
+                                                (Rx beacon, probe rsp) */
+#define  RCR_ACCEPT_CRC32              BIT(8)  /* Accept CRC32 error packet */
+#define  RCR_ACCEPT_ICV                        BIT(9)  /* Accept ICV error packet */
+#define  RCR_ACCEPT_DATA_FRAME         BIT(11)
+#define  RCR_ACCEPT_CTRL_FRAME         BIT(12)
+#define  RCR_ACCEPT_MGMT_FRAME         BIT(13)
+#define  RCR_HTC_LOC_CTRL              BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */
+#define  RCR_MFBEN                     BIT(22)
+#define  RCR_LSIGEN                    BIT(23)
+#define  RCR_MULTI_BSSID_ENABLE                BIT(24) /* Enable Multiple BssId */
+#define  RCR_ACCEPT_BA_SSN             BIT(27) /* Accept BA SSN */
+#define  RCR_APPEND_PHYSTAT            BIT(28)
+#define  RCR_APPEND_ICV                        BIT(29)
+#define  RCR_APPEND_MIC                        BIT(30)
+#define  RCR_APPEND_FCS                        BIT(31) /* WMAC append FCS after */
+
+#define REG_RX_PKT_LIMIT               0x060c
+#define REG_RX_DLK_TIME                        0x060d
+#define REG_RX_DRVINFO_SZ              0x060f
+
+#define REG_MACID                      0x0610
+#define REG_BSSID                      0x0618
+#define REG_MAR                                0x0620
+#define REG_MBIDCAMCFG                 0x0628
+
+#define REG_USTIME_EDCA                        0x0638
+#define REG_MAC_SPEC_SIFS              0x063a
+
+/*  20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
+/*  [15:8] SIFS_R2T_OFDM, [7:0] SIFS_R2T_CCK */
+#define REG_R2T_SIFS                   0x063c
+/*  [15:8] SIFS_T2T_OFDM, [7:0] SIFS_T2T_CCK */
+#define REG_T2T_SIFS                   0x063e
+#define REG_ACKTO                      0x0640
+#define REG_CTS2TO                     0x0641
+#define REG_EIFS                       0x0642
+
+/* WMA, BA, CCX */
+#define REG_NAV_CTRL                   0x0650
+/* In units of 128us */
+#define REG_NAV_UPPER                  0x0652
+#define  NAV_UPPER_UNIT                        128
+
+#define REG_BACAMCMD                   0x0654
+#define REG_BACAMCONTENT               0x0658
+#define REG_LBDLY                      0x0660
+#define REG_FWDLY                      0x0661
+#define REG_RXERR_RPT                  0x0664
+#define REG_WMAC_TRXPTCL_CTL           0x0668
+
+/*  Security */
+#define REG_CAM_CMD                    0x0670
+#define  CAM_CMD_POLLING               BIT(31)
+#define  CAM_CMD_WRITE                 BIT(16)
+#define  CAM_CMD_KEY_SHIFT             3
+#define REG_CAM_WRITE                  0x0674
+#define  CAM_WRITE_VALID               BIT(15)
+#define REG_CAM_READ                   0x0678
+#define REG_CAM_DEBUG                  0x067c
+#define REG_SECURITY_CFG               0x0680
+#define  SEC_CFG_TX_USE_DEFKEY         BIT(0)
+#define  SEC_CFG_RX_USE_DEFKEY         BIT(1)
+#define  SEC_CFG_TX_SEC_ENABLE         BIT(2)
+#define  SEC_CFG_RX_SEC_ENABLE         BIT(3)
+#define  SEC_CFG_SKBYA2                        BIT(4)
+#define  SEC_CFG_NO_SKMC               BIT(5)
+#define  SEC_CFG_TXBC_USE_DEFKEY       BIT(6)
+#define  SEC_CFG_RXBC_USE_DEFKEY       BIT(7)
+
+/*  Power */
+#define REG_WOW_CTRL                   0x0690
+#define REG_PSSTATUS                   0x0691
+#define REG_PS_RX_INFO                 0x0692
+#define REG_LPNAV_CTRL                 0x0694
+#define REG_WKFMCAM_CMD                        0x0698
+#define REG_WKFMCAM_RWD                        0x069c
+#define REG_RXFLTMAP0                  0x06a0
+#define REG_RXFLTMAP1                  0x06a2
+#define REG_RXFLTMAP2                  0x06a4
+#define REG_BCN_PSR_RPT                        0x06a8
+#define REG_CALB32K_CTRL               0x06ac
+#define REG_PKT_MON_CTRL               0x06b4
+#define REG_BT_COEX_TABLE              0x06c0
+#define REG_WMAC_RESP_TXINFO           0x06d8
+
+#define REG_MACID1                     0x0700
+#define REG_BSSID1                     0x0708
+
+#define REG_FPGA0_RF_MODE              0x0800
+#define  FPGA_RF_MODE                  BIT(0)
+#define  FPGA_RF_MODE_JAPAN            BIT(1)
+#define  FPGA_RF_MODE_CCK              BIT(24)
+#define  FPGA_RF_MODE_OFDM             BIT(25)
+
+#define REG_FPGA0_TX_INFO              0x0804
+#define REG_FPGA0_PSD_FUNC             0x0808
+#define REG_FPGA0_TX_GAIN              0x080c
+#define REG_FPGA0_RF_TIMING1           0x0810
+#define REG_FPGA0_RF_TIMING2           0x0814
+#define REG_FPGA0_POWER_SAVE           0x0818
+#define  FPGA0_PS_LOWER_CHANNEL                BIT(26)
+#define  FPGA0_PS_UPPER_CHANNEL                BIT(27)
+
+#define REG_FPGA0_XA_HSSI_PARM1                0x0820  /* RF 3 wire register */
+#define  FPGA0_HSSI_PARM1_PI           BIT(8)
+#define REG_FPGA0_XA_HSSI_PARM2                0x0824
+#define REG_FPGA0_XB_HSSI_PARM1                0x0828
+#define REG_FPGA0_XB_HSSI_PARM2                0x082c
+#define  FPGA0_HSSI_3WIRE_DATA_LEN     0x800
+#define  FPGA0_HSSI_3WIRE_ADDR_LEN     0x400
+#define  FPGA0_HSSI_PARM2_ADDR_SHIFT   23
+#define  FPGA0_HSSI_PARM2_ADDR_MASK    0x7f800000      /* 0xff << 23 */
+#define  FPGA0_HSSI_PARM2_CCK_HIGH_PWR BIT(9)
+#define  FPGA0_HSSI_PARM2_EDGE_READ    BIT(31)
+
+#define REG_TX_AGC_B_RATE18_06         0x0830
+#define REG_TX_AGC_B_RATE54_24         0x0834
+#define REG_TX_AGC_B_CCK1_55_MCS32     0x0838
+#define REG_TX_AGC_B_MCS03_MCS00       0x083c
+
+#define REG_FPGA0_XA_LSSI_PARM         0x0840
+#define REG_FPGA0_XB_LSSI_PARM         0x0844
+#define  FPGA0_LSSI_PARM_ADDR_SHIFT    20
+#define  FPGA0_LSSI_PARM_ADDR_MASK     0x0ff00000
+#define  FPGA0_LSSI_PARM_DATA_MASK     0x000fffff
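[Editor's note: per the masks above, the LSSI parameter registers carry the RF
register address in bits 20-27 and a 20-bit data word in the low bits, so a
path-A RF write plausibly looks like the sketch below (accessors assumed as
above):

    static void rtl8xxxu_rf_write_demo(struct rtl8xxxu_priv *priv,
                                       u8 reg, u32 data)
    {
            u32 val32;

            val32 = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) |
                    (data & FPGA0_LSSI_PARM_DATA_MASK);
            rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, val32);
    }
]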
+
+#define REG_TX_AGC_B_MCS07_MCS04       0x0848
+#define REG_TX_AGC_B_MCS11_MCS08       0x084c
+
+#define REG_FPGA0_XCD_SWITCH_CTRL      0x085c
+
+#define REG_FPGA0_XA_RF_INT_OE         0x0860  /* RF Channel switch */
+#define REG_FPGA0_XB_RF_INT_OE         0x0864
+#define  FPGA0_INT_OE_ANTENNA_AB_OPEN  0x000
+#define  FPGA0_INT_OE_ANTENNA_A                BIT(8)
+#define  FPGA0_INT_OE_ANTENNA_B                BIT(9)
+#define  FPGA0_INT_OE_ANTENNA_MASK     (FPGA0_INT_OE_ANTENNA_A | \
+                                        FPGA0_INT_OE_ANTENNA_B)
+
+#define REG_TX_AGC_B_MCS15_MCS12       0x0868
+#define REG_TX_AGC_B_CCK11_A_CCK2_11   0x086c
+
+#define REG_FPGA0_XAB_RF_SW_CTRL       0x0870
+#define REG_FPGA0_XA_RF_SW_CTRL                0x0870  /* 16 bit */
+#define REG_FPGA0_XB_RF_SW_CTRL                0x0872  /* 16 bit */
+#define REG_FPGA0_XCD_RF_SW_CTRL       0x0874
+#define REG_FPGA0_XC_RF_SW_CTRL                0x0874  /* 16 bit */
+#define REG_FPGA0_XD_RF_SW_CTRL                0x0876  /* 16 bit */
+#define  FPGA0_RF_3WIRE_DATA           BIT(0)
+#define  FPGA0_RF_3WIRE_CLOC           BIT(1)
+#define  FPGA0_RF_3WIRE_LOAD           BIT(2)
+#define  FPGA0_RF_3WIRE_RW             BIT(3)
+#define  FPGA0_RF_3WIRE_MASK           0xf
+#define  FPGA0_RF_RFENV                        BIT(4)
+#define  FPGA0_RF_TRSW                 BIT(5)  /* No longer used */
+#define  FPGA0_RF_TRSWB                        BIT(6)
+#define  FPGA0_RF_ANTSW                        BIT(8)
+#define  FPGA0_RF_ANTSWB               BIT(9)
+#define  FPGA0_RF_PAPE                 BIT(10)
+#define  FPGA0_RF_PAPE5G               BIT(11)
+#define  FPGA0_RF_BD_CTRL_SHIFT                16
+
+#define REG_FPGA0_XAB_RF_PARM          0x0878  /* Antenna select path in ODM */
+#define REG_FPGA0_XA_RF_PARM           0x0878  /* 16 bit */
+#define REG_FPGA0_XB_RF_PARM           0x087a  /* 16 bit */
+#define REG_FPGA0_XCD_RF_PARM          0x087c
+#define REG_FPGA0_XC_RF_PARM           0x087c  /* 16 bit */
+#define REG_FPGA0_XD_RF_PARM           0x087e  /* 16 bit */
+#define  FPGA0_RF_PARM_RFA_ENABLE      BIT(1)
+#define  FPGA0_RF_PARM_RFB_ENABLE      BIT(17)
+#define  FPGA0_RF_PARM_CLK_GATE                BIT(31)
+
+#define REG_FPGA0_ANALOG1              0x0880
+#define REG_FPGA0_ANALOG2              0x0884
+#define  FPGA0_ANALOG2_20MHZ           BIT(10)
+#define REG_FPGA0_ANALOG3              0x0888
+#define REG_FPGA0_ANALOG4              0x088c
+
+#define REG_FPGA0_XA_LSSI_READBACK     0x08a0  /* Transceiver LSSI Readback */
+#define REG_FPGA0_XB_LSSI_READBACK     0x08a4
+#define REG_HSPI_XA_READBACK           0x08b8  /* Transceiver A HSPI read */
+#define REG_HSPI_XB_READBACK           0x08bc  /* Transceiver B HSPI read */
+
+#define REG_FPGA1_RF_MODE              0x0900
+
+#define REG_FPGA1_TX_INFO              0x090c
+
+#define REG_CCK0_SYSTEM                        0x0a00
+#define  CCK0_SIDEBAND                 BIT(4)
+
+#define REG_CCK0_AFE_SETTING           0x0a04
+
+#define REG_CONFIG_ANT_A               0x0b68
+#define REG_CONFIG_ANT_B               0x0b6c
+
+#define REG_OFDM0_TRX_PATH_ENABLE      0x0c04
+#define OFDM_RF_PATH_RX_MASK           0x0f
+#define OFDM_RF_PATH_RX_A              BIT(0)
+#define OFDM_RF_PATH_RX_B              BIT(1)
+#define OFDM_RF_PATH_RX_C              BIT(2)
+#define OFDM_RF_PATH_RX_D              BIT(3)
+#define OFDM_RF_PATH_TX_MASK           0xf0
+#define OFDM_RF_PATH_TX_A              BIT(4)
+#define OFDM_RF_PATH_TX_B              BIT(5)
+#define OFDM_RF_PATH_TX_C              BIT(6)
+#define OFDM_RF_PATH_TX_D              BIT(7)
+
+#define REG_OFDM0_TR_MUX_PAR           0x0c08
+
+#define REG_OFDM0_XA_RX_IQ_IMBALANCE   0x0c14
+#define REG_OFDM0_XB_RX_IQ_IMBALANCE   0x0c1c
+
+#define REG_OFDM0_ENERGY_CCA_THRES     0x0c4c
+
+#define REG_OFDM0_XA_AGC_CORE1         0x0c50
+#define REG_OFDM0_XA_AGC_CORE2         0x0c54
+#define REG_OFDM0_XB_AGC_CORE1         0x0c58
+#define REG_OFDM0_XB_AGC_CORE2         0x0c5c
+#define REG_OFDM0_XC_AGC_CORE1         0x0c60
+#define REG_OFDM0_XC_AGC_CORE2         0x0c64
+#define REG_OFDM0_XD_AGC_CORE1         0x0c68
+#define REG_OFDM0_XD_AGC_CORE2         0x0c6c
+#define  OFDM0_X_AGC_CORE1_IGI_MASK    0x0000007F
+
+#define REG_OFDM0_AGC_PARM1            0x0c70
+
+#define REG_OFDM0_AGCR_SSI_TABLE       0x0c78
+
+#define REG_OFDM0_XA_TX_IQ_IMBALANCE   0x0c80
+#define REG_OFDM0_XB_TX_IQ_IMBALANCE   0x0c88
+#define REG_OFDM0_XC_TX_IQ_IMBALANCE   0x0c90
+#define REG_OFDM0_XD_TX_IQ_IMBALANCE   0x0c98
+
+#define REG_OFDM0_XC_TX_AFE            0x0c94
+#define REG_OFDM0_XD_TX_AFE            0x0c9c
+
+#define REG_OFDM0_RX_IQ_EXT_ANTA       0x0ca0
+
+#define REG_OFDM1_LSTF                 0x0d00
+#define  OFDM_LSTF_PRIME_CH_LOW                BIT(10)
+#define  OFDM_LSTF_PRIME_CH_HIGH       BIT(11)
+#define  OFDM_LSTF_PRIME_CH_MASK       (OFDM_LSTF_PRIME_CH_LOW | \
+                                        OFDM_LSTF_PRIME_CH_HIGH)
+#define  OFDM_LSTF_CONTINUE_TX         BIT(28)
+#define  OFDM_LSTF_SINGLE_CARRIER      BIT(29)
+#define  OFDM_LSTF_SINGLE_TONE         BIT(30)
+#define  OFDM_LSTF_MASK                        0x70000000
+
+#define REG_OFDM1_TRX_PATH_ENABLE      0x0d04
+
+#define REG_TX_AGC_A_RATE18_06         0x0e00
+#define REG_TX_AGC_A_RATE54_24         0x0e04
+#define REG_TX_AGC_A_CCK1_MCS32                0x0e08
+#define REG_TX_AGC_A_MCS03_MCS00       0x0e10
+#define REG_TX_AGC_A_MCS07_MCS04       0x0e14
+#define REG_TX_AGC_A_MCS11_MCS08       0x0e18
+#define REG_TX_AGC_A_MCS15_MCS12       0x0e1c
+
+#define REG_FPGA0_IQK                  0x0e28
+
+#define REG_TX_IQK_TONE_A              0x0e30
+#define REG_RX_IQK_TONE_A              0x0e34
+#define REG_TX_IQK_PI_A                        0x0e38
+#define REG_RX_IQK_PI_A                        0x0e3c
+
+#define REG_TX_IQK                     0x0e40
+#define REG_RX_IQK                     0x0e44
+#define REG_IQK_AGC_PTS                        0x0e48
+#define REG_IQK_AGC_RSP                        0x0e4c
+#define REG_TX_IQK_TONE_B              0x0e50
+#define REG_RX_IQK_TONE_B              0x0e54
+#define REG_TX_IQK_PI_B                        0x0e58
+#define REG_RX_IQK_PI_B                        0x0e5c
+#define REG_IQK_AGC_CONT               0x0e60
+
+#define REG_BLUETOOTH                  0x0e6c
+#define REG_RX_WAIT_CCA                        0x0e70
+#define REG_TX_CCK_RFON                        0x0e74
+#define REG_TX_CCK_BBON                        0x0e78
+#define REG_TX_OFDM_RFON               0x0e7c
+#define REG_TX_OFDM_BBON               0x0e80
+#define REG_TX_TO_RX                   0x0e84
+#define REG_TX_TO_TX                   0x0e88
+#define REG_RX_CCK                     0x0e8c
+
+#define REG_TX_POWER_BEFORE_IQK_A      0x0e94
+#define REG_TX_POWER_AFTER_IQK_A       0x0e9c
+
+#define REG_RX_POWER_BEFORE_IQK_A      0x0ea0
+#define REG_RX_POWER_BEFORE_IQK_A_2    0x0ea4
+#define REG_RX_POWER_AFTER_IQK_A       0x0ea8
+#define REG_RX_POWER_AFTER_IQK_A_2     0x0eac
+
+#define REG_TX_POWER_BEFORE_IQK_B      0x0eb4
+#define REG_TX_POWER_AFTER_IQK_B       0x0ebc
+
+#define REG_RX_POWER_BEFORE_IQK_B      0x0ec0
+#define REG_RX_POWER_BEFORE_IQK_B_2    0x0ec4
+#define REG_RX_POWER_AFTER_IQK_B       0x0ec8
+#define REG_RX_POWER_AFTER_IQK_B_2     0x0ecc
+
+#define REG_RX_OFDM                    0x0ed0
+#define REG_RX_WAIT_RIFS               0x0ed4
+#define REG_RX_TO_RX                   0x0ed8
+#define REG_STANDBY                    0x0edc
+#define REG_SLEEP                      0x0ee0
+#define REG_PMPD_ANAEN                 0x0eec
+
+#define REG_FW_START_ADDRESS           0x1000
+
+#define REG_USB_INFO                   0xfe17
+#define REG_USB_HIMR                   0xfe38
+#define  USB_HIMR_TIMEOUT2             BIT(31)
+#define  USB_HIMR_TIMEOUT1             BIT(30)
+#define  USB_HIMR_PSTIMEOUT            BIT(29)
+#define  USB_HIMR_GTINT4               BIT(28)
+#define  USB_HIMR_GTINT3               BIT(27)
+#define  USB_HIMR_TXBCNERR             BIT(26)
+#define  USB_HIMR_TXBCNOK              BIT(25)
+#define  USB_HIMR_TSF_BIT32_TOGGLE     BIT(24)
+#define  USB_HIMR_BCNDMAINT3           BIT(23)
+#define  USB_HIMR_BCNDMAINT2           BIT(22)
+#define  USB_HIMR_BCNDMAINT1           BIT(21)
+#define  USB_HIMR_BCNDMAINT0           BIT(20)
+#define  USB_HIMR_BCNDOK3              BIT(19)
+#define  USB_HIMR_BCNDOK2              BIT(18)
+#define  USB_HIMR_BCNDOK1              BIT(17)
+#define  USB_HIMR_BCNDOK0              BIT(16)
+#define  USB_HIMR_HSISR_IND            BIT(15)
+#define  USB_HIMR_BCNDMAINT_E          BIT(14)
+/* RSVD        BIT(13) */
+#define  USB_HIMR_CTW_END              BIT(12)
+/* RSVD        BIT(11) */
+#define  USB_HIMR_C2HCMD               BIT(10)
+#define  USB_HIMR_CPWM2                        BIT(9)
+#define  USB_HIMR_CPWM                 BIT(8)
+#define  USB_HIMR_HIGHDOK              BIT(7)  /*  High Queue DMA OK
+                                                   Interrupt */
+#define  USB_HIMR_MGNTDOK              BIT(6)  /*  Management Queue DMA OK
+                                                   Interrupt */
+#define  USB_HIMR_BKDOK                        BIT(5)  /*  AC_BK DMA OK Interrupt */
+#define  USB_HIMR_BEDOK                        BIT(4)  /*  AC_BE DMA OK Interrupt */
+#define  USB_HIMR_VIDOK                        BIT(3)  /*  AC_VI DMA OK Interrupt */
+#define  USB_HIMR_VODOK                        BIT(2)  /*  AC_VO DMA OK Interrupt */
+#define  USB_HIMR_RDU                  BIT(1)  /*  Receive Descriptor
+                                                   Unavailable */
+#define  USB_HIMR_ROK                  BIT(0)  /*  Receive DMA OK Interrupt */
+
+#define REG_USB_SPECIAL_OPTION         0xfe55
+#define REG_USB_DMA_AGG_TO             0xfe5b
+#define REG_USB_AGG_TO                 0xfe5c
+#define REG_USB_AGG_TH                 0xfe5d
+
+#define REG_NORMAL_SIE_VID             0xfe60  /* 0xfe60 - 0xfe61 */
+#define REG_NORMAL_SIE_PID             0xfe62  /* 0xfe62 - 0xfe63 */
+#define REG_NORMAL_SIE_OPTIONAL                0xfe64
+#define REG_NORMAL_SIE_EP              0xfe65  /* 0xfe65 - 0xfe67 */
+#define REG_NORMAL_SIE_EP_TX           0xfe66
+#define  NORMAL_SIE_EP_TX_HIGH_MASK    0x000f
+#define  NORMAL_SIE_EP_TX_NORMAL_MASK  0x00f0
+#define  NORMAL_SIE_EP_TX_LOW_MASK     0x0f00
+
+#define REG_NORMAL_SIE_PHY             0xfe68  /* 0xfe68 - 0xfe6b */
+#define REG_NORMAL_SIE_OPTIONAL2       0xfe6c
+#define REG_NORMAL_SIE_GPS_EP          0xfe6d  /* RTL8723 only */
+#define REG_NORMAL_SIE_MAC_ADDR                0xfe70  /* 0xfe70 - 0xfe75 */
+#define REG_NORMAL_SIE_STRING          0xfe80  /* 0xfe80 - 0xfedf */
+
+/* RF6052 registers */
+#define RF6052_REG_AC                  0x00
+#define RF6052_REG_IQADJ_G1            0x01
+#define RF6052_REG_IQADJ_G2            0x02
+#define RF6052_REG_BS_PA_APSET_G1_G4   0x03
+#define RF6052_REG_BS_PA_APSET_G5_G8   0x04
+#define RF6052_REG_POW_TRSW            0x05
+#define RF6052_REG_GAIN_RX             0x06
+#define RF6052_REG_GAIN_TX             0x07
+#define RF6052_REG_TXM_IDAC            0x08
+#define RF6052_REG_IPA_G               0x09
+#define RF6052_REG_TXBIAS_G            0x0a
+#define RF6052_REG_TXPA_AG             0x0b
+#define RF6052_REG_IPA_A               0x0c
+#define RF6052_REG_TXBIAS_A            0x0d
+#define RF6052_REG_BS_PA_APSET_G9_G11  0x0e
+#define RF6052_REG_BS_IQGEN            0x0f
+#define RF6052_REG_MODE1               0x10
+#define RF6052_REG_MODE2               0x11
+#define RF6052_REG_RX_AGC_HP           0x12
+#define RF6052_REG_TX_AGC              0x13
+#define RF6052_REG_BIAS                        0x14
+#define RF6052_REG_IPA                 0x15
+#define RF6052_REG_TXBIAS              0x16
+#define RF6052_REG_POW_ABILITY         0x17
+#define RF6052_REG_MODE_AG             0x18    /* RF channel and BW switch */
+#define  MODE_AG_CHANNEL_MASK          0x3ff
+#define  MODE_AG_CHANNEL_20MHZ         BIT(10)
+
+#define RF6052_REG_TOP                 0x19
+#define RF6052_REG_RX_G1               0x1a
+#define RF6052_REG_RX_G2               0x1b
+#define RF6052_REG_RX_BB2              0x1c
+#define RF6052_REG_RX_BB1              0x1d
+#define RF6052_REG_RCK1                        0x1e
+#define RF6052_REG_RCK2                        0x1f
+#define RF6052_REG_TX_G1               0x20
+#define RF6052_REG_TX_G2               0x21
+#define RF6052_REG_TX_G3               0x22
+#define RF6052_REG_TX_BB1              0x23
+#define RF6052_REG_T_METER             0x24
+#define RF6052_REG_SYN_G1              0x25    /* RF TX Power control */
+#define RF6052_REG_SYN_G2              0x26    /* RF TX Power control */
+#define RF6052_REG_SYN_G3              0x27    /* RF TX Power control */
+#define RF6052_REG_SYN_G4              0x28    /* RF TX Power control */
+#define RF6052_REG_SYN_G5              0x29    /* RF TX Power control */
+#define RF6052_REG_SYN_G6              0x2a    /* RF TX Power control */
+#define RF6052_REG_SYN_G7              0x2b    /* RF TX Power control */
+#define RF6052_REG_SYN_G8              0x2c    /* RF TX Power control */
+
+#define RF6052_REG_RCK_OS              0x30    /* RF TX PA control */
+
+#define RF6052_REG_TXPA_G1             0x31    /* RF TX PA control */
+#define RF6052_REG_TXPA_G2             0x32    /* RF TX PA control */
+#define RF6052_REG_TXPA_G3             0x33    /* RF TX PA control */
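The header above adds the register map, including the USB host interrupt mask (HIMR) bits. As a rough illustration of how those bit definitions compose -- the helper name below is hypothetical and not part of the patch -- a driver enabling the usual RX/TX completion interrupts might build the mask like this:

#include <linux/types.h>

/* Hypothetical helper; illustrative only. Combines the USB_HIMR_* bits
 * defined above into a default mask: RX DMA OK, RX descriptor
 * unavailable, and the four AC TX DMA OK interrupts. */
static u32 example_default_usb_himr(void)
{
	return USB_HIMR_ROK | USB_HIMR_RDU |
	       USB_HIMR_VODOK | USB_HIMR_VIDOK |
	       USB_HIMR_BEDOK | USB_HIMR_BKDOK;
}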
similarity index 99%
rename from drivers/net/wireless/rtlwifi/pci.h
rename to drivers/net/wireless/realtek/rtlwifi/pci.h
index d4567d12e07ebd13f17f0097baeec41a25702d31..5da6703942d9dd08017d896070404fdbe29a96e4 100644 (file)
@@ -247,6 +247,8 @@ struct rtl_pci {
        /* MSI support */
        bool msi_support;
        bool using_msi;
+       /* interrupt clear before set */
+       bool int_clear;
 };
 
 struct mp_adapter {
similarity index 99%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 25db369b5d18c4e62a9862015f572d18affdf53d..34ce06441d1b625ebacb2cd74641ffc95f820bb4 100644 (file)
@@ -1946,6 +1946,14 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
                mac->rx_data_filter = *(u16 *)val;
                break;
+       case HW_VAR_KEEP_ALIVE:{
+                       u8 array[2];
+                       array[0] = 0xff;
+                       array[1] = *((u8 *)val);
+                       rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2,
+                                           array);
+                       break;
+               }
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "switch case not processed\n");
similarity index 99%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index b7f18e2155eb18358cf4d4f9f3f82774f9b6f522..6e9418ed90c289bee5b7f2dfc478f847dfc7ca68 100644 (file)
@@ -2253,11 +2253,28 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
+static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 tmp = rtl_read_dword(rtlpriv, REG_HISR);
+
+       rtl_write_dword(rtlpriv, REG_HISR, tmp);
+
+       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
+       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
+
+       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
+       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
+}
+
 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
+       if (!rtlpci->int_clear)
+               rtl8821ae_clear_interrupt(hw); /* clear it here first */
+
        rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
        rtlpci->irq_enabled = true;
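rtl8821ae_clear_interrupt() uses the common write-one-to-clear idiom: reading an interrupt status register returns the pending bits, and writing the same value back acknowledges them. A minimal generic sketch of that pattern, assuming W1C semantics for the status register (helper name is illustrative, not from the patch):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative W1C acknowledge: any bit written back as 1 is cleared. */
static void example_ack_pending(void __iomem *isr)
{
	u32 pending = readl(isr);	/* snapshot the pending bits */

	writel(pending, isr);		/* writing 1s clears them */
}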
similarity index 97%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index a4988121e1ab6a20bad5ad9b5934c166a41f36d6..8ee141a55bc5cc6b566e79dde58cdb05583e7fdf 100644 (file)
@@ -96,6 +96,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
 
        rtl8821ae_bt_reg_init(hw);
        rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+       rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
        rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
 
        rtlpriv->dm.dm_initialgain_enable = 1;
@@ -167,6 +168,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
        rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
        rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+       rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
        if (rtlpriv->cfg->mod_params->disable_watchdog)
                pr_info("watchdog disabled\n");
        rtlpriv->psc.reg_fwctrl_lps = 3;
@@ -308,6 +310,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
        .swctrl_lps = false,
        .fwctrl_lps = true,
        .msi_support = true,
+       .int_clear = true,
        .debug = DBG_EMERG,
        .disable_watchdog = 0,
 };
@@ -437,6 +440,7 @@ module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444);
 module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog,
                   bool, 0444);
+module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
@@ -444,6 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
+MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 1)\n");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
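The new int_clear parameter defaults to 1 (see rtl8821ae_mod_params above), which skips the HISR/HISRE/HSISR clearing in rtl8821ae_enable_interrupt(). A usage sketch, illustrative only:

/* Re-enable the clear-before-set behaviour at module load time by
 * turning the parameter off:
 *
 *	modprobe rtl8821ae int_clear=0
 */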
 
similarity index 99%
rename from drivers/net/wireless/rtlwifi/wifi.h
rename to drivers/net/wireless/realtek/rtlwifi/wifi.h
index b90ca618b123209a1724bc808c4fdfe8ea330a69..4544752a2ba83ca173f7cb4f14abc72538b73beb 100644 (file)
@@ -2249,6 +2249,9 @@ struct rtl_mod_params {
 
        /* default 0: 1 means disable */
        bool disable_watchdog;
+
+       /* default 1: 1 means do not clear interrupts before enabling them */
+       bool int_clear;
 };
 
 struct rtl_hal_usbint_cfg {
index 5932306084fd305a6f45ccf03d8957b771eaaa13..bf9afbf46c1bbbc1220bad06e429d2622a1f9f58 100644 (file)
@@ -1114,6 +1114,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x0db0, 0x871c) },
        { USB_DEVICE(0x0db0, 0x899a) },
        /* Ovislink */
+       { USB_DEVICE(0x1b75, 0x3070) },
        { USB_DEVICE(0x1b75, 0x3071) },
        { USB_DEVICE(0x1b75, 0x3072) },
        { USB_DEVICE(0x1b75, 0xa200) },
index 929a6e7e5ecfe9249569c0059516e531fb0b79eb..56ebd8267386e6a91cabf506962e15f5d83531ec 100644 (file)
@@ -788,6 +788,12 @@ static void connect(struct backend_info *be)
        /* Use the number of queues requested by the frontend */
        be->vif->queues = vzalloc(requested_num_queues *
                                  sizeof(struct xenvif_queue));
+       if (!be->vif->queues) {
+               xenbus_dev_fatal(dev, -ENOMEM,
+                                "allocating queues");
+               return;
+       }
+
        be->vif->num_queues = requested_num_queues;
        be->vif->stalled_queues = requested_num_queues;
 
index 9bf63c27a9b7af80dc3ba46d2e7633c9f9391eb8..441b158d04f73c0a7d3a1dbe61cc37a4913eec2a 100644 (file)
@@ -1706,19 +1706,19 @@ static void xennet_destroy_queues(struct netfront_info *info)
 }
 
 static int xennet_create_queues(struct netfront_info *info,
-                               unsigned int num_queues)
+                               unsigned int *num_queues)
 {
        unsigned int i;
        int ret;
 
-       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+       info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
                               GFP_KERNEL);
        if (!info->queues)
                return -ENOMEM;
 
        rtnl_lock();
 
-       for (i = 0; i < num_queues; i++) {
+       for (i = 0; i < *num_queues; i++) {
                struct netfront_queue *queue = &info->queues[i];
 
                queue->id = i;
@@ -1728,7 +1728,7 @@ static int xennet_create_queues(struct netfront_info *info,
                if (ret < 0) {
                        dev_warn(&info->netdev->dev,
                                 "only created %d queues\n", i);
-                       num_queues = i;
+                       *num_queues = i;
                        break;
                }
 
@@ -1738,11 +1738,11 @@ static int xennet_create_queues(struct netfront_info *info,
                        napi_enable(&queue->napi);
        }
 
-       netif_set_real_num_tx_queues(info->netdev, num_queues);
+       netif_set_real_num_tx_queues(info->netdev, *num_queues);
 
        rtnl_unlock();
 
-       if (num_queues == 0) {
+       if (*num_queues == 0) {
                dev_err(&info->netdev->dev, "no queues\n");
                return -EINVAL;
        }
@@ -1788,7 +1788,7 @@ static int talk_to_netback(struct xenbus_device *dev,
        if (info->queues)
                xennet_destroy_queues(info);
 
-       err = xennet_create_queues(info, num_queues);
+       err = xennet_create_queues(info, &num_queues);
        if (err < 0)
                goto destroy_ring;
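xennet_create_queues() now takes num_queues by pointer so that a partial failure is reported back: the caller continues with however many queues were actually created instead of the requested count. A minimal sketch of the out-parameter pattern under that assumption (setup_one() is a hypothetical stand-in for the per-queue init):

#include <linux/errno.h>

int setup_one(unsigned int i);	/* stand-in for per-queue init */

/* Sketch: on partial failure, shrink *num so the caller proceeds with
 * the queues that do exist. */
static int example_create_queues(unsigned int *num)
{
	unsigned int i;

	for (i = 0; i < *num; i++) {
		if (setup_one(i) < 0) {
			*num = i;	/* report how many were set up */
			break;
		}
	}
	return *num ? 0 : -EINVAL;
}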
 
index d3c6676b3c0cafbaa996c1f60d1c21adb0f20b21..6fd4e5a5ef4a495bbd412ee33b931f4fb3a8a24f 100644 (file)
@@ -67,7 +67,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
        int rc;
 
        /* Stop the user from reading */
-       if (pos > nvmem->size)
+       if (pos >= nvmem->size)
                return 0;
 
        if (pos + count > nvmem->size)
@@ -92,7 +92,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
        int rc;
 
        /* Stop the user from writing */
-       if (pos > nvmem->size)
+       if (pos >= nvmem->size)
                return 0;
 
        if (pos + count > nvmem->size)
@@ -825,7 +825,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
                return rc;
 
        /* shift bits in-place */
-       if (cell->bit_offset || cell->bit_offset)
+       if (cell->bit_offset || cell->nbits)
                nvmem_shift_read_buffer_in_place(cell, buf);
 
        *len = cell->bytes;
@@ -938,7 +938,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
        rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
 
        /* free the tmp buffer */
-       if (cell->bit_offset)
+       if (cell->bit_offset || cell->nbits)
                kfree(buf);
 
        if (IS_ERR_VALUE(rc))
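Changing the bounds checks from > to >= makes a transfer that starts exactly at the end of the device return 0 (EOF) rather than fall through with a zero-length clamp. Worked boundary case, as a sketch with assumed types:

#include <linux/types.h>

/* For a 256-byte nvmem device: pos == 256 now returns 0 immediately;
 * previously `pos > size` was false and count was clamped to zero,
 * producing an empty regmap transfer. */
static ssize_t example_clamp(loff_t pos, size_t count, size_t size)
{
	if (pos >= size)
		return 0;		/* at or past EOF */
	if (pos + count > size)
		count = size - pos;	/* clamp to what remains */
	return count;
}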
index 14777dd5212d29d10c672a18c8b85c17fdcdceb4..cfa3b85064dd233a463b1556742274d960e4f47b 100644 (file)
@@ -103,7 +103,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
        struct nvmem_device *nvmem;
        struct regmap *regmap;
        struct sunxi_sid *sid;
-       int i, size;
+       int ret, i, size;
        char *randomness;
 
        sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
@@ -131,6 +131,11 @@ static int sunxi_sid_probe(struct platform_device *pdev)
                return PTR_ERR(nvmem);
 
        randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
+       if (!randomness) {
+               ret = -ENOMEM;
+               goto err_unreg_nvmem;
+       }
+
        for (i = 0; i < size; i++)
                randomness[i] = sunxi_sid_read_byte(sid, i);
 
@@ -140,6 +145,10 @@ static int sunxi_sid_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, nvmem);
 
        return 0;
+
+err_unreg_nvmem:
+       nvmem_unregister(nvmem);
+       return ret;
 }
 
 static int sunxi_sid_remove(struct platform_device *pdev)
index d4497141d083a71d5d5206496fee58fbddc5cb13..4a7da3c3e0353c3c746e9b10be9a093c8d085920 100644 (file)
@@ -1243,6 +1243,10 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
        BUG_ON(!chip);
        if (!chip->irq_write_msi_msg)
                chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+       if (!chip->irq_mask)
+               chip->irq_mask = pci_msi_mask_irq;
+       if (!chip->irq_unmask)
+               chip->irq_unmask = pci_msi_unmask_irq;
 }
 
 /**
index 0062027afb1ef90335ae46782dba06448949b989..77a2e054fdea0f46ccd3d2841f5f837f80a985e1 100644 (file)
@@ -276,6 +276,7 @@ static const struct of_device_id phy_berlin_sata_of_match[] = {
        { .compatible = "marvell,berlin2q-sata-phy" },
        { },
 };
+MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match);
 
 static struct platform_driver phy_berlin_sata_driver = {
        .probe  = phy_berlin_sata_probe,
index 49a1ed0cef56fe7cbf9aed102b47149415f021f0..107cb57c3513c22642bb14420f47c469a39dcfa2 100644 (file)
@@ -432,6 +432,7 @@ out_disable_src:
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
 
 static
 int ufs_qcom_phy_disable_vreg(struct phy *phy,
@@ -474,6 +475,7 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
                phy->is_ref_clk_enabled = false;
        }
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
 
 #define UFS_REF_CLK_EN (1 << 5)
 
@@ -517,11 +519,13 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
 {
        ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
 
 void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
 {
        ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
 
 /* Turn ON M-PHY RMMI interface clocks */
 int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
@@ -550,6 +554,7 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
 
 /* Turn OFF M-PHY RMMI interface clocks */
 void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
@@ -562,6 +567,7 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
                phy->is_iface_clk_enabled = false;
        }
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
 
 int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
 {
@@ -578,6 +584,7 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
 
 int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
 {
@@ -595,6 +602,7 @@ int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
 
 void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
                                          u8 major, u16 minor, u16 step)
@@ -605,6 +613,7 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
        ufs_qcom_phy->host_ctrl_rev_minor = minor;
        ufs_qcom_phy->host_ctrl_rev_step = step;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
 
 int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
 {
@@ -625,6 +634,7 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
 
 int ufs_qcom_phy_remove(struct phy *generic_phy,
                        struct ufs_qcom_phy *ufs_qcom_phy)
@@ -662,6 +672,7 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
        return ufs_qcom_phy->phy_spec_ops->
                        is_physical_coding_sublayer_ready(ufs_qcom_phy);
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
 
 int ufs_qcom_phy_power_on(struct phy *generic_phy)
 {
index 5a5c073e72fe1ee6115bea310890b646b6b40a69..91d6f342c56596fc2e3fcff18213dba004546c3e 100644 (file)
@@ -98,6 +98,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
        struct device_node *child;
        struct regmap *grf;
        unsigned int reg_offset;
+       int err;
 
        grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
        if (IS_ERR(grf)) {
@@ -129,6 +130,11 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
                        return PTR_ERR(rk_phy->phy);
                }
                phy_set_drvdata(rk_phy->phy, rk_phy);
+
+               /* only power up the USB PHY when it is in use, so disable it at init */
+               err = rockchip_usb_phy_power(rk_phy, 1);
+               if (err)
+                       return err;
        }
 
        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
index faf635654312a75168ad7237ecf1995d317e396d..293ed4381cc0e08c1bb195bbb9f535fba66075ed 100644 (file)
@@ -26,7 +26,8 @@
 #include "pinctrl-imx.h"
 
 enum imx25_pads {
-       MX25_PAD_RESERVE0 = 1,
+       MX25_PAD_RESERVE0 = 0,
+       MX25_PAD_RESERVE1 = 1,
        MX25_PAD_A10 = 2,
        MX25_PAD_A13 = 3,
        MX25_PAD_A14 = 4,
@@ -169,6 +170,7 @@ enum imx25_pads {
 /* Pad names for the pinmux subsystem */
 static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
        IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
+       IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
        IMX_PINCTRL_PIN(MX25_PAD_A10),
        IMX_PINCTRL_PIN(MX25_PAD_A13),
        IMX_PINCTRL_PIN(MX25_PAD_A14),
index 63676617bc5997218729a56c15c8597f8c2499b9..f9a3f8f446f76afe28177d5f9dcb8b8376b0310b 100644 (file)
@@ -653,7 +653,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x2, "spi1"),          /* CS1 */
-                 SUNXI_FUNCTION(0x3, "uart3"),         /* PWM1 */
+                 SUNXI_FUNCTION(0x3, "pwm"),           /* PWM1 */
                  SUNXI_FUNCTION(0x5, "uart2"),         /* CTS */
                  SUNXI_FUNCTION_IRQ(0x6, 13)),         /* EINT13 */
 };
index 7e9dae54fcb22e1df91d8a9fc6e01556de2e14f1..2df8bbecebfc4c5742e5652386186ce67a7aba7e 100644 (file)
 #define DRIVER_NAME "ph1-sld8-pinctrl"
 
 static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
-       UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(0, "PCA00", 0,
                             15, UNIPHIER_PIN_DRV_4_8,
                             15, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(1, "PCA01", 0,
                             16, UNIPHIER_PIN_DRV_4_8,
                             16, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(2, "PCA02", 0,
                             17, UNIPHIER_PIN_DRV_4_8,
                             17, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(3, "PCA03", 0,
                             18, UNIPHIER_PIN_DRV_4_8,
                             18, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(4, "PCA04", 0,
                             19, UNIPHIER_PIN_DRV_4_8,
                             19, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(5, "PCA05", 0,
                             20, UNIPHIER_PIN_DRV_4_8,
                             20, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(6, "PCA06", 0,
                             21, UNIPHIER_PIN_DRV_4_8,
                             21, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(7, "PCA07", 0,
                             22, UNIPHIER_PIN_DRV_4_8,
                             22, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(8, "PCA08", 0,
                             23, UNIPHIER_PIN_DRV_4_8,
                             23, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(9, "PCA09", 0,
                             24, UNIPHIER_PIN_DRV_4_8,
                             24, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(10, "PCA10", 0,
                             25, UNIPHIER_PIN_DRV_4_8,
                             25, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(11, "PCA11", 0,
                             26, UNIPHIER_PIN_DRV_4_8,
                             26, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(12, "PCA12", 0,
                             27, UNIPHIER_PIN_DRV_4_8,
                             27, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(13, "PCA13", 0,
                             28, UNIPHIER_PIN_DRV_4_8,
                             28, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(14, "PCA14", 0,
                             29, UNIPHIER_PIN_DRV_4_8,
                             29, UNIPHIER_PIN_PULL_DOWN),
        UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE,
@@ -118,199 +118,199 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
        UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE,
                             36, UNIPHIER_PIN_DRV_8_12_16_20,
                             128, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(32, "SDCLK", 8,
                             40, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(33, "SDCMD", 8,
                             44, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(34, "SDDAT0", 8,
                             48, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(35, "SDDAT1", 8,
                             52, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(36, "SDDAT2", 8,
                             56, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(37, "SDDAT3", 8,
                             60, UNIPHIER_PIN_DRV_8_12_16_20,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(38, "SDCD", 8,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             129, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(39, "SDWP", 8,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             130, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(40, "SDVOLC", 9,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             131, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", 0,
                             37, UNIPHIER_PIN_DRV_4_8,
                             37, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(42, "USB0OD", 0,
                             38, UNIPHIER_PIN_DRV_4_8,
                             38, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", 0,
                             39, UNIPHIER_PIN_DRV_4_8,
                             39, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(44, "USB1OD", 0,
                             40, UNIPHIER_PIN_DRV_4_8,
                             40, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(45, "PCRESET", 0,
                             41, UNIPHIER_PIN_DRV_4_8,
                             41, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(46, "PCREG", 0,
                             42, UNIPHIER_PIN_DRV_4_8,
                             42, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(47, "PCCE2", 0,
                             43, UNIPHIER_PIN_DRV_4_8,
                             43, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(48, "PCVS1", 0,
                             44, UNIPHIER_PIN_DRV_4_8,
                             44, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(49, "PCCD2", 0,
                             45, UNIPHIER_PIN_DRV_4_8,
                             45, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(50, "PCCD1", 0,
                             46, UNIPHIER_PIN_DRV_4_8,
                             46, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(51, "PCREADY", 0,
                             47, UNIPHIER_PIN_DRV_4_8,
                             47, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(52, "PCDOE", 0,
                             48, UNIPHIER_PIN_DRV_4_8,
                             48, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(53, "PCCE1", 0,
                             49, UNIPHIER_PIN_DRV_4_8,
                             49, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(54, "PCWE", 0,
                             50, UNIPHIER_PIN_DRV_4_8,
                             50, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(55, "PCOE", 0,
                             51, UNIPHIER_PIN_DRV_4_8,
                             51, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(56, "PCWAIT", 0,
                             52, UNIPHIER_PIN_DRV_4_8,
                             52, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(57, "PCIOWR", 0,
                             53, UNIPHIER_PIN_DRV_4_8,
                             53, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(58, "PCIORD", 0,
                             54, UNIPHIER_PIN_DRV_4_8,
                             54, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", 0,
                             55, UNIPHIER_PIN_DRV_4_8,
                             55, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", 0,
                             56, UNIPHIER_PIN_DRV_4_8,
                             56, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", 0,
                             57, UNIPHIER_PIN_DRV_4_8,
                             57, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", 0,
                             58, UNIPHIER_PIN_DRV_4_8,
                             58, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", 0,
                             59, UNIPHIER_PIN_DRV_4_8,
                             59, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 0,
                             60, UNIPHIER_PIN_DRV_4_8,
                             60, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", 0,
                             61, UNIPHIER_PIN_DRV_4_8,
                             61, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", 0,
                             62, UNIPHIER_PIN_DRV_4_8,
                             62, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", 0,
                             63, UNIPHIER_PIN_DRV_4_8,
                             63, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", 0,
                             64, UNIPHIER_PIN_DRV_4_8,
                             64, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", 0,
                             65, UNIPHIER_PIN_DRV_4_8,
                             65, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", 0,
                             66, UNIPHIER_PIN_DRV_4_8,
                             66, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", 0,
                             67, UNIPHIER_PIN_DRV_4_8,
                             67, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", 0,
                             68, UNIPHIER_PIN_DRV_4_8,
                             68, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", 0,
                             69, UNIPHIER_PIN_DRV_4_8,
                             69, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", 0,
                             70, UNIPHIER_PIN_DRV_4_8,
                             70, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", 0,
                             71, UNIPHIER_PIN_DRV_4_8,
                             71, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 0,
                             72, UNIPHIER_PIN_DRV_4_8,
                             72, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", 0,
                             73, UNIPHIER_PIN_DRV_4_8,
                             73, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", 0,
                             74, UNIPHIER_PIN_DRV_4_8,
                             74, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", 0,
                             75, UNIPHIER_PIN_DRV_4_8,
                             75, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", 0,
                             76, UNIPHIER_PIN_DRV_4_8,
                             76, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", 0,
                             77, UNIPHIER_PIN_DRV_4_8,
                             77, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", 0,
                             78, UNIPHIER_PIN_DRV_4_8,
                             78, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", 0,
                             79, UNIPHIER_PIN_DRV_4_8,
                             79, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", 0,
                             80, UNIPHIER_PIN_DRV_4_8,
                             80, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", 0,
                             81, UNIPHIER_PIN_DRV_4_8,
                             81, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 0,
                             82, UNIPHIER_PIN_DRV_4_8,
                             82, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", 0,
                             83, UNIPHIER_PIN_DRV_4_8,
                             83, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", 0,
                             84, UNIPHIER_PIN_DRV_4_8,
                             84, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", 0,
                             85, UNIPHIER_PIN_DRV_4_8,
                             85, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", 0,
                             86, UNIPHIER_PIN_DRV_4_8,
                             86, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", 0,
                             87, UNIPHIER_PIN_DRV_4_8,
                             87, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(92, "AGCI", 3,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             132, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(93, "AGCR", 4,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             133, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(94, "AGCBS", 5,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             134, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(95, "IECOUT", 0,
                             88, UNIPHIER_PIN_DRV_4_8,
                             88, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(96, "ASMCK", 0,
                             89, UNIPHIER_PIN_DRV_4_8,
                             89, UNIPHIER_PIN_PULL_DOWN),
        UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
@@ -325,31 +325,31 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
        UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE,
                             93, UNIPHIER_PIN_DRV_4_8,
                             93, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0,
                             94, UNIPHIER_PIN_DRV_4_8,
                             94, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(102, "SDA0", 10,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(103, "SCL0", 10,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(104, "SDA1", 11,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(105, "SCL1", 11,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", 12,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", 12,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", 13,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", 13,
                             -1, UNIPHIER_PIN_DRV_FIXED_4,
                             -1, UNIPHIER_PIN_PULL_NONE),
        UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
@@ -358,76 +358,76 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
        UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
                             96, UNIPHIER_PIN_DRV_4_8,
                             96, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(112, "SBO1", 0,
                             97, UNIPHIER_PIN_DRV_4_8,
                             97, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(113, "SBI1", 0,
                             98, UNIPHIER_PIN_DRV_4_8,
                             98, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(114, "TXD1", 0,
                             99, UNIPHIER_PIN_DRV_4_8,
                             99, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(115, "RXD1", 0,
                             100, UNIPHIER_PIN_DRV_4_8,
                             100, UNIPHIER_PIN_PULL_UP),
-       UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(116, "HIN", 1,
                             -1, UNIPHIER_PIN_DRV_FIXED_5,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(117, "VIN", 2,
                             -1, UNIPHIER_PIN_DRV_FIXED_5,
                             -1, UNIPHIER_PIN_PULL_NONE),
-       UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(118, "TCON0", 0,
                             101, UNIPHIER_PIN_DRV_4_8,
                             101, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(119, "TCON1", 0,
                             102, UNIPHIER_PIN_DRV_4_8,
                             102, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(120, "TCON2", 0,
                             103, UNIPHIER_PIN_DRV_4_8,
                             103, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(121, "TCON3", 0,
                             104, UNIPHIER_PIN_DRV_4_8,
                             104, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(122, "TCON4", 0,
                             105, UNIPHIER_PIN_DRV_4_8,
                             105, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(123, "TCON5", 0,
                             106, UNIPHIER_PIN_DRV_4_8,
                             106, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(124, "TCON6", 0,
                             107, UNIPHIER_PIN_DRV_4_8,
                             107, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(125, "TCON7", 0,
                             108, UNIPHIER_PIN_DRV_4_8,
                             108, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(126, "TCON8", 0,
                             109, UNIPHIER_PIN_DRV_4_8,
                             109, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(127, "PWMA", 0,
                             110, UNIPHIER_PIN_DRV_4_8,
                             110, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(128, "XIRQ0", 0,
                             111, UNIPHIER_PIN_DRV_4_8,
                             111, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(129, "XIRQ1", 0,
                             112, UNIPHIER_PIN_DRV_4_8,
                             112, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(130, "XIRQ2", 0,
                             113, UNIPHIER_PIN_DRV_4_8,
                             113, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(131, "XIRQ3", 0,
                             114, UNIPHIER_PIN_DRV_4_8,
                             114, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(132, "XIRQ4", 0,
                             115, UNIPHIER_PIN_DRV_4_8,
                             115, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(133, "XIRQ5", 0,
                             116, UNIPHIER_PIN_DRV_4_8,
                             116, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(134, "XIRQ6", 0,
                             117, UNIPHIER_PIN_DRV_4_8,
                             117, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+       UNIPHIER_PINCTRL_PIN(135, "XIRQ7", 0,
                             118, UNIPHIER_PIN_DRV_4_8,
                             118, UNIPHIER_PIN_PULL_DOWN),
 };
index 01bf3476a79183714f62f67efcf5d8b17b70d497..a9567af7cec02c5a13102be118010e7bb7b1c888 100644 (file)
@@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = {
        AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
                 AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
        AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
-                AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
+                AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
        AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
-                AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
+                AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
        /* secondary switchable output of DCDC1 */
        AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100,
                    AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
index 7849187d91aea909fdd9d0ce5bbabb35fc2e5736..8a34f6acc801531ce8eb16882fed2b04ed4c874c 100644 (file)
@@ -1403,6 +1403,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
                        return 0;
                }
 
+               /* Did the lookup explicitly defer for us? */
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+
                if (have_full_constraints()) {
                        r = dummy_regulator_rdev;
                } else {
index add419d6ff34996ed4aab8a145aee637ee987dbf..a56a7b243e91fae96b05cae0118d96e9d284dd7b 100644 (file)
@@ -212,6 +212,17 @@ static const struct file_operations twa_fops = {
        .llseek         = noop_llseek,
 };
 
+/*
+ * The controllers use an inline buffer instead of a mapped SGL for small,
+ * single entry buffers.  Note that we treat a zero-length transfer like
+ * a mapped SGL.
+ */
+static bool twa_command_mapped(struct scsi_cmnd *cmd)
+{
+       return scsi_sg_count(cmd) != 1 ||
+               scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
+}
+
 /* This function will complete an aen request from the isr */
 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 {
@@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
                                }
 
                                /* Now complete the io */
-                               scsi_dma_unmap(cmd);
+                               if (twa_command_mapped(cmd))
+                                       scsi_dma_unmap(cmd);
                                cmd->scsi_done(cmd);
                                tw_dev->state[request_id] = TW_S_COMPLETED;
                                twa_free_request_id(tw_dev, request_id);
@@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
                                struct scsi_cmnd *cmd = tw_dev->srb[i];
 
                                cmd->result = (DID_RESET << 16);
-                               scsi_dma_unmap(cmd);
+                               if (twa_command_mapped(cmd))
+                                       scsi_dma_unmap(cmd);
                                cmd->scsi_done(cmd);
                        }
                }
@@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
        retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
        switch (retval) {
        case SCSI_MLQUEUE_HOST_BUSY:
-               scsi_dma_unmap(SCpnt);
+               if (twa_command_mapped(SCpnt))
+                       scsi_dma_unmap(SCpnt);
                twa_free_request_id(tw_dev, request_id);
                break;
        case 1:
                SCpnt->result = (DID_ERROR << 16);
-               scsi_dma_unmap(SCpnt);
+               if (twa_command_mapped(SCpnt))
+                       scsi_dma_unmap(SCpnt);
                done(SCpnt);
                tw_dev->state[request_id] = TW_S_COMPLETED;
                twa_free_request_id(tw_dev, request_id);
@@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
                /* Map sglist from scsi layer to cmd packet */
 
                if (scsi_sg_count(srb)) {
-                       if ((scsi_sg_count(srb) == 1) &&
-                           (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+                       if (!twa_command_mapped(srb)) {
                                if (srb->sc_data_direction == DMA_TO_DEVICE ||
                                    srb->sc_data_direction == DMA_BIDIRECTIONAL)
                                        scsi_sg_copy_to_buffer(srb,
@@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
 {
        struct scsi_cmnd *cmd = tw_dev->srb[request_id];
 
-       if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+       if (!twa_command_mapped(cmd) &&
            (cmd->sc_data_direction == DMA_FROM_DEVICE ||
             cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
                if (scsi_sg_count(cmd) == 1) {
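twa_command_mapped() centralizes the decision the driver already makes at submit time: a single-entry buffer shorter than TW_MIN_SGL_LENGTH goes through the inline buffer (a copy, never DMA-mapped), everything else is mapped as an SGL. Every completion and error path must consult the same predicate, or an inline-buffer command would be "unmapped" when it never was. The guard, as applied throughout the hunks above:

/* Illustrative completion-path guard (mirrors the patch):
 *
 *	if (twa_command_mapped(cmd))
 *		scsi_dma_unmap(cmd);
 *	cmd->scsi_done(cmd);
 */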
index de6feb8964c912e8f844010f9e6af32760481826..804806e1cbb4be79d3223a2d5684de4c6ff58afd 100644 (file)
@@ -160,7 +160,7 @@ static struct scsi_transport_template *cxgb4i_stt;
 
 #define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))
 #define RCV_BUFSIZ_MASK                0x3FFU
-#define MAX_IMM_TX_PKT_LEN     128
+#define MAX_IMM_TX_PKT_LEN     256
 
 static int push_tx_frames(struct cxgbi_sock *, int);
 
index 33c74d3436c947a7f11ca22498206f6efa97fcc2..6bffd91b973a475d614500a077be0034dbf6786f 100644 (file)
@@ -976,13 +976,13 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
        wake_up(&conn->ehwait);
 }
 
-static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
 {
         struct iscsi_nopout hdr;
        struct iscsi_task *task;
 
        if (!rhdr && conn->ping_task)
-               return;
+               return -EINVAL;
 
        memset(&hdr, 0, sizeof(struct iscsi_nopout));
        hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
@@ -996,13 +996,16 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
                hdr.ttt = RESERVED_ITT;
 
        task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
-       if (!task)
+       if (!task) {
                iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
-       else if (!rhdr) {
+               return -EIO;
+       } else if (!rhdr) {
                /* only track our nops */
                conn->ping_task = task;
                conn->last_ping = jiffies;
        }
+
+       return 0;
 }
 
 static int iscsi_nop_out_rsp(struct iscsi_task *task,
@@ -2092,8 +2095,10 @@ static void iscsi_check_transport_timeouts(unsigned long data)
        if (time_before_eq(last_recv + recv_timeout, jiffies)) {
                /* send a ping to try to provoke some traffic */
                ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
-               iscsi_send_nopout(conn, NULL);
-               next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
+               if (iscsi_send_nopout(conn, NULL))
+                       next_timeout = jiffies + (1 * HZ);
+               else
+                       next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
        } else
                next_timeout = last_recv + recv_timeout;
 
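Making iscsi_send_nopout() return an error lets the timeout handler below distinguish "ping queued" from "ping never left", so a failed send is retried after one second instead of silently waiting out a full ping_timeout window for a reply that can never arrive. A minimal user-space sketch of that rescheduling rule (names are illustrative, not kernel API):

#include <stdbool.h>
#include <time.h>

/* Hypothetical helper mirroring the hunk above: pick the next timer
 * deadline depending on whether the probe was actually sent.
 */
static time_t next_probe_deadline(bool send_failed, time_t now,
                                  time_t last_ping, time_t ping_timeout)
{
        if (send_failed)
                return now + 1;                 /* retry shortly */
        return last_ping + ping_timeout;        /* wait for the reply */
}
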
index edb044a7b56d348a269634212155edce3a89f9b8..0a2168e69bbcd31c91a995dae5439a1485cc0a7d 100644 (file)
@@ -111,7 +111,7 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
 
        dh = __scsi_dh_lookup(name);
        if (!dh) {
-               request_module(name);
+               request_module("scsi_dh_%s", name);
                dh = __scsi_dh_lookup(name);
        }
 
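request_module() takes a printf-style format, and the SCSI device-handler modules carry a scsi_dh_ prefix (scsi_dh_alua, scsi_dh_rdac, and so on), so loading by the bare handler name could never succeed. The lookup/load/retry shape used here, in isolation (a sketch; __scsi_dh_lookup is the file-local registry lookup seen above):

dh = __scsi_dh_lookup(name);                   /* fast path: registered */
if (!dh) {
        request_module("scsi_dh_%s", name);    /* e.g. loads scsi_dh_alua */
        dh = __scsi_dh_lookup(name);           /* retry after module init */
}
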
index cbfc5990052b6b2733ae1c8a81467d3a0e9e70f4..126a48c6431e5a5d9798aed3472916b06ef476c8 100644 (file)
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req)
 static void scsi_mq_done(struct scsi_cmnd *cmd)
 {
        trace_scsi_dispatch_cmd_done(cmd);
-       blk_mq_complete_request(cmd->request);
+       blk_mq_complete_request(cmd->request, cmd->request->errors);
 }
 
 static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
index 3cf9faa6cc3fe871174ec1b2777472b0ac4c6883..a85d863d4a442f2f30633db5de0ff469ee9c6348 100644 (file)
@@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev)
                goto free_master;
        }
 
-       dspi->irq = platform_get_irq(pdev, 0);
-       if (dspi->irq <= 0) {
+       ret = platform_get_irq(pdev, 0);
+       if (ret == 0)
                ret = -EINVAL;
+       if (ret < 0)
                goto free_master;
-       }
+       dspi->irq = ret;
 
        ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
                                dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
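
platform_get_irq() returns a negative errno on failure, but it can also return 0, which is not a valid Linux IRQ number; keeping the result in ret funnels both cases into the error path before dspi->irq is ever assigned. The probe() idiom in isolation (dev is a placeholder driver struct):

int irq = platform_get_irq(pdev, 0);

if (irq == 0)
        irq = -EINVAL;          /* 0 is not a valid IRQ */
if (irq < 0)
        return irq;             /* propagates -EPROBE_DEFER, -ENXIO, ... */
dev->irq = irq;
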
index 769b61193d87ef29c7868465c50e9b8ab87ee045..a9bc6e23fc2582f39c5a753638979fba15451e61 100644 (file)
@@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 
                prefetchw(&page->flags);
                ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
-                                           GFP_KERNEL);
+                                           GFP_NOFS);
                if (ret == 0) {
                        unlock_page(page);
                } else {
index 4299cf45f947ded9433fa045c1cb54bc957a02c4..5e1f16c36b49adfd45dbd2221435fd9bcda57daa 100644 (file)
@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
        __this_cpu_write(reporting_keystroke, true);
        input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
        input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
+       input_sync(virt_keyboard);
        __this_cpu_write(reporting_keystroke, false);
 
        /* reenable preemption */
index 20932cc9c8f71681038bf5e505fec87d9c402280..b09023b071696c2a5d25e003dcff798b42235602 100644 (file)
@@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
                spin_lock_irqsave(&tty->ctrl_lock, flags);
                tty->ctrl_status |= TIOCPKT_FLUSHREAD;
                spin_unlock_irqrestore(&tty->ctrl_lock, flags);
-               if (waitqueue_active(&tty->link->read_wait))
-                       wake_up_interruptible(&tty->link->read_wait);
+               wake_up_interruptible(&tty->link->read_wait);
        }
 }
 
@@ -1382,8 +1381,7 @@ handle_newline:
                        put_tty_queue(c, ldata);
                        smp_store_release(&ldata->canon_head, ldata->read_head);
                        kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-                       if (waitqueue_active(&tty->read_wait))
-                               wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+                       wake_up_interruptible_poll(&tty->read_wait, POLLIN);
                        return 0;
                }
        }
@@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
 
        if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
                kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-               if (waitqueue_active(&tty->read_wait))
-                       wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+               wake_up_interruptible_poll(&tty->read_wait, POLLIN);
        }
 }
 
@@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
        }
 
        /* The termios change makes the tty ready for I/O */
-       if (waitqueue_active(&tty->write_wait))
-               wake_up_interruptible(&tty->write_wait);
-       if (waitqueue_active(&tty->read_wait))
-               wake_up_interruptible(&tty->read_wait);
+       wake_up_interruptible(&tty->write_wait);
+       wake_up_interruptible(&tty->read_wait);
 }
 
 /**
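
These four n_tty hunks all drop the same micro-optimisation. waitqueue_active() is only safe when the caller supplies its own memory barrier between setting the wakeup condition and polling the queue; without one, the waker can observe an empty queue before a sleeping reader becomes visible on it, and the wakeup is lost. An unconditional wake_up_interruptible() needs no barrier because it serialises on the wait-queue lock. The racy shape, for reference:

/* Lost-wakeup race the patch closes (schematic, not kernel source):
 *
 *   waiter                              waker
 *   ------                              -----
 *   prepare_to_wait(&wq, ...);          cond = true;
 *   if (!cond)                          smp_mb();  <-- required barrier
 *           schedule();                 if (waitqueue_active(&wq))
 *                                               wake_up(&wq);
 *
 * Without the barrier the waker may see an empty queue before the
 * waiter is visible on it, and the wakeup is lost.  Waking
 * unconditionally serialises on the wait-queue lock instead.
 */
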
index b1e0ba3e525b069d9649dff9d7cd4a661f2c2014..0bbf34035d6a51edb267d2f53c66fc13d7b54260 100644 (file)
@@ -261,6 +261,14 @@ configured less than Maximum supported fifo bytes */
                                  UART_FCR7_64BYTE,
                .flags          = UART_CAP_FIFO,
        },
+       [PORT_RT2880] = {
+               .name           = "Palmchip BK-3103",
+               .fifo_size      = 16,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .rxtrig_bytes   = {1, 4, 8, 14},
+               .flags          = UART_CAP_FIFO,
+       },
 };
 
 /* Uart divisor latch read */
index 5ca5cf3e9359cf17f9a3aaebbff028ecada3a910..538ea03bc101a2994324d2ce33b8f7b237c12c78 100644 (file)
@@ -2786,7 +2786,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
        ret = atmel_init_gpios(port, &pdev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to initialize GPIOs.");
-               goto err;
+               goto err_clear_bit;
        }
 
        ret = atmel_init_port(port, pdev);
index fe3d41cc841632134fd907b1fb7af08f0e9d6e81..d0388a071ba1d474025a74fec8cfb80f5a1ed4a0 100644 (file)
@@ -1631,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
        int locked = 1;
        int retval;
 
-       retval = clk_prepare_enable(sport->clk_per);
+       retval = clk_enable(sport->clk_per);
        if (retval)
                return;
-       retval = clk_prepare_enable(sport->clk_ipg);
+       retval = clk_enable(sport->clk_ipg);
        if (retval) {
-               clk_disable_unprepare(sport->clk_per);
+               clk_disable(sport->clk_per);
                return;
        }
 
@@ -1675,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
        if (locked)
                spin_unlock_irqrestore(&sport->port.lock, flags);
 
-       clk_disable_unprepare(sport->clk_ipg);
-       clk_disable_unprepare(sport->clk_per);
+       clk_disable(sport->clk_ipg);
+       clk_disable(sport->clk_per);
 }
 
 /*
@@ -1777,7 +1777,15 @@ imx_console_setup(struct console *co, char *options)
 
        retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
 
-       clk_disable_unprepare(sport->clk_ipg);
+       clk_disable(sport->clk_ipg);
+       if (retval) {
+               clk_unprepare(sport->clk_ipg);
+               goto error_console;
+       }
+
+       retval = clk_prepare(sport->clk_per);
+       if (retval)
+               clk_disable_unprepare(sport->clk_ipg);
 
 error_console:
        return retval;
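
The console ->write() callback can run in atomic context (interrupts off, port lock held), where clk_prepare()/clk_unprepare() are not allowed because they may sleep; only clk_enable()/clk_disable() are atomic-safe. These hunks therefore move the sleeping prepare step into console setup and leave just the enable/disable pair in the write path. The split, reduced to its essentials (my_console_* are placeholder names, real clk API):

static int my_console_setup(struct clk *clk)
{
        return clk_prepare(clk);        /* may sleep: do it once, here */
}

static void my_console_write(struct clk *clk)
{
        if (clk_enable(clk))            /* atomic-safe fast path */
                return;
        /* ... write characters to the UART FIFO ... */
        clk_disable(clk);
}
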
index 5a3fa89138801ea63907ec102fbb589b36d7201c..a660ab181cca7357c59c7256303628eb8bb929a9 100644 (file)
@@ -242,7 +242,10 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
        atomic_inc(&buf->priority);
 
        mutex_lock(&buf->lock);
-       while ((next = buf->head->next) != NULL) {
+       /* paired w/ release in __tty_buffer_request_room; ensures there are
+        * no pending memory accesses to the freed buffer
+        */
+       while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
                tty_buffer_free(port, buf->head);
                buf->head = next;
        }
@@ -290,7 +293,10 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
                if (n != NULL) {
                        n->flags = flags;
                        buf->tail = n;
-                       b->commit = b->used;
+                       /* paired w/ acquire in flush_to_ldisc(); ensures
+                        * flush_to_ldisc() sees buffer data.
+                        */
+                       smp_store_release(&b->commit, b->used);
                        /* paired w/ acquire in flush_to_ldisc(); ensures the
                         * latest commit value can be read before the head is
                         * advanced to the next buffer
@@ -393,7 +399,10 @@ void tty_schedule_flip(struct tty_port *port)
 {
        struct tty_bufhead *buf = &port->buf;
 
-       buf->tail->commit = buf->tail->used;
+       /* paired w/ acquire in flush_to_ldisc(); ensures
+        * flush_to_ldisc() sees buffer data.
+        */
+       smp_store_release(&buf->tail->commit, buf->tail->used);
        schedule_work(&buf->work);
 }
 EXPORT_SYMBOL(tty_schedule_flip);
@@ -467,7 +476,7 @@ static void flush_to_ldisc(struct work_struct *work)
        struct tty_struct *tty;
        struct tty_ldisc *disc;
 
-       tty = port->itty;
+       tty = READ_ONCE(port->itty);
        if (tty == NULL)
                return;
 
@@ -491,7 +500,10 @@ static void flush_to_ldisc(struct work_struct *work)
                 * is advancing to the next buffer
                 */
                next = smp_load_acquire(&head->next);
-               count = head->commit - head->read;
+               /* paired w/ release in __tty_buffer_request_room() or in
+                * tty_buffer_flush(); ensures we see the committed buffer data
+                */
+               count = smp_load_acquire(&head->commit) - head->read;
                if (!count) {
                        if (next == NULL) {
                                check_other_closed(tty);
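
The pairing rule these tty_buffer hunks document: the producer must publish the commit index with release semantics only after the buffer bytes are written, and the consumer must read it with acquire semantics before touching the bytes it covers. A self-contained single-producer/single-consumer analogue using C11 atomics:

#include <stdatomic.h>
#include <stddef.h>

struct ring {
        unsigned char data[256];
        atomic_size_t commit;
};

static void produce(struct ring *r, unsigned char b)
{
        size_t used = atomic_load_explicit(&r->commit, memory_order_relaxed);

        r->data[used] = b;
        /* paired with the acquire in consume() */
        atomic_store_explicit(&r->commit, used + 1, memory_order_release);
}

static size_t consume(struct ring *r, size_t read)
{
        size_t commit = atomic_load_explicit(&r->commit,
                                             memory_order_acquire);

        while (read < commit) {
                /* safe: acquire ordered after the producer's writes */
                (void)r->data[read++];
        }
        return read;
}
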
index 02785d844354be01b9774ad10e70ab398297c6da..2eefaa6e3e3a4af9a5ab2b03cf03f9e75a04ca1d 100644 (file)
@@ -2128,8 +2128,24 @@ retry_open:
        if (!noctty &&
            current->signal->leader &&
            !current->signal->tty &&
-           tty->session == NULL)
-               __proc_set_tty(tty);
+           tty->session == NULL) {
+               /*
+                * Don't let a process that only has write access to the tty
+                * obtain the privileges associated with having a tty as
+                * controlling terminal (being able to reopen it with full
+                * access through /dev/tty, being able to perform pushback).
+                * Many distributions set the group of all ttys to "tty" and
+                * grant write-only access to all terminals for setgid tty
+                * binaries, which should not imply full privileges on all ttys.
+                *
+                * This could theoretically break old code that performs open()
+                * on a write-only file descriptor. In that case, it might be
+                * necessary to also permit this if
+                * inode_permission(inode, MAY_READ) == 0.
+                */
+               if (filp->f_mode & FMODE_READ)
+                       __proc_set_tty(tty);
+       }
        spin_unlock_irq(&current->sighand->siglock);
        read_unlock(&tasklist_lock);
        tty_unlock(tty);
@@ -2418,7 +2434,7 @@ static int fionbio(struct file *file, int __user *p)
  *             Takes ->siglock() when updating signal->tty
  */
 
-static int tiocsctty(struct tty_struct *tty, int arg)
+static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
 {
        int ret = 0;
 
@@ -2452,6 +2468,13 @@ static int tiocsctty(struct tty_struct *tty, int arg)
                        goto unlock;
                }
        }
+
+       /* See the comment in tty_open(). */
+       if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
+               ret = -EPERM;
+               goto unlock;
+       }
+
        proc_set_tty(tty);
 unlock:
        read_unlock(&tasklist_lock);
@@ -2844,7 +2867,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                no_tty();
                return 0;
        case TIOCSCTTY:
-               return tiocsctty(tty, arg);
+               return tiocsctty(tty, file, arg);
        case TIOCGPGRP:
                return tiocgpgrp(tty, real_tty, p);
        case TIOCSPGRP:
@@ -3151,13 +3174,18 @@ struct class *tty_class;
 static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
                unsigned int index, unsigned int count)
 {
+       int err;
+
        /* init here, since reused cdevs cause crashes */
        driver->cdevs[index] = cdev_alloc();
        if (!driver->cdevs[index])
                return -ENOMEM;
-       cdev_init(driver->cdevs[index], &tty_fops);
+       driver->cdevs[index]->ops = &tty_fops;
        driver->cdevs[index]->owner = driver->owner;
-       return cdev_add(driver->cdevs[index], dev, count);
+       err = cdev_add(driver->cdevs[index], dev, count);
+       if (err)
+               kobject_put(&driver->cdevs[index]->kobj);
+       return err;
 }
 
 /**
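
Two lifetime rules appear to drive the tty_cdev_add() hunk: a cdev from cdev_alloc() carries a dynamically released kobject whose release callback frees the cdev, so (a) re-initialising it with cdev_init() would overwrite that kobject's type and leak the allocation, hence the fields are set directly, and (b) on cdev_add() failure the object must be dropped with kobject_put(), never kfree(). In isolation (my_fops and devt stand in for the driver's own values):

struct cdev *c = cdev_alloc();
if (!c)
        return -ENOMEM;
c->ops = &my_fops;                      /* don't call cdev_init() here */
c->owner = THIS_MODULE;
err = cdev_add(c, devt, 1);
if (err)
        kobject_put(&c->kobj);          /* release callback frees c */
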
index d85abfed84ccaa2327820f1b35cabac11422d647..f5a381945db2886a77e23a8fcf40ba9a34bb7fe7 100644 (file)
@@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
 
+       /* Logitech ConferenceCam CC3000e */
+       { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
+       { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
+
+       /* Logitech PTZ Pro Camera */
+       { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+
        /* Logitech Quickcam Fusion */
        { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Philips PSC805 audio device */
        { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Plantronics Audio 655 DSP */
+       { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Plantronics Audio 648 USB */
+       { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Artisman Watchdog Dongle */
        { USB_DEVICE(0x04b4, 0x0526), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
index d1b81539d6320b3baed7c5bc9bc4cdde7440aee9..d6199507f86140b15439463f97f234aa7955d6fe 100644 (file)
@@ -159,8 +159,10 @@ static int ep_bd_list_alloc(struct bdc_ep *ep)
                bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
                                                        GFP_ATOMIC,
                                                        &dma);
-               if (!bd_table->start_bd)
+               if (!bd_table->start_bd) {
+                       kfree(bd_table);
                        goto fail;
+               }
 
                bd_table->dma = dma;
 
index 3ad5d19e4d04ede93fb8bc34debdbb9fcf2f4704..23c794813e6a923bff5b0a3719abbea860ee416d 100644 (file)
@@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data,
        if (this_time > max)
                this_time = max;
 
-       memcpy(data, dev->buf, this_time);
+       memcpy(data, dev->buf + dev->used, this_time);
 
        dev->used += this_time;
 
index 7b98e1d9194cb3571452c7143603f4e9d9966244..d82fa36c346503985867cc55b0e40dfd724ebf12 100644 (file)
@@ -476,6 +476,11 @@ static const struct of_device_id usbhs_of_match[] = {
                .compatible = "renesas,usbhs-r8a7794",
                .data = (void *)USBHS_TYPE_RCAR_GEN2,
        },
+       {
+               /* Gen3 is compatible with Gen2 */
+               .compatible = "renesas,usbhs-r8a7795",
+               .data = (void *)USBHS_TYPE_RCAR_GEN2,
+       },
        { },
 };
 MODULE_DEVICE_TABLE(of, usbhs_of_match);
@@ -493,7 +498,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
                return NULL;
 
        dparam = &info->driver_param;
-       dparam->type = of_id ? (u32)of_id->data : 0;
+       dparam->type = of_id ? (uintptr_t)of_id->data : 0;
        if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp))
                dparam->buswait_bwait = tmp;
        gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0,
index 0e5fde1d3ffbe5a152035f33063afa98bf84f33e..9f9a7bef1ff6d46d80fe8cb6dcfeea5a3e26729d 100644 (file)
@@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
        if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) {
                dev_err(dev, "Invalid waveform\n");
                err = -EINVAL;
-               goto err_failed;
+               goto err_fw;
        }
 
        mutex_lock(&(par->io_lock));
@@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
        mutex_unlock(&(par->io_lock));
        if (err < 0) {
                dev_err(dev, "Failed to store broadsheet waveform\n");
-               goto err_failed;
+               goto err_fw;
        }
 
        dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size);
 
-       return len;
+       err = len;
 
+err_fw:
+       release_firmware(fw_entry);
 err_failed:
        return err;
 }
index 7fa2e6f9e322d1e2223116474800b515684abfc2..b335c1ae8625106efff818d696ebad532ade7f17 100644 (file)
@@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
 static int fsl_diu_resume(struct platform_device *ofdev)
 {
        struct fsl_diu_data *data;
+       unsigned int i;
 
        data = dev_get_drvdata(&ofdev->dev);
-       enable_lcdc(data->fsl_diu_info);
+
+       fsl_diu_enable_interrupts(data);
+       update_lcdc(data->fsl_diu_info);
+       for (i = 0; i < NUM_AOIS; i++) {
+               if (data->mfb[i].count)
+                       fsl_diu_enable_panel(&data->fsl_diu_info[i]);
+       }
 
        return 0;
 }
index 9b8bebdf8f86e1209f0ca2f6f9779e8c64fa2e43..f9ec5c0484fabbd8d6f2cc5b5e5897c003e07b10 100644 (file)
@@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = {
        { .compatible = "fujitsu,coral", },
        { /* end */ }
 };
+MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl);
 
 static struct platform_driver of_platform_mb862xxfb_driver = {
        .driver = {
index a8ce920fa797d335d2dbfbbc1c9d8f93a4378959..d811e6dcaef727588cdc65695673a4f7144f0f30 100644 (file)
@@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev)
 
        adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
        if (adapter_node) {
-               adapter = of_find_i2c_adapter_by_node(adapter_node);
+               adapter = of_get_i2c_adapter_by_node(adapter_node);
                if (adapter == NULL) {
                        dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
                        omap_dss_put_device(ddata->in);
index 90cbc4c3406c719909f3495cb97533face292d3c..c581231c74a53bb837dcc24da190202ed56cb648 100644 (file)
@@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = {
        { .compatible = "omapdss,sony,acx565akm", },
        {},
 };
+MODULE_DEVICE_TABLE(of, acx565akm_of_match);
 
 static struct spi_driver acx565akm_driver = {
        .driver = {
index 7ed9a227f5eaf006ed5c2a9759ee9db299d114e3..01b43e9ce941acb8751c0c2e8294e19db7ce927c 100644 (file)
@@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data,
        writemmr(par, DST1, point(x, y));
        writemmr(par, DST2, point(x + w - 1, y + h - 1));
 
-       memcpy(par->io_virt + 0x10000, data, 4 * size);
+       iowrite32_rep(par->io_virt + 0x10000, data, size);
 }
 
 static void blade_copy_rect(struct tridentfb_par *par,
@@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par)
 static inline void set_lwidth(struct tridentfb_par *par, int width)
 {
        write3X4(par, VGA_CRTC_OFFSET, width & 0xFF);
-       write3X4(par, AddColReg,
-                (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
+       /* chips older than TGUI9660 have only 1 width bit in AddColReg */
+       /* touching the other one breaks I2C/DDC */
+       if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320)
+               write3X4(par, AddColReg,
+                    (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4));
+       else
+               write3X4(par, AddColReg,
+                    (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
 }
 
 /* For resolutions smaller than FP resolution stretch */
index 32d8275e4c88485b2b522f56733e90ba614fc7b2..8a1076beecd33aa29891849f5feaa36b42027036 100644 (file)
@@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np)
                         */
                        pr_err("%s: error in timing %d\n",
                                of_node_full_name(np), disp->num_timings + 1);
+                       kfree(dt);
                        goto timingfail;
                }
 
index ecbc63d3143e78d53a9ab0e3dd2091686e51d620..9a2ec79e8cfb6c4ad26a5578e39f62f8fa80226b 100644 (file)
@@ -1828,7 +1828,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
        int found = 0;
        struct extent_buffer *eb;
        struct btrfs_inode_extref *extref;
-       struct extent_buffer *leaf;
        u32 item_size;
        u32 cur_offset;
        unsigned long ptr;
@@ -1856,9 +1855,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
                btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
                btrfs_release_path(path);
 
-               leaf = path->nodes[0];
-               item_size = btrfs_item_size_nr(leaf, slot);
-               ptr = btrfs_item_ptr_offset(leaf, slot);
+               item_size = btrfs_item_size_nr(eb, slot);
+               ptr = btrfs_item_ptr_offset(eb, slot);
                cur_offset = 0;
 
                while (cur_offset < item_size) {
@@ -1872,7 +1870,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
                        if (ret)
                                break;
 
-                       cur_offset += btrfs_inode_extref_name_len(leaf, extref);
+                       cur_offset += btrfs_inode_extref_name_len(eb, extref);
                        cur_offset += sizeof(*extref);
                }
                btrfs_tree_read_unlock_blocking(eb);
index 295795aebe0b42330cc1147e02340eb2c59f1d7b..1e60d00d4ea7c42104614ede9e203a1f56e6408a 100644 (file)
@@ -2847,6 +2847,8 @@ int open_ctree(struct super_block *sb,
            !extent_buffer_uptodate(chunk_root->node)) {
                printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
                       sb->s_id);
+               if (!IS_ERR(chunk_root->node))
+                       free_extent_buffer(chunk_root->node);
                chunk_root->node = NULL;
                goto fail_tree_roots;
        }
@@ -2885,6 +2887,8 @@ retry_root_backup:
            !extent_buffer_uptodate(tree_root->node)) {
                printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
                       sb->s_id);
+               if (!IS_ERR(tree_root->node))
+                       free_extent_buffer(tree_root->node);
                tree_root->node = NULL;
                goto recovery_tree_root;
        }
index 8d052209f473be1d0959b6e65bded71b92c05584..2513a7f533342c827c5c5e1150de6a3d196879ac 100644 (file)
@@ -112,11 +112,11 @@ static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
        u32 generation;
 
        if (fh_type == FILEID_BTRFS_WITH_PARENT) {
-               if (fh_len !=  BTRFS_FID_SIZE_CONNECTABLE)
+               if (fh_len <  BTRFS_FID_SIZE_CONNECTABLE)
                        return NULL;
                root_objectid = fid->root_objectid;
        } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) {
-               if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT)
+               if (fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
                        return NULL;
                root_objectid = fid->parent_root_objectid;
        } else
@@ -136,11 +136,11 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
        u32 generation;
 
        if ((fh_type != FILEID_BTRFS_WITH_PARENT ||
-            fh_len != BTRFS_FID_SIZE_CONNECTABLE) &&
+            fh_len < BTRFS_FID_SIZE_CONNECTABLE) &&
            (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT ||
-            fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
+            fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
            (fh_type != FILEID_BTRFS_WITHOUT_PARENT ||
-            fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE))
+            fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE))
                return NULL;
 
        objectid = fid->objectid;
index 9f960420133307b5d9b26c7b07bd37d64bec89cf..601d7d45d164a7e91477748a900bbef8cf67d0b0 100644 (file)
@@ -2828,6 +2828,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_head *head;
        int ret;
        int run_all = count == (unsigned long)-1;
+       bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
 
        /* We'll clean this up in btrfs_cleanup_transaction */
        if (trans->aborted)
@@ -2844,6 +2845,7 @@ again:
 #ifdef SCRAMBLE_DELAYED_REFS
        delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
 #endif
+       trans->can_flush_pending_bgs = false;
        ret = __btrfs_run_delayed_refs(trans, root, count);
        if (ret < 0) {
                btrfs_abort_transaction(trans, root, ret);
@@ -2893,6 +2895,7 @@ again:
        }
 out:
        assert_qgroups_uptodate(trans);
+       trans->can_flush_pending_bgs = can_flush_pending_bgs;
        return 0;
 }
 
@@ -4306,7 +4309,8 @@ out:
         * the block groups that were made dirty during the lifetime of the
         * transaction.
         */
-       if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+       if (trans->can_flush_pending_bgs &&
+           trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
                btrfs_create_pending_block_groups(trans, trans->root);
                btrfs_trans_release_chunk_metadata(trans);
        }
@@ -9560,7 +9564,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
        struct btrfs_block_group_item item;
        struct btrfs_key key;
        int ret = 0;
+       bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
 
+       trans->can_flush_pending_bgs = false;
        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
                if (ret)
                        goto next;
@@ -9581,6 +9587,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 next:
                list_del_init(&block_group->bg_list);
        }
+       trans->can_flush_pending_bgs = can_flush_pending_bgs;
 }
 
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
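
The new can_flush_pending_bgs flag acts as a re-entrancy guard: both the delayed-ref runner and the pending-block-group creator save it, clear it for the duration of their critical region, and restore it on exit, so ending a transaction handle from inside either path cannot recurse into btrfs_create_pending_block_groups(). The save/clear/restore shape, generically:

bool saved = trans->can_flush_pending_bgs;

trans->can_flush_pending_bgs = false;
/* ... region that must not flush pending block groups ... */
trans->can_flush_pending_bgs = saved;
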
index e2357e31609a2e8469b38c7e95b66f6dd68fcd93..3915c9473e9445d4aeada81c8fb96af7fb521f2c 100644 (file)
@@ -3132,12 +3132,12 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
                                             get_extent_t *get_extent,
                                             struct extent_map **em_cached,
                                             struct bio **bio, int mirror_num,
-                                            unsigned long *bio_flags, int rw)
+                                            unsigned long *bio_flags, int rw,
+                                            u64 *prev_em_start)
 {
        struct inode *inode;
        struct btrfs_ordered_extent *ordered;
        int index;
-       u64 prev_em_start = (u64)-1;
 
        inode = pages[0]->mapping->host;
        while (1) {
@@ -3153,7 +3153,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 
        for (index = 0; index < nr_pages; index++) {
                __do_readpage(tree, pages[index], get_extent, em_cached, bio,
-                             mirror_num, bio_flags, rw, &prev_em_start);
+                             mirror_num, bio_flags, rw, prev_em_start);
                page_cache_release(pages[index]);
        }
 }
@@ -3163,7 +3163,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                               int nr_pages, get_extent_t *get_extent,
                               struct extent_map **em_cached,
                               struct bio **bio, int mirror_num,
-                              unsigned long *bio_flags, int rw)
+                              unsigned long *bio_flags, int rw,
+                              u64 *prev_em_start)
 {
        u64 start = 0;
        u64 end = 0;
@@ -3184,7 +3185,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
                                                  index - first_index, start,
                                                  end, get_extent, em_cached,
                                                  bio, mirror_num, bio_flags,
-                                                 rw);
+                                                 rw, prev_em_start);
                        start = page_start;
                        end = start + PAGE_CACHE_SIZE - 1;
                        first_index = index;
@@ -3195,7 +3196,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                __do_contiguous_readpages(tree, &pages[first_index],
                                          index - first_index, start,
                                          end, get_extent, em_cached, bio,
-                                         mirror_num, bio_flags, rw);
+                                         mirror_num, bio_flags, rw,
+                                         prev_em_start);
 }
 
 static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -4207,6 +4209,7 @@ int extent_readpages(struct extent_io_tree *tree,
        struct page *page;
        struct extent_map *em_cached = NULL;
        int nr = 0;
+       u64 prev_em_start = (u64)-1;
 
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                page = list_entry(pages->prev, struct page, lru);
@@ -4223,12 +4226,12 @@ int extent_readpages(struct extent_io_tree *tree,
                if (nr < ARRAY_SIZE(pagepool))
                        continue;
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ);
+                                  &bio, 0, &bio_flags, READ, &prev_em_start);
                nr = 0;
        }
        if (nr)
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ);
+                                  &bio, 0, &bio_flags, READ, &prev_em_start);
 
        if (em_cached)
                free_extent_map(em_cached);
index 0adf5422fce9d4b9fc62c2bbf319429b38aaec7c..3e3e6130637fa7268e6834c6fafe129de84345a7 100644 (file)
@@ -4639,6 +4639,11 @@ locked:
                bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
        }
 
+       if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
+               ret = -EINVAL;
+               goto out_bargs;
+       }
+
 do_balance:
        /*
         * Ownership of bctl and mutually_exclusive_operation_running
index aa72bfd28f7dcbd88c73452aafd2a3d9e7f42e00..a739b825bdd364cfa9cbf16edc9f978a68feb95f 100644 (file)
@@ -1920,10 +1920,12 @@ static int did_overwrite_ref(struct send_ctx *sctx,
        /*
         * We know that it is or will be overwritten. Check this now.
         * The current inode being processed might have been the one that caused
-        * inode 'ino' to be orphanized, therefore ow_inode can actually be the
-        * same as sctx->send_progress.
+        * inode 'ino' to be orphanized, therefore check if ow_inode matches
+        * the current inode being processed.
         */
-       if (ow_inode <= sctx->send_progress)
+       if ((ow_inode < sctx->send_progress) ||
+           (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
+            gen == sctx->cur_inode_gen))
                ret = 1;
        else
                ret = 0;
index 74bc3338418be39badb2eb73160c20b3e2240c74..a5b06442f0bf9d1630f201da3e0eb5c0422e8cc9 100644 (file)
@@ -557,6 +557,7 @@ again:
        h->delayed_ref_elem.seq = 0;
        h->type = type;
        h->allocating_chunk = false;
+       h->can_flush_pending_bgs = true;
        h->reloc_reserved = false;
        h->sync = false;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
index 87964bf8892d50f1da01abb725a1d3f6286279f9..a994bb097ee59c12bb0d5599f10c8a64b1954f43 100644 (file)
@@ -118,6 +118,7 @@ struct btrfs_trans_handle {
        short aborted;
        short adding_csums;
        bool allocating_chunk;
+       bool can_flush_pending_bgs;
        bool reloc_reserved;
        bool sync;
        unsigned int type;
index 2ca784a14e84bc2a00d0c3d1ec1a15290128edfc..595279a8b99fd461e24cb24df3805fa8401f3dd6 100644 (file)
@@ -376,6 +376,14 @@ struct map_lookup {
 #define BTRFS_BALANCE_ARGS_VRANGE      (1ULL << 4)
 #define BTRFS_BALANCE_ARGS_LIMIT       (1ULL << 5)
 
+#define BTRFS_BALANCE_ARGS_MASK                        \
+       (BTRFS_BALANCE_ARGS_PROFILES |          \
+        BTRFS_BALANCE_ARGS_USAGE |             \
+        BTRFS_BALANCE_ARGS_DEVID |             \
+        BTRFS_BALANCE_ARGS_DRANGE |            \
+        BTRFS_BALANCE_ARGS_VRANGE |            \
+        BTRFS_BALANCE_ARGS_LIMIT)
+
 /*
  * Profile changing flags.  When SOFT is set we won't relocate chunk if
  * it already has the target profile (even though it may be
index 27aea110e92365e1e91610579369215cc54644ea..c3cc1609025fa3a966c2d5b10f32626214a9e4ef 100644 (file)
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.07"
+#define CIFS_VERSION   "2.08"
 #endif                         /* _CIFSFS_H */
index e2a6af1508af2aef789d0caab21fedfa91d49c60..62203c387db45a23b05c1cadcc0946843ea5332f 100644 (file)
@@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
        struct page *page, *tpage;
        unsigned int expected_index;
        int rc;
+       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
        INIT_LIST_HEAD(tmplist);
 
@@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
         */
        __set_page_locked(page);
        rc = add_to_page_cache_locked(page, mapping,
-                                     page->index, GFP_KERNEL);
+                                     page->index, gfp);
 
        /* give up if we can't stick it in the cache */
        if (rc) {
@@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
                        break;
 
                __set_page_locked(page);
-               if (add_to_page_cache_locked(page, mapping, page->index,
-                                                               GFP_KERNEL)) {
+               if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
                        __clear_page_locked(page);
                        break;
                }
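
Masking GFP_KERNEL with mapping_gfp_mask() makes the page-cache allocation obey whatever restriction the filesystem placed on the mapping, typically a cleared __GFP_FS so reclaim cannot re-enter the filesystem while it already holds locks; the ext4, generic mpage, and ramfs hunks later in this diff apply the same rule. The pattern in two lines:

/* Respect per-mapping allocation constraints (e.g. a GFP_NOFS mapping). */
gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);

ret = add_to_page_cache_locked(page, mapping, page->index, gfp);
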
index f621b44cb8009fe87bf631e0a96c941fe63d3408..6b66dd5d15408676ab6510f7ce415164fe5c0571 100644 (file)
@@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
        struct tcon_link *tlink = NULL;
        struct cifs_tcon *tcon = NULL;
        struct TCP_Server_Info *server;
-       struct cifs_io_parms io_parms;
 
        /*
         * To avoid spurious oplock breaks from server, in the case of
@@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
                        rc = -ENOSYS;
                cifsFileInfo_put(open_file);
                cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
-               if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-                       unsigned int bytes_written;
-
-                       io_parms.netfid = open_file->fid.netfid;
-                       io_parms.pid = open_file->pid;
-                       io_parms.tcon = tcon;
-                       io_parms.offset = 0;
-                       io_parms.length = attrs->ia_size;
-                       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
-                                         NULL, NULL, 1);
-                       cifs_dbg(FYI, "Wrt seteof rc %d\n", rc);
-               }
        } else
                rc = -EINVAL;
 
@@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
        else
                rc = -ENOSYS;
        cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
-       if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-               __u16 netfid;
-               int oplock = 0;
 
-               rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN,
-                                  GENERIC_WRITE, CREATE_NOT_DIR, &netfid,
-                                  &oplock, NULL, cifs_sb->local_nls,
-                                  cifs_remap(cifs_sb));
-               if (rc == 0) {
-                       unsigned int bytes_written;
-
-                       io_parms.netfid = netfid;
-                       io_parms.pid = current->tgid;
-                       io_parms.tcon = tcon;
-                       io_parms.offset = 0;
-                       io_parms.length = attrs->ia_size;
-                       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL,
-                                         NULL,  1);
-                       cifs_dbg(FYI, "wrt seteof rc %d\n", rc);
-                       CIFSSMBClose(xid, tcon, netfid);
-               }
-       }
        if (tlink)
                cifs_put_tlink(tlink);
 
index ce83e2edbe0a22ae9858ec5a04caa4e2b6ad59d2..597a417ba94d3bb910f52e3f14119a197ff2d090 100644 (file)
@@ -922,7 +922,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        if (tcon && tcon->bad_network_name)
                return -ENOENT;
 
-       if ((tcon->seal) &&
+       if ((tcon && tcon->seal) &&
            ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
                cifs_dbg(VFS, "encryption requested but no server support");
                return -EOPNOTSUPP;
index bcfb14bfc1e49eb36d507c53ccd1c3f4f23135f9..a86d3cc2b38941b0e39f23be84e4986d8852ae42 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -285,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh,
 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
                        struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct address_space *mapping = inode->i_mapping;
        sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        void __pmem *addr;
@@ -292,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
        pgoff_t size;
        int error;
 
+       i_mmap_lock_read(mapping);
+
        /*
         * Check truncate didn't happen while we were allocating a block.
         * If it did, this block may or may not be still allocated to the
@@ -321,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
        error = vm_insert_mixed(vma, vaddr, pfn);
 
  out:
+       i_mmap_unlock_read(mapping);
+
        return error;
 }
 
@@ -382,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                         * from a read fault and we've raced with a truncate
                         */
                        error = -EIO;
-                       goto unlock;
+                       goto unlock_page;
                }
-       } else {
-               i_mmap_lock_write(mapping);
        }
 
        error = get_block(inode, block, &bh, 0);
        if (!error && (bh.b_size < PAGE_SIZE))
                error = -EIO;           /* fs corruption? */
        if (error)
-               goto unlock;
+               goto unlock_page;
 
        if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
                if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -403,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        if (!error && (bh.b_size < PAGE_SIZE))
                                error = -EIO;
                        if (error)
-                               goto unlock;
+                               goto unlock_page;
                } else {
-                       i_mmap_unlock_write(mapping);
                        return dax_load_hole(mapping, page, vmf);
                }
        }
@@ -417,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                else
                        clear_user_highpage(new_page, vaddr);
                if (error)
-                       goto unlock;
+                       goto unlock_page;
                vmf->page = page;
                if (!page) {
+                       i_mmap_lock_read(mapping);
                        /* Check we didn't race with truncate */
                        size = (i_size_read(inode) + PAGE_SIZE - 1) >>
                                                                PAGE_SHIFT;
                        if (vmf->pgoff >= size) {
+                               i_mmap_unlock_read(mapping);
                                error = -EIO;
-                               goto unlock;
+                               goto out;
                        }
                }
                return VM_FAULT_LOCKED;
@@ -461,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
        }
 
-       if (!page)
-               i_mmap_unlock_write(mapping);
  out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
@@ -471,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                return VM_FAULT_SIGBUS | major;
        return VM_FAULT_NOPAGE | major;
 
- unlock:
+ unlock_page:
        if (page) {
                unlock_page(page);
                page_cache_release(page);
-       } else {
-               i_mmap_unlock_write(mapping);
        }
-
        goto out;
 }
 EXPORT_SYMBOL(__dax_fault);
@@ -556,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
 
        bh.b_size = PMD_SIZE;
-       i_mmap_lock_write(mapping);
        length = get_block(inode, block, &bh, write);
        if (length)
                return VM_FAULT_SIGBUS;
+       i_mmap_lock_read(mapping);
 
        /*
         * If the filesystem isn't willing to tell us the length of a hole,
@@ -569,36 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
                goto fallback;
 
-       sector = bh.b_blocknr << (blkbits - 9);
-
-       if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-               int i;
-
-               length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
-                                               bh.b_size);
-               if (length < 0) {
-                       result = VM_FAULT_SIGBUS;
-                       goto out;
-               }
-               if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
-                       goto fallback;
-
-               for (i = 0; i < PTRS_PER_PMD; i++)
-                       clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
-               wmb_pmem();
-               count_vm_event(PGMAJFAULT);
-               mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-               result |= VM_FAULT_MAJOR;
-       }
-
        /*
         * If we allocated new storage, make sure no process has any
         * zero pages covering this hole
         */
        if (buffer_new(&bh)) {
-               i_mmap_unlock_write(mapping);
+               i_mmap_unlock_read(mapping);
                unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
-               i_mmap_lock_write(mapping);
+               i_mmap_lock_read(mapping);
        }
 
        /*
@@ -635,6 +612,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                result = VM_FAULT_NOPAGE;
                spin_unlock(ptl);
        } else {
+               sector = bh.b_blocknr << (blkbits - 9);
                length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
                                                bh.b_size);
                if (length < 0) {
@@ -644,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
                        goto fallback;
 
+               if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+                       int i;
+                       for (i = 0; i < PTRS_PER_PMD; i++)
+                               clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
+                       wmb_pmem();
+                       count_vm_event(PGMAJFAULT);
+                       mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+                       result |= VM_FAULT_MAJOR;
+               }
+
                result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
        }
 
  out:
+       i_mmap_unlock_read(mapping);
+
        if (buffer_unwritten(&bh))
                complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
 
-       i_mmap_unlock_write(mapping);
-
        return result;
 
  fallback:
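
The net effect of the DAX hunks is to demote i_mmap_lock_write() to i_mmap_lock_read() and shrink its scope to the windows where page-table entries are actually inserted: faults only need to exclude truncate, not each other, so concurrent faulters may proceed while truncate still holds the semaphore exclusively around its mapping shoot-down. Schematically, assuming the usual truncate-versus-fault protocol:

/*
 *   fault path                         truncate path
 *   ----------                         -------------
 *   i_mmap_lock_read(mapping);         i_size_write(inode, newsize);
 *   recheck i_size vs vmf->pgoff;      i_mmap_lock_write(mapping);
 *   insert pfn into page tables;       unmap_mapping_range(mapping, ...);
 *   i_mmap_unlock_read(mapping);       i_mmap_unlock_write(mapping);
 */
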
index 47728da7702cdf69d2977af996dea42170a8b07d..b46e9fc641960aeba81b48d61b6e933d724b5205 100644 (file)
@@ -63,7 +63,7 @@ config EXT4_FS
          If unsure, say N.
 
 config EXT4_USE_FOR_EXT2
-       bool "Use ext4 for ext2/ext3 file systems"
+       bool "Use ext4 for ext2 file systems"
        depends on EXT4_FS
        depends on EXT2_FS=n
        default y
index e26803fb210d3bf1134f500b85f43802b2ce7e58..560af043770462df6170acd4d523f9d0385884a3 100644 (file)
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
                if (pages) {
                        page = list_entry(pages->prev, struct page, lru);
                        list_del(&page->lru);
-                       if (add_to_page_cache_lru(page, mapping,
-                                                 page->index, GFP_KERNEL))
+                       if (add_to_page_cache_lru(page, mapping, page->index,
+                                       GFP_KERNEL & mapping_gfp_mask(mapping)))
                                goto next_page;
                }
 
index 778a4ddef77a21844b08af82058d3b188371dc01..a7c34274f2076bc36e9cb9797f682988e617417f 100644 (file)
@@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                sector_t *last_block_in_bio, struct buffer_head *map_bh,
-               unsigned long *first_logical_block, get_block_t get_block)
+               unsigned long *first_logical_block, get_block_t get_block,
+               gfp_t gfp)
 {
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
@@ -277,8 +278,7 @@ alloc_new:
                                goto out;
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-                               min_t(int, nr_pages, BIO_MAX_PAGES),
-                               GFP_KERNEL);
+                               min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
                if (bio == NULL)
                        goto confused;
        }
@@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
+       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
@@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
-                                       page->index, GFP_KERNEL)) {
+                                       page->index,
+                                       gfp)) {
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, &map_bh,
                                        &first_logical_block,
-                                       get_block);
+                                       get_block, gfp);
                }
                page_cache_release(page);
        }
@@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
+       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
        bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-                       &map_bh, &first_logical_block, get_block);
+                       &map_bh, &first_logical_block, get_block, gfp);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
index 726d211db4842715f71e1911f6940c93b19fe57f..33e9495a31293e2c080b5b0bd2e50523a460ceee 100644 (file)
@@ -1558,8 +1558,6 @@ static int lookup_fast(struct nameidata *nd,
                negative = d_is_negative(dentry);
                if (read_seqcount_retry(&dentry->d_seq, seq))
                        return -ECHILD;
-               if (negative)
-                       return -ENOENT;
 
                /*
                 * This sequence count validates that the parent had no
@@ -1580,6 +1578,12 @@ static int lookup_fast(struct nameidata *nd,
                                goto unlazy;
                        }
                }
+               /*
+                * Note: do negative dentry check after revalidation in
+                * case that drops it.
+                */
+               if (negative)
+                       return -ENOENT;
                path->mnt = mnt;
                path->dentry = dentry;
                if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
index f93b9cdb4934d17739bf4c6442d79bbfe32dcf13..5133bb18830e8c8b97e68e8f2c55d617ff92a321 100644 (file)
@@ -1458,12 +1458,18 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
        if (delegation)
                delegation_flags = delegation->flags;
        rcu_read_unlock();
-       if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+       switch (data->o_arg.claim) {
+       default:
+               break;
+       case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+       case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
                pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
                                   "returning a delegation for "
                                   "OPEN(CLAIM_DELEGATE_CUR)\n",
                                   clp->cl_hostname);
-       } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+               return;
+       }
+       if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
                nfs_inode_set_delegation(state->inode,
                                         data->owner->so_cred,
                                         &data->o_res);
@@ -1771,6 +1777,9 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       write_sequnlock(&state->seqlock);
        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        switch (type & (FMODE_READ|FMODE_WRITE)) {
        case FMODE_READ|FMODE_WRITE:
@@ -1863,6 +1872,8 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
        data->rpc_done = 0;
        data->rpc_status = 0;
        data->timestamp = jiffies;
+       if (data->is_recover)
+               nfs4_set_sequence_privileged(&data->c_arg.seq_args);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
index 5db324635e920a51923b37c3d22c9d3dee2f6682..d854693a15b0e2443779986552d29d9db3f6cdc2 100644 (file)
@@ -1725,7 +1725,8 @@ restart:
                        if (!test_and_clear_bit(ops->owner_flag_bit,
                                                        &sp->so_flags))
                                continue;
-                       atomic_inc(&sp->so_count);
+                       if (!atomic_inc_not_zero(&sp->so_count))
+                               continue;
                        spin_unlock(&clp->cl_lock);
                        rcu_read_unlock();
 
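atomic_inc_not_zero() closes a use-after-free window here: the state owner found on the list may already have dropped its last reference, and a plain atomic_inc() would resurrect an object whose free is in flight. The primitive takes the reference only if the count is still non-zero; a user-space equivalent with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only while the object is still live: fail once the
 * count has reached zero, instead of resurrecting a dying object.
 */
static bool get_ref_not_zero(atomic_int *refs)
{
        int old = atomic_load_explicit(refs, memory_order_relaxed);

        do {
                if (old == 0)
                        return false;   /* already being freed */
        } while (!atomic_compare_exchange_weak(refs, &old, old + 1));

        return true;
}
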
index 28df12e525bac5857c0d41aba62d558db82f526a..671cf68fe56bed7a457fddd4ccdd5913509ff1bd 100644 (file)
@@ -409,7 +409,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
                        __entry->flags = flags;
                        __entry->fmode = (__force unsigned int)ctx->mode;
                        __entry->dev = ctx->dentry->d_sb->s_dev;
-                       if (!IS_ERR(state))
+                       if (!IS_ERR_OR_NULL(state))
                                inode = state->inode;
                        if (inode != NULL) {
                                __entry->fileid = NFS_FILEID(inode);
index 72624dc4a623b894ca0be949c5feab1cec455e02..75ab7622e0cc193bab28f2ba5bb56d37e5f49465 100644 (file)
@@ -569,19 +569,17 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        if (!nfs_pageio_add_request(pgio, req)) {
                nfs_redirty_request(req);
                ret = pgio->pg_error;
-       }
+       } else
+               nfs_add_stats(page_file_mapping(page)->host,
+                               NFSIOS_WRITEPAGES, 1);
 out:
        return ret;
 }
 
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
-       struct inode *inode = page_file_mapping(page)->host;
        int ret;
 
-       nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
-       nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
-
        nfs_pageio_cond_complete(pgio, page_file_index(page));
        ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
        if (ret == -EAGAIN) {
@@ -597,9 +595,11 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
 {
        struct nfs_pageio_descriptor pgio;
+       struct inode *inode = page_file_mapping(page)->host;
        int err;
 
-       nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+       nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+       nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
                                false, &nfs_async_write_completion_ops);
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
@@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
                return 1;
        if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
                       list_empty_careful(&flctx->flc_posix)))
-               return 0;
+               return 1;
 
        /* Check to see if there are whole file write locks */
        ret = 0;
index cdefaa331a0719e88df91ef7c04c32706ae199a1..c29d9421bd5e1f8c890178c6ec961899b319ceef 100644 (file)
@@ -56,14 +56,6 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
        u32 device_generation = 0;
        int error;
 
-       /*
-        * We do not attempt to support I/O smaller than the fs block size,
-        * or not aligned to it.
-        */
-       if (args->lg_minlength < block_size) {
-               dprintk("pnfsd: I/O too small\n");
-               goto out_layoutunavailable;
-       }
        if (seg->offset & (block_size - 1)) {
                dprintk("pnfsd: I/O misaligned\n");
                goto out_layoutunavailable;
index ba1323a94924962299d27cbe67d76ff4e0056bb9..a586467f6ff6c73a4f31234fd96fce0a7b0dc34b 100644 (file)
@@ -70,6 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
        unsigned order;
        void *data;
        int ret;
+       gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
 
        /* make various checks */
        order = get_order(newsize);
@@ -84,7 +85,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
        /* allocate enough contiguous pages to be able to satisfy the
         * request */
-       pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
+       pages = alloc_pages(gfp, order);
        if (!pages)
                return -ENOMEM;
 
@@ -108,7 +109,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
                struct page *page = pages + loop;
 
                ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
-                                       GFP_KERNEL);
+                                       gfp);
                if (ret < 0)
                        goto add_error;
 
index 94f9ea8abcae35af8ca36560403fbd25facb7c65..011dde083f231e763e4c96fcc7fb3cb9b6ce23c7 100644 (file)
@@ -1,15 +1,10 @@
 #ifndef _ASM_WORD_AT_A_TIME_H
 #define _ASM_WORD_AT_A_TIME_H
 
-/*
- * This says "generic", but it's actually big-endian only.
- * Little-endian can use more efficient versions of these
- * interfaces, see for example
- *      arch/x86/include/asm/word-at-a-time.h
- * for those.
- */
-
 #include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
 
 struct word_at_a_time {
        const unsigned long high_bits, low_bits;
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 #define zero_bytemask(mask) (~1ul << __fls(mask))
 #endif
 
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+       const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+       return mask*0x0001020304050608ul >> 56;
+}
+
+#else  /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+       /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+       long a = (0x0ff0001+mask) >> 23;
+       /* Fix the 1 for 00 case */
+       return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+       unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+       *bits = mask;
+       return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+       return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+       bits = (bits - 1) & ~bits;
+       return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+       return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
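
The little-endian half added above locates the first zero byte in a word with a subtract-and-mask trick (has_zero), a borrow trick that isolates everything below it (create_zero_mask), and a multiply that turns the byte mask into an index (count_masked_bytes). A self-contained sketch of how the pieces compose into a word-at-a-time strlen; it assumes a 64-bit little-endian machine, and the zero-padded buffer stands in for the careful boundary handling real users need:

    #include <stdio.h>
    #include <string.h>

    #define REPEAT_BYTE(x)  ((~0ul / 0xff) * (x))

    /* 64-bit little-endian variant, restated from the header above */
    static long count_masked_bytes(unsigned long mask)
    {
            return mask * 0x0001020304050608ul >> 56;
    }

    static size_t wordwise_strlen(const char *s)
    {
            const unsigned long one_bits  = REPEAT_BYTE(0x01);
            const unsigned long high_bits = REPEAT_BYTE(0x80);
            size_t len = 0;

            for (;; len += sizeof(unsigned long)) {
                    unsigned long v, mask;

                    memcpy(&v, s + len, sizeof(v));           /* one word per load */
                    mask = ((v - one_bits) & ~v) & high_bits; /* has_zero() */
                    if (mask) {
                            mask = ((mask - 1) & ~mask) >> 7; /* create_zero_mask() */
                            return len + count_masked_bytes(mask);
                    }
            }
    }

    int main(void)
    {
            char buf[32] = "hello, world"; /* zero padding keeps the last load in bounds */

            printf("%zu\n", wordwise_strlen(buf)); /* prints 12 */
            return 0;
    }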
index 2a747a91fdede982354438e1c48fc1a332d06885..3febb4b9fce9243793fbaf3e8dbb331fe6adf81d 100644 (file)
@@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
 #endif
index 499e9f625aeffb2f618458b45121a6a6286e6e3e..0212d139a480909a216da244ffd8aadf6effa630 100644 (file)
 #define MODE_I2C_READ  4
 #define MODE_I2C_STOP  8
 
+/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
+#define DP_MST_PHYSICAL_PORT_0 0
+#define DP_MST_LOGICAL_PORT_0 8
+
 #define DP_LINK_STATUS_SIZE       6
 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count);
index 86d0b25ed0547db2f63606af97d8bf9e966e39db..5340099741aec8c48575b1c1056f23903e150ed6 100644 (file)
@@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write {
        u8 *bytes;
 };
 
+#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
 struct drm_dp_remote_i2c_read {
        u8 num_transactions;
        u8 port_number;
@@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read {
                u8 *bytes;
                u8 no_stop_bit;
                u8 i2c_transaction_delay;
-       } transactions[4];
+       } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
        u8 read_i2c_device_id;
        u8 num_bytes_read;
 };
@@ -374,6 +375,7 @@ struct drm_dp_mst_topology_mgr;
 struct drm_dp_mst_topology_cbs {
        /* create a connector for a port */
        struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
+       void (*register_connector)(struct drm_connector *connector);
        void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_connector *connector);
        void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
index 37d1602c4f7aa08b464577c675910046a4db3dde..5e7d43ab61c000d894164e093132f607344e9cc0 100644 (file)
@@ -145,7 +145,6 @@ enum {
        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_TAG_SHARED     = 1 << 1,
        BLK_MQ_F_SG_MERGE       = 1 << 2,
-       BLK_MQ_F_SYSFS_UP       = 1 << 3,
        BLK_MQ_F_DEFER_ISSUE    = 1 << 4,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request(struct request *rq, int error);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
-               void *priv);
 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
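
blk_mq_complete_request() now takes the error as an argument, so a driver hands the outcome over at completion time rather than storing it in the request first, avoiding racy updates of the request's error field. A hedged sketch of a driver completion path against the new signature (everything except the blk_mq_complete_request() call is illustrative):

    static void mydrv_end_request(struct request *rq, int error)
    {
            /* driver-private per-request cleanup would go here */
            blk_mq_complete_request(rq, error);
    }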
index 99da9ebc73776af0a5efb69a73310f522b952b25..19c2e947d4d127364887a133d4b0d0ce92090e1c 100644 (file)
@@ -456,6 +456,8 @@ struct request_queue {
        struct blk_mq_tag_set   *tag_set;
        struct list_head        tag_set_list;
        struct bio_set          *bio_split;
+
+       bool                    mq_sysfs_init_done;
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
index e3a51b74e275933700690afcc6c2c233b3612066..75718fa28260b6c3cf5f19711d555bdf9b512f73 100644 (file)
@@ -194,7 +194,6 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
 extern const struct bpf_func_proto bpf_map_update_elem_proto;
 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
 
-extern const struct bpf_func_proto bpf_perf_event_read_proto;
 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
 extern const struct bpf_func_proto bpf_tail_call_proto;
index dfaa7b3e9ae900676b61dc7c3f693c78de97e8cd..82c159e0532aeac8901caea417eddfd6b16e2f12 100644 (file)
 #define KASAN_ABI_VERSION 3
 #endif
 
+#if GCC_VERSION >= 50000
+#define CC_HAVE_BUILTIN_OVERFLOW
+#endif
+
 #endif /* gcc version >= 40000 specific checks */
 
 #if !defined(__noclone)
index ae5d0d22955d8fdedba5b03d981dbcdd7585ee03..f923d15b432c75b831a57eb9d2f1f532684eb689 100644 (file)
@@ -24,5 +24,6 @@ struct ifla_vf_info {
        __u32 min_tx_rate;
        __u32 max_tx_rate;
        __u32 rss_query_en;
+       __u32 trusted;
 };
 #endif /* _LINUX_IF_LINK_H */
index d3ca79236fb00ee5543e507ae9e69bc62b700e43..f644fdb06dd691ba2d218384b1172791100f9aaf 100644 (file)
@@ -161,6 +161,11 @@ enum {
        IRQ_DOMAIN_FLAG_NONCORE         = (1 << 16),
 };
 
+static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
+{
+       return d->of_node;
+}
+
 #ifdef CONFIG_IRQ_DOMAIN
 struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
                                    irq_hw_number_t hwirq_max, int direct_max,
index b122eeafb5dc17b8a8b1a1852dc1c420ecf0f8d2..fa359c79c825e666789ec1ce65392b6ff184d93c 100644 (file)
@@ -283,6 +283,13 @@ static inline void led_trigger_register_simple(const char *name,
 static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
 static inline void led_trigger_event(struct led_trigger *trigger,
                                enum led_brightness event) {}
+static inline void led_trigger_blink(struct led_trigger *trigger,
+                                     unsigned long *delay_on,
+                                     unsigned long *delay_off) {}
+static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
+                                     unsigned long *delay_on,
+                                     unsigned long *delay_off,
+                                     int invert) {}
 static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
 static inline void led_trigger_set(struct led_classdev *led_cdev,
                                struct led_trigger *trigger) {}
index baad4cb8e9b065fb76c8e2b66379d9415d0f4e47..5a8677bafe0408bad140320471e38c100e4bf33a 100644 (file)
@@ -833,6 +833,7 @@ struct mlx4_dev {
        struct mlx4_quotas      quotas;
        struct radix_tree_root  qp_table_tree;
        u8                      rev_id;
+       u8                      port_random_macs;
        char                    board_id[MLX4_BOARD_ID_LEN];
        int                     numa_node;
        int                     oper_log_mgm_entry_size;
index 2a0b956625482bd2d0f03b35647c4619628de233..0b473cbfa7ef10c875c1eed802c2a7dd74f415d4 100644 (file)
@@ -439,7 +439,8 @@ struct mlx5_init_seg {
        __be32                  cmdq_addr_h;
        __be32                  cmdq_addr_l_sz;
        __be32                  cmd_dbell;
-       __be32                  rsvd1[121];
+       __be32                  rsvd1[120];
+       __be32                  initializing;
        struct health_buffer    health;
        __be32                  rsvd2[884];
        __be32                  health_counter;
index 41a32873f608421c03b14b86597fb2bd9c95a94a..5c857f2a20d7b5da77e2d50f0b95a53cc88a5107 100644 (file)
@@ -393,6 +393,7 @@ struct mlx5_core_health {
        struct timer_list               timer;
        u32                             prev;
        int                             miss_counter;
+       bool                            sick;
        struct workqueue_struct        *wq;
        struct work_struct              work;
 };
@@ -486,8 +487,26 @@ struct mlx5_priv {
        spinlock_t              ctx_lock;
 };
 
+enum mlx5_device_state {
+       MLX5_DEVICE_STATE_UP,
+       MLX5_DEVICE_STATE_INTERNAL_ERROR,
+};
+
+enum mlx5_interface_state {
+       MLX5_INTERFACE_STATE_DOWN,
+       MLX5_INTERFACE_STATE_UP,
+};
+
+enum mlx5_pci_status {
+       MLX5_PCI_STATUS_DISABLED,
+       MLX5_PCI_STATUS_ENABLED,
+};
+
 struct mlx5_core_dev {
        struct pci_dev         *pdev;
+       /* sync pci state */
+       struct mutex            pci_status_mutex;
+       enum mlx5_pci_status    pci_status;
        u8                      rev_id;
        char                    board_id[MLX5_BOARD_ID_LEN];
        struct mlx5_cmd         cmd;
@@ -496,6 +515,10 @@ struct mlx5_core_dev {
        u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
+       enum mlx5_device_state  state;
+       /* sync interface state */
+       struct mutex            intf_state_mutex;
+       enum mlx5_interface_state interface_state;
        void                    (*event) (struct mlx5_core_dev *dev,
                                          enum mlx5_dev_event event,
                                          unsigned long param);
@@ -803,6 +826,11 @@ void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
 int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
                        struct mlx5_odp_caps *odp_caps);
 
+static inline int fw_initializing(struct mlx5_core_dev *dev)
+{
+       return ioread32be(&dev->iseg->initializing) >> 31;
+}
+
 static inline u32 mlx5_mkey_to_idx(u32 mkey)
 {
        return mkey >> 8;
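
fw_initializing() above reads the top bit of the new big-endian 'initializing' word, letting a driver tell whether firmware is still booting before it issues commands. A hedged sketch of a polling loop built on it; the sleep interval and timeout strategy are assumptions for illustration, not the actual mlx5 code:

    static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_ms)
    {
            u32 waited_ms = 0;

            while (fw_initializing(dev)) {
                    if (waited_ms > max_wait_ms)
                            return -EBUSY;
                    msleep(20);             /* arbitrary polling interval */
                    waited_ms += 20;
            }
            return 0;
    }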
index b3374402c1ea1d2c0df80d3b5c6a38a1cf1b4156..4ac653b7b8ace2a9a2f2ee0feae07aab1bbcf459 100644 (file)
@@ -881,6 +881,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
  *                       int max_tx_rate);
  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
+ * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
  * int (*ndo_get_vf_config)(struct net_device *dev,
  *                         int vf, struct ifla_vf_info *ivf);
  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
@@ -1054,6 +1055,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     This function is used to pass protocol port error state information
  *     to the switch driver. The switch driver can react to the proto_down
  *      by doing a phys down on the associated switch port.
+ * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
+ *     This function is used to get egress tunnel information for the given skb.
+ *     This is useful for retrieving outer tunnel header parameters while
+ *     sampling the packet.
  *
  */
 struct net_device_ops {
@@ -1109,6 +1114,8 @@ struct net_device_ops {
                                                   int max_tx_rate);
        int                     (*ndo_set_vf_spoofchk)(struct net_device *dev,
                                                       int vf, bool setting);
+       int                     (*ndo_set_vf_trust)(struct net_device *dev,
+                                                   int vf, bool setting);
        int                     (*ndo_get_vf_config)(struct net_device *dev,
                                                     int vf,
                                                     struct ifla_vf_info *ivf);
@@ -1227,6 +1234,8 @@ struct net_device_ops {
        int                     (*ndo_get_iflink)(const struct net_device *dev);
        int                     (*ndo_change_proto_down)(struct net_device *dev,
                                                         bool proto_down);
+       int                     (*ndo_fill_metadata_dst)(struct net_device *dev,
+                                                      struct sk_buff *skb);
 };
 
 /**
@@ -2106,6 +2115,7 @@ struct pcpu_sw_netstats {
 #define NETDEV_PRECHANGEMTU    0x0017 /* notify before mtu change happened */
 #define NETDEV_CHANGEINFODATA  0x0018
 #define NETDEV_BONDING_INFO    0x0019
+#define NETDEV_PRECHANGEUPPER  0x001A
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2206,6 +2216,7 @@ void dev_add_offload(struct packet_offload *po);
 void dev_remove_offload(struct packet_offload *po);
 
 int dev_get_iflink(const struct net_device *dev);
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
                                      unsigned short mask);
 struct net_device *dev_get_by_name(struct net *net, const char *name);
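
dev_fill_metadata_dst() gives callers a way to ask an egress tunnel device for its outer header parameters before the packet is actually transmitted, as the ndo_fill_metadata_dst kerneldoc above describes for the sampling case. A hedged sketch of a caller (the function name and error handling are illustrative):

    static int sample_with_tunnel_info(struct net_device *dev,
                                       struct sk_buff *skb)
    {
            int err = dev_fill_metadata_dst(dev, skb);

            if (err)
                    return err;     /* device offers no egress tunnel info */

            /* the skb's metadata dst now describes the outer header */
            return 0;
    }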
index 165ab2d14734ade6b0766f96fdd8fdb904ba94ea..0ad556726181ada44d9f137502b65e6a10c242c7 100644 (file)
@@ -90,7 +90,6 @@ struct nf_hook_ops {
        /* User fills in from here down. */
        nf_hookfn               *hook;
        struct net_device       *dev;
-       struct module           *owner;
        void                    *priv;
        u_int8_t                pf;
        unsigned int            hooknum;
@@ -347,8 +346,23 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 }
 
 #else /* !CONFIG_NETFILTER */
-#define NF_HOOK(pf, hook, net, sk, skb, indev, outdev, okfn) (okfn)(net, sk, skb)
-#define NF_HOOK_COND(pf, hook, net, sk, skb, indev, outdev, okfn, cond) (okfn)(net, sk, skb)
+static inline int
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+            struct sk_buff *skb, struct net_device *in, struct net_device *out,
+            int (*okfn)(struct net *, struct sock *, struct sk_buff *),
+            bool cond)
+{
+       return okfn(net, sk, skb);
+}
+
+static inline int
+NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+       struct sk_buff *skb, struct net_device *in, struct net_device *out,
+       int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+       return okfn(net, sk, skb);
+}
+
 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
@@ -369,24 +383,28 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
 void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
+#else
+static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+#endif
 
 struct nf_conn;
 enum ip_conntrack_info;
 struct nlattr;
 
-struct nfq_ct_hook {
+struct nfnl_ct_hook {
+       struct nf_conn *(*get_ct)(const struct sk_buff *skb,
+                                 enum ip_conntrack_info *ctinfo);
        size_t (*build_size)(const struct nf_conn *ct);
-       int (*build)(struct sk_buff *skb, struct nf_conn *ct);
+       int (*build)(struct sk_buff *skb, struct nf_conn *ct,
+                    enum ip_conntrack_info ctinfo,
+                    u_int16_t ct_attr, u_int16_t ct_info_attr);
        int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
        int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
                             u32 portid, u32 report);
        void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo, s32 off);
 };
-extern struct nfq_ct_hook __rcu *nfq_ct_hook;
-#else
-static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
-#endif
+extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
 
 /**
  * nf_skb_duplicated - TEE target has sent a packet
diff --git a/include/linux/overflow-arith.h b/include/linux/overflow-arith.h
new file mode 100644 (file)
index 0000000..e12ccf8
--- /dev/null
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <linux/kernel.h>
+
+#ifdef CC_HAVE_BUILTIN_OVERFLOW
+
+#define overflow_usub __builtin_usub_overflow
+
+#else
+
+static inline bool overflow_usub(unsigned int a, unsigned int b,
+                                unsigned int *res)
+{
+       *res = a - b;
+       return *res > a;        /* true iff the subtraction wrapped */
+}
+
+#endif
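
The fallback relies on unsigned subtraction wrapping modulo 2^32: the result of a - b is larger than a exactly when the subtraction underflowed. A runnable userspace restatement of that check:

    #include <stdbool.h>
    #include <stdio.h>

    /* standalone copy of the fallback above */
    static bool overflow_usub(unsigned int a, unsigned int b, unsigned int *res)
    {
            *res = a - b;
            return *res > a;        /* wrapped iff the result grew */
    }

    int main(void)
    {
            unsigned int r;

            printf("5-3 overflow=%d r=%u\n", overflow_usub(5, 3, &r), r); /* 0, 2 */
            printf("3-5 overflow=%d r=%u\n", overflow_usub(3, 5, &r), r); /* 1, 4294967294 */
            return 0;
    }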
index 4c477e6ece33356530da3d38210df2820d2be04d..05fde31b6dc6dbe2f97356cea09a961dc1cd3af0 100644 (file)
@@ -213,7 +213,9 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
 void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
 struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
 int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
 
 
 #define PHY_INTERRUPT_DISABLED 0x0
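
The new _nested accessors exist for stacked topologies such as MDIO bus multiplexers, where the nested bus must access its parent bus while a bus lock is already held, and a plain mutex_lock() would trip a false-positive lockdep report. A sketch of the likely shape, built on mutex_lock_nested(); this is an assumption for illustration, not the actual implementation:

    /* assumed implementation sketch, not copied from the kernel */
    int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
    {
            int ret;

            mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
            ret = bus->read(bus, addr, regnum);
            mutex_unlock(&bus->mdio_lock);

            return ret;
    }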
index 527a85c6192443a50ca8846f24fe40d7e726eca9..c121ddf74f7ff403696ecfe9521b84ab71bb4b0c 100644 (file)
@@ -74,11 +74,6 @@ struct atmel_uart_data {
        struct serial_rs485     rs485;          /* rs485 settings */
 };
 
-/* CAN */
-struct at91_can_data {
-       void (*transceiver_switch)(int on);
-};
-
 /* FIXME: this needs a better location, but gets stuff building again */
 extern int at91_suspend_entering_slow_clock(void);
 
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
new file mode 100644 (file)
index 0000000..6a43476
--- /dev/null
@@ -0,0 +1,607 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+
+#define FW_MAJOR_VERSION       8
+#define FW_MINOR_VERSION       4
+#define FW_REVISION_VERSION    2
+#define FW_ENGINEERING_VERSION 0
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_K2       (4)
+#define MAX_NUM_PORTS_BB       (2)
+#define MAX_NUM_PORTS          (MAX_NUM_PORTS_K2)
+
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS    (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS    (MAX_NUM_VFS_K2)
+
+#define MAX_NUM_FUNCTIONS_BB   (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS      (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER    (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_NUM_VPORTS_K2      (208)
+#define MAX_NUM_VPORTS_BB      (160)
+#define MAX_NUM_VPORTS         (MAX_NUM_VPORTS_K2)
+
+#define MAX_NUM_L2_QUEUES_K2   (320)
+#define MAX_NUM_L2_QUEUES_BB   (256)
+#define MAX_NUM_L2_QUEUES      (MAX_NUM_L2_QUEUES_K2)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+#define NUM_PHYS_TCS_4PORT_K2  (4)
+#define NUM_OF_PHYS_TCS                (8)
+
+#define NUM_TCS_4PORT_K2       (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_OF_TCS             (NUM_OF_PHYS_TCS + 1)
+
+#define LB_TC                  (NUM_OF_PHYS_TCS)
+
+/* Num of possible traffic priority values */
+#define NUM_OF_PRIO            (8)
+
+#define MAX_NUM_VOQS_K2                (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
+#define MAX_NUM_VOQS_BB                (NUM_OF_TCS * MAX_NUM_PORTS_BB)
+#define MAX_NUM_VOQS           (MAX_NUM_VOQS_K2)
+#define MAX_PHYS_VOQS          (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+
+/* CIDs */
+#define NUM_OF_CONNECTION_TYPES        (8)
+#define NUM_OF_LCIDS           (320)
+#define NUM_OF_LTIDS           (320)
+
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
+
+/*****************/
+/* DQ CONSTANTS  */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY                 0
+
+/* XCM agg val selection */
+#define DQ_XCM_AGG_VAL_SEL_WORD2  0
+#define DQ_XCM_AGG_VAL_SEL_WORD3  1
+#define DQ_XCM_AGG_VAL_SEL_WORD4  2
+#define DQ_XCM_AGG_VAL_SEL_WORD5  3
+#define DQ_XCM_AGG_VAL_SEL_REG3   4
+#define DQ_XCM_AGG_VAL_SEL_REG4   5
+#define DQ_XCM_AGG_VAL_SEL_REG5   6
+#define DQ_XCM_AGG_VAL_SEL_REG6   7
+
+/* XCM agg val selection */
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_CONS_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_TX_BD_PROD_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD            DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14  0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15  1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12   2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13   3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18   4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19   5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22   6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23   7
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_ETH_DQ_CF_CMD           (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_DQ_CF_CMD          (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD       (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_TERMINATE_CMD      (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD       (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_CORE_SLOW_PATH_CMD      (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD          (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/*****************/
+/* QM CONSTANTS  */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2    512
+#define MAX_QM_TX_QUEUES_BB    448
+#define MAX_QM_TX_QUEUES       MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB 64
+#define MAX_QM_OTHER_QUEUES_K2 128
+#define MAX_QM_OTHER_QUEUES    MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE 8
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE  0x200
+
+/* QM registers data */
+#define QM_LINE_CRD_REG_WIDTH          16
+#define QM_LINE_CRD_REG_SIGN_BIT       (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH          24
+#define QM_BYTE_CRD_REG_SIGN_BIT       (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH           32
+#define QM_WFQ_CRD_REG_SIGN_BIT                (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH            32
+#define QM_RL_CRD_REG_SIGN_BIT         (1 << (QM_RL_CRD_REG_WIDTH - 1))
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX  0
+#define CAU_FSM_ETH_TX  1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB    12
+
+#define CAU_HC_STOPPED_STATE   3
+#define CAU_HC_DISABLE_STATE   4
+#define CAU_HC_ENABLE_STATE    0
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2     (368)
+#define MAX_SB_PER_PATH_BB     (288)
+#define MAX_TOT_SB_PER_PATH \
+       MAX_SB_PER_PATH_K2
+
+#define MAX_SB_PER_PF_MIMD     129
+#define MAX_SB_PER_PF_SIMD     64
+#define MAX_SB_PER_VF          64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE                   0x0000
+
+#define IGU_MEM_MSIX_BASE              0x0000
+#define IGU_MEM_MSIX_UPPER             0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER    0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE          0x0200
+#define IGU_MEM_PBA_MSIX_UPPER         0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER        0x03ff
+
+#define IGU_CMD_INT_ACK_BASE           0x0400
+#define IGU_CMD_INT_ACK_UPPER          (IGU_CMD_INT_ACK_BASE + \
+                                        MAX_TOT_SB_PER_PATH -  \
+                                        1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER     0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER     0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER     0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER          0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER      0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER      0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER         0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE                  0x0600
+#define IGU_CMD_PROD_UPD_UPPER                 (IGU_CMD_PROD_UPD_BASE +\
+                                                MAX_TOT_SB_PER_PATH - \
+                                                1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER                0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* PTT and GTT */
+#define PXP_NUM_PF_WINDOWS             12
+#define PXP_PER_PF_ENTRY_SIZE          8
+#define PXP_NUM_GLOBAL_WINDOWS         243
+#define PXP_GLOBAL_ENTRY_SIZE          4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH        4
+#define PXP_PF_WINDOW_ADMIN_START      0
+#define PXP_PF_WINDOW_ADMIN_LENGTH     0x1000
+#define PXP_PF_WINDOW_ADMIN_END                (PXP_PF_WINDOW_ADMIN_START + \
+                                        PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START       0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH      (PXP_NUM_PF_WINDOWS * \
+                                                PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+                                        PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START       0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH      (PXP_NUM_GLOBAL_WINDOWS * \
+                                                PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+               (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+                PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR     0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR     0xf4
+#define PXP_PF_ME_OPAQUE_ADDR          0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR                0x1fc
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START       0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM         PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+        PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+        PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM             PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE     0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+       (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+        PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+       (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+        PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER        1024
+
+/* ILT Records */
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+/******************/
+/* PBF CONSTANTS  */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES 3328
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+/* Async data KCQ CQE */
+struct async_data {
+       __le32  cid;
+       __le16  itid;
+       u8      error_code;
+       u8      fw_debug_param;
+};
+
+struct regpair {
+       __le32  lo;
+       __le32  hi;
+};
+
+/* Event Data Union */
+union event_ring_data {
+       u8                              bytes[8];
+       struct async_data               async_info;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+       u8                      protocol_id;
+       u8                      opcode;
+       __le16                  reserved0;
+       __le16                  echo;
+       u8                      fw_return_code;
+       u8                      flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
+#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+       union event_ring_data   data;
+};
+
+/* Multi function mode */
+enum mf_mode {
+       SF,
+       MF_OVLAN,
+       MF_NPAR,
+       MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+       PROTOCOLID_RESERVED1,
+       PROTOCOLID_RESERVED2,
+       PROTOCOLID_RESERVED3,
+       PROTOCOLID_CORE,
+       PROTOCOLID_ETH,
+       PROTOCOLID_RESERVED4,
+       PROTOCOLID_RESERVED5,
+       PROTOCOLID_PREROCE,
+       PROTOCOLID_COMMON,
+       PROTOCOLID_RESERVED6,
+       MAX_PROTOCOL_TYPE
+};
+
+/* CAU protocol-index (PI) entry */
+struct cau_pi_entry {
+       u32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT   0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK  0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT    23
+#define CAU_PI_ENTRY_RESERVED_MASK    0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT   24
+};
+
+/* status block structure */
+struct cau_sb_entry {
+       u32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT     0
+#define CAU_SB_ENTRY_STATE0_MASK       0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT      24
+#define CAU_SB_ENTRY_STATE1_MASK       0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT      28
+       u32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT   18
+#define CAU_SB_ENTRY_VF_VALID_MASK     0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT    26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
+#define CAU_SB_ENTRY_TPH_MASK          0x1
+#define CAU_SB_ENTRY_TPH_SHIFT         31
+};
+
+/* core doorbell data */
+struct core_db_data {
+       u8 params;
+#define CORE_DB_DATA_DEST_MASK         0x3
+#define CORE_DB_DATA_DEST_SHIFT        0
+#define CORE_DB_DATA_AGG_CMD_MASK      0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT     2
+#define CORE_DB_DATA_BYPASS_EN_MASK    0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT   4
+#define CORE_DB_DATA_RESERVED_MASK     0x1
+#define CORE_DB_DATA_RESERVED_SHIFT    5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8      agg_flags;
+       __le16  spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection */
+enum db_agg_cmd_sel {
+       DB_AGG_CMD_NOP,
+       DB_AGG_CMD_SET,
+       DB_AGG_CMD_ADD,
+       DB_AGG_CMD_MAX,
+       MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination */
+enum db_dest {
+       DB_DEST_XCM,
+       DB_DEST_UCM,
+       DB_DEST_TCM,
+       DB_NUM_DESTINATIONS,
+       MAX_DB_DEST
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+       __le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK       0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT      2
+#define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT      5
+};
+
+/* Igu interrupt command */
+enum igu_int_cmd {
+       IGU_INT_ENABLE  = 0,
+       IGU_INT_DISABLE = 1,
+       IGU_INT_NOP     = 2,
+       IGU_INT_NOP2    = 3,
+       MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command */
+struct igu_prod_cons_update {
+       u32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK        0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT       0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK     0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT    24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK      0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT     25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK      0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT     28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK       0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT      29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK    0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT   31
+       u32 reserved1;
+};
+
+/* Igu segments access for default status block only */
+enum igu_seg_access {
+       IGU_SEG_ACCESS_REG      = 0,
+       IGU_SEG_ACCESS_ATTN     = 1,
+       MAX_IGU_SEG_ACCESS
+};
+
+struct parsing_and_err_flags {
+       __le16 flags;
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK        0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT        12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
+};
+
+/* Concrete Function ID. */
+struct pxp_concrete_fid {
+       __le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK     0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT    0
+#define PXP_CONCRETE_FID_PORT_MASK     0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT    4
+#define PXP_CONCRETE_FID_PATH_MASK     0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT    6
+#define PXP_CONCRETE_FID_VFVALID_MASK  0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK     0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT    8
+};
+
+struct pxp_pretend_concrete_fid {
+       __le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT     0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK  0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK   0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT  7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK      0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT     8
+};
+
+union pxp_pretend_fid {
+       struct pxp_pretend_concrete_fid concrete_fid;
+       __le16                          opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+       union pxp_pretend_fid   fid;
+       __le16                  control;
+#define PXP_PRETEND_CMD_PATH_MASK              0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT             0
+#define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT         1
+#define PXP_PRETEND_CMD_PORT_MASK              0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT             2
+#define PXP_PRETEND_CMD_RESERVED0_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT        4
+#define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT        8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+       __le32                  offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK     0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT    0
+#define PXP_PTT_ENTRY_RESERVED0_MASK  0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+       struct pxp_pretend_cmd  pretend;
+};
+
+/* RSS hash type */
+enum rss_hash_type {
+       RSS_HASH_TYPE_DEFAULT   = 0,
+       RSS_HASH_TYPE_IPV4      = 1,
+       RSS_HASH_TYPE_TCP_IPV4  = 2,
+       RSS_HASH_TYPE_IPV6      = 3,
+       RSS_HASH_TYPE_TCP_IPV6  = 4,
+       RSS_HASH_TYPE_UDP_IPV4  = 5,
+       RSS_HASH_TYPE_UDP_IPV6  = 6,
+       MAX_RSS_HASH_TYPE
+};
+
+/* status block structure */
+struct status_block {
+       __le16  pi_array[PIS_PER_SB];
+       __le32  sb_num;
+#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT     0
+#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
+#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+       __le32 prod_index;
+#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+};
+
+#endif /* __COMMON_HSI__ */
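
Nearly every structure in this header packs several logical fields into one integer through paired _MASK/_SHIFT defines; they are meant to be driven by generic get/set helpers rather than open-coded shifts. A minimal sketch of such helpers (the qed driver ships its own equivalents; these names are illustrative):

    #define QF_GET(reg, name) \
            (((reg) >> name##_SHIFT) & name##_MASK)
    #define QF_SET(reg, name, val)                                  \
            ((reg) = ((reg) & ~((name##_MASK) << name##_SHIFT)) |   \
                     (((val) & name##_MASK) << name##_SHIFT))

    /* e.g. extracting the producer counter from a CAU status block entry:
     *      u32 prod = QF_GET(sb_entry.data, CAU_SB_ENTRY_SB_PROD);
     */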
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
new file mode 100644 (file)
index 0000000..320b337
--- /dev/null
@@ -0,0 +1,279 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+#define ETH_CACHE_LINE_SIZE                 64
+
+#define ETH_MAX_RAMROD_PER_CON                          8
+#define ETH_TX_BD_PAGE_SIZE_BYTES                       4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES                       4096
+#define ETH_RX_SGE_PAGE_SIZE_BYTES                      4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES                      4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS                        2
+#define ETH_RX_NUM_NEXT_PAGE_SGES                       2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                          1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                       18
+#define ETH_TX_MAX_LSO_HDR_NBD                                          4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT                                      3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT       3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT            2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE          2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN                  (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES                    510
+
+#define ETH_NUM_STATISTIC_COUNTERS                      MAX_NUM_VPORTS
+
+#define ETH_REG_CQE_PBL_SIZE                3
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS                                     512
+#define ETH_NUM_VLAN_FILTERS                            512
+
+/* approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED     0
+#define ETH_MULTICAST_MAC_BINS                          256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS          (ETH_MULTICAST_MAC_BINS / 32)
+
+/*  ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT                          10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM           128
+#define ETH_RSS_KEY_SIZE_REGS                       10
+#define ETH_RSS_ENGINE_NUM_K2               207
+#define ETH_RSS_ENGINE_NUM_BB               127
+
+/* TPA constants */
+#define ETH_TPA_MAX_AGGS_NUM              64
+#define ETH_TPA_CQE_START_SGL_SIZE        3
+#define ETH_TPA_CQE_CONT_SGL_SIZE         6
+#define ETH_TPA_CQE_END_SGL_SIZE          4
+
+/* Queue Zone sizes */
+#define TSTORM_QZONE_SIZE    0
+#define MSTORM_QZONE_SIZE    sizeof(struct mstorm_eth_queue_zone)
+#define USTORM_QZONE_SIZE    sizeof(struct ustorm_eth_queue_zone)
+#define XSTORM_QZONE_SIZE    0
+#define YSTORM_QZONE_SIZE    sizeof(struct ystorm_eth_queue_zone)
+#define PSTORM_QZONE_SIZE    0
+
+/* Interrupt coalescing TimeSet */
+struct coalescing_timeset {
+       u8      timeset;
+       u8      valid;
+};
+
+struct eth_tx_1st_bd_flags {
+       u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT         1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT         2
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK   0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT  3
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK              0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT             4
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK         0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT        5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT    6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT    7
+};
+
+/* The parsing information data for the first tx bd of a given packet. */
+struct eth_tx_data_1st_bd {
+       __le16                          vlan;
+       u8                              nbds;
+       struct eth_tx_1st_bd_flags      bd_flags;
+       __le16                          fw_use_only;
+};
+
+/* The parsing information data for the second tx bd of a given packet. */
+struct eth_tx_data_2nd_bd {
+       __le16  tunn_ip_size;
+       __le16  bitfields;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK     0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT    0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK                 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                13
+       __le16  bitfields2;
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK  0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK       0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT      4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK            0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT           6
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK                 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT                8
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK           0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT          10
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                  0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                 11
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK             0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT            12
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK                    0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT                   13
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK       0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT      14
+#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK                 0x1
+#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT                15
+};
+
+/* Regular ETH Rx FP CQE. */
+struct eth_fast_path_rx_reg_cqe {
+       u8      type;
+       u8      bitfields;
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK  0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK             0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT            3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK      0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT     7
+       __le16                          pkt_len;
+       struct parsing_and_err_flags    pars_flags;
+       __le16                          vlan_tag;
+       __le32                          rss_hash;
+       __le16                          len_on_bd;
+       u8                              placement_offset;
+       u8                              reserved;
+       __le16                          pbl[ETH_REG_CQE_PBL_SIZE];
+       u8                              reserved1[10];
+};
+
+/* The L4 pseudo checksum mode for Ethernet */
+enum eth_l4_pseudo_checksum_mode {
+       ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+       ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
+       MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct eth_rx_bd {
+       struct regpair addr;
+};
+
+/* regular ETH Rx SP CQE */
+struct eth_slow_path_rx_cqe {
+       u8      type;
+       u8      ramrod_cmd_id;
+       u8      error_flag;
+       u8      reserved[27];
+       __le16  echo;
+};
+
+/* union for all ETH Rx CQE types */
+union eth_rx_cqe {
+       struct eth_fast_path_rx_reg_cqe         fast_path_regular;
+       struct eth_slow_path_rx_cqe             slow_path;
+};
+
+/* ETH Rx CQE type */
+enum eth_rx_cqe_type {
+       ETH_RX_CQE_TYPE_UNUSED,
+       ETH_RX_CQE_TYPE_REGULAR,
+       ETH_RX_CQE_TYPE_SLOW_PATH,
+       MAX_ETH_RX_CQE_TYPE
+};
+
+/* ETH Rx producers data */
+struct eth_rx_prod_data {
+       __le16  bd_prod;
+       __le16  sge_prod;
+       __le16  cqe_prod;
+       __le16  reserved;
+};
+
+/* The first tx bd of a given packet */
+struct eth_tx_1st_bd {
+       struct regpair                  addr;
+       __le16                          nbytes;
+       struct eth_tx_data_1st_bd       data;
+};
+
+/* The second tx bd of a given packet */
+struct eth_tx_2nd_bd {
+       struct regpair                  addr;
+       __le16                          nbytes;
+       struct eth_tx_data_2nd_bd       data;
+};
+
+/* The parsing information data for the third tx bd of a given packet. */
+struct eth_tx_data_3rd_bd {
+       __le16  lso_mss;
+       u8      bitfields;
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK  0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK         0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT        4
+       u8      resereved0[3];
+};
+
+/* The third tx bd of a given packet */
+struct eth_tx_3rd_bd {
+       struct regpair                  addr;
+       __le16                          nbytes;
+       struct eth_tx_data_3rd_bd       data;
+};
+
+/* The common non-special TX BD ring element */
+struct eth_tx_bd {
+       struct regpair  addr;
+       __le16          nbytes;
+       __le16          reserved0;
+       __le32          reserved1;
+};
+
+union eth_tx_bd_types {
+       struct eth_tx_1st_bd    first_bd;
+       struct eth_tx_2nd_bd    second_bd;
+       struct eth_tx_3rd_bd    third_bd;
+       struct eth_tx_bd        reg_bd;
+};
+
+/* Mstorm Queue Zone */
+struct mstorm_eth_queue_zone {
+       struct eth_rx_prod_data rx_producers;
+       __le32                  reserved[2];
+};
+
+/* Ustorm Queue Zone */
+struct ustorm_eth_queue_zone {
+       struct coalescing_timeset       int_coalescing_timeset;
+       __le16                          reserved[3];
+};
+
+/* Ystorm Queue Zone */
+struct ystorm_eth_queue_zone {
+       struct coalescing_timeset       int_coalescing_timeset;
+       __le16                          reserved[3];
+};
+
+/* ETH doorbell data */
+struct eth_db_data {
+       u8 params;
+#define ETH_DB_DATA_DEST_MASK         0x3
+#define ETH_DB_DATA_DEST_SHIFT        0
+#define ETH_DB_DATA_AGG_CMD_MASK      0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT     2
+#define ETH_DB_DATA_BYPASS_EN_MASK    0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT   4
+#define ETH_DB_DATA_RESERVED_MASK     0x1
+#define ETH_DB_DATA_RESERVED_SHIFT    5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8      agg_flags;
+       __le16  bd_prod;
+};
+
+#endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
new file mode 100644 (file)
index 0000000..b920c36
--- /dev/null
@@ -0,0 +1,539 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CHAIN_H
+#define _QED_CHAIN_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x)            cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x)            cpu_to_le32(upper_32_bits(x))
+
+#define HILO_GEN(hi, lo, type)  ((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo)        HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_DMA_REGPAIR(regpair)       (HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair)        (HILO_64(regpair.hi, regpair.lo))
+
+enum qed_chain_mode {
+       /* Each Page contains a next pointer at its end */
+       QED_CHAIN_MODE_NEXT_PTR,
+
+       /* Chain is a single page; a next pointer is not required */
+       QED_CHAIN_MODE_SINGLE,
+
+       /* Page pointers are located in a side list */
+       QED_CHAIN_MODE_PBL,
+};
+
+enum qed_chain_use_mode {
+       QED_CHAIN_USE_TO_PRODUCE,               /* Chain starts empty */
+       QED_CHAIN_USE_TO_CONSUME,               /* Chain starts full */
+       QED_CHAIN_USE_TO_CONSUME_PRODUCE,       /* Chain starts empty */
+};
+
+struct qed_chain_next {
+       struct regpair  next_phys;
+       void            *next_virt;
+};
+
+struct qed_chain_pbl {
+       dma_addr_t      p_phys_table;
+       void            *p_virt_table;
+       u16             prod_page_idx;
+       u16             cons_page_idx;
+};
+
+struct qed_chain {
+       void                    *p_virt_addr;
+       dma_addr_t              p_phys_addr;
+       void                    *p_prod_elem;
+       void                    *p_cons_elem;
+       u16                     page_cnt;
+       enum qed_chain_mode     mode;
+       enum qed_chain_use_mode intended_use; /* used to produce/consume */
+       u16                     capacity; /* number of _usable_ elements */
+       u16                     size; /* number of elements */
+       u16                     prod_idx;
+       u16                     cons_idx;
+       u16                     elem_per_page;
+       u16                     elem_per_page_mask;
+       u16                     elem_unusable;
+       u16                     usable_per_page;
+       u16                     elem_size;
+       u16                     next_page_mask;
+       struct qed_chain_pbl    pbl;
+};
+
+#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
+#define QED_CHAIN_PAGE_SIZE             (0x1000)
+#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)     \
+       ((mode == QED_CHAIN_MODE_NEXT_PTR) ?         \
+        (1 + ((sizeof(struct qed_chain_next) - 1) / \
+              (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+       ((u32)(ELEMS_PER_PAGE(elem_size) -     \
+              UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+       DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
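As a worked example of the three macros above, take 8-byte elements in next-pointer mode; this assumes sizeof(struct qed_chain_next) == 16 on a 64-bit build (a regpair plus a pointer):

	/*
	 * ELEMS_PER_PAGE(8)                    = 0x1000 / 8          = 512
	 * UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR) = 1 + (16 - 1) / 8    = 2
	 * USABLE_ELEMS_PER_PAGE(8, NEXT_PTR)   = 512 - 2             = 510
	 * QED_CHAIN_PAGE_CNT(1024, 8, NEXT_PTR)
	 *                         = DIV_ROUND_UP(1024, 510)          = 3 pages
	 */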
+
+/* Accessors */
+static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+{
+       return p_chain->prod_idx;
+}
+
+static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+{
+       return p_chain->cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+{
+       u16 used;
+
+       /* we don't need to truncate upon assignment, as we assign u32->u16 */
+       used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
+               (u32)p_chain->cons_idx;
+       if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+               used -= (used / p_chain->elem_per_page);
+
+       return p_chain->capacity - used;
+}
+
+static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
+{
+       return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
+}
+
+static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
+{
+       return qed_chain_get_elem_left(p_chain) == 0;
+}
+
+static inline u16 qed_chain_get_elem_per_page(
+       struct qed_chain *p_chain)
+{
+       return p_chain->elem_per_page;
+}
+
+static inline u16 qed_chain_get_usable_per_page(
+       struct qed_chain *p_chain)
+{
+       return p_chain->usable_per_page;
+}
+
+static inline u16 qed_chain_get_unusable_per_page(
+       struct qed_chain *p_chain)
+{
+       return p_chain->elem_unusable;
+}
+
+static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
+{
+       return p_chain->size;
+}
+
+static inline dma_addr_t
+qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+{
+       return p_chain->pbl.p_phys_table;
+}
+
+/**
+ * @brief qed_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static inline void
+qed_chain_advance_page(struct qed_chain *p_chain,
+                      void **p_next_elem,
+                      u16 *idx_to_inc,
+                      u16 *page_to_inc)
+
+{
+       switch (p_chain->mode) {
+       case QED_CHAIN_MODE_NEXT_PTR:
+       {
+               struct qed_chain_next *p_next = *p_next_elem;
+               *p_next_elem = p_next->next_virt;
+               *idx_to_inc += p_chain->elem_unusable;
+               break;
+       }
+       case QED_CHAIN_MODE_SINGLE:
+               *p_next_elem = p_chain->p_virt_addr;
+               break;
+
+       case QED_CHAIN_MODE_PBL:
+               /* Pages are assumed to be sequential; the next element only
+                * needs to change when wrapping from the last page back to
+                * the first.
+                */
+               if (++(*page_to_inc) == p_chain->page_cnt) {
+                       *page_to_inc = 0;
+                       *p_next_elem = p_chain->p_virt_addr;
+               }
+       }
+}
+
+#define is_unusable_idx(p, idx)        \
+       (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx) \
+       ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define test_ans_skip(p, idx)                          \
+       do {                                            \
+               if (is_unusable_idx(p, idx)) {          \
+                       (p)->idx += (p)->elem_unusable; \
+               }                                       \
+       } while (0)
+
+/**
+ * @brief qed_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num
+ */
+static inline void
+qed_chain_return_multi_produced(struct qed_chain *p_chain,
+                               u16 num)
+{
+       p_chain->cons_idx += num;
+       test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+{
+       p_chain->cons_idx++;
+       test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It's the driver's
+ * responsibility to validate that the chain has room for a new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next element
+ */
+static inline void *qed_chain_produce(struct qed_chain *p_chain)
+{
+       void *ret = NULL;
+
+       if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
+           p_chain->next_page_mask) {
+               qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+                                      &p_chain->prod_idx,
+                                      &p_chain->pbl.prod_page_idx);
+       }
+
+       ret = p_chain->p_prod_elem;
+       p_chain->prod_idx++;
+       p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+                                       p_chain->elem_size);
+
+       return ret;
+}
+
+/**
+ * @brief qed_chain_get_capacity -
+ *
+ * Get the maximum number of BDs in chain
+ *
+ * @param p_chain
+ *
+ * @return u16, number of usable BDs
+ */
+static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
+{
+       return p_chain->capacity;
+}
+
+/**
+ * @brief qed_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments the producer so it can be written to FW.
+ *
+ * @param p_chain
+ */
+static inline void
+qed_chain_recycle_consumed(struct qed_chain *p_chain)
+{
+       test_ans_skip(p_chain, prod_idx);
+       p_chain->prod_idx++;
+}
+
+/**
+ * @brief qed_chain_consume -
+ *
+ * A chain in which the driver utilizes data written by a different source
+ * (e.g., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static inline void *qed_chain_consume(struct qed_chain *p_chain)
+{
+       void *ret = NULL;
+
+       if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
+           p_chain->next_page_mask) {
+               qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+                                      &p_chain->cons_idx,
+                                      &p_chain->pbl.cons_page_idx);
+       }
+
+       ret = p_chain->p_cons_elem;
+       p_chain->cons_idx++;
+       p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+                                       p_chain->elem_size);
+
+       return ret;
+}
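Putting the two fastpath accessors together, a hedged sketch of a produce/complete cycle for a chain initialized with QED_CHAIN_USE_TO_PRODUCE; num_completed and the memset stand in for real BD handling:

	if (qed_chain_get_elem_left(p_chain)) {	/* room for one more BD? */
		void *bd = qed_chain_produce(p_chain);

		memset(bd, 0, p_chain->elem_size);	/* fill the BD here */
	}

	/* Later, when FW reports completions, release the slots: */
	qed_chain_return_multi_produced(p_chain, num_completed);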
+
+/**
+ * @brief qed_chain_reset - Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static inline void qed_chain_reset(struct qed_chain *p_chain)
+{
+       int i;
+
+       p_chain->prod_idx       = 0;
+       p_chain->cons_idx       = 0;
+       p_chain->p_cons_elem    = p_chain->p_virt_addr;
+       p_chain->p_prod_elem    = p_chain->p_virt_addr;
+
+       if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+               p_chain->pbl.prod_page_idx      = p_chain->page_cnt - 1;
+               p_chain->pbl.cons_page_idx      = p_chain->page_cnt - 1;
+       }
+
+       switch (p_chain->intended_use) {
+       case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+       case QED_CHAIN_USE_TO_PRODUCE:
+               /* Do nothing */
+               break;
+
+       case QED_CHAIN_USE_TO_CONSUME:
+               /* produce empty elements */
+               for (i = 0; i < p_chain->capacity; i++)
+                       qed_chain_recycle_consumed(p_chain);
+               break;
+       }
+}
+
+/**
+ * @brief qed_chain_init - Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param p_virt_addr
+ * @param p_phys_addr  physical address of allocated buffer's beginning
+ * @param page_cnt     number of pages in the allocated buffer
+ * @param elem_size    size of each element in the chain
+ * @param intended_use
+ * @param mode
+ */
+static inline void qed_chain_init(struct qed_chain *p_chain,
+                                 void *p_virt_addr,
+                                 dma_addr_t p_phys_addr,
+                                 u16 page_cnt,
+                                 u8 elem_size,
+                                 enum qed_chain_use_mode intended_use,
+                                 enum qed_chain_mode mode)
+{
+       /* chain fixed parameters */
+       p_chain->p_virt_addr    = p_virt_addr;
+       p_chain->p_phys_addr    = p_phys_addr;
+       p_chain->elem_size      = elem_size;
+       p_chain->page_cnt       = page_cnt;
+       p_chain->mode           = mode;
+
+       p_chain->intended_use           = intended_use;
+       p_chain->elem_per_page          = ELEMS_PER_PAGE(elem_size);
+       p_chain->usable_per_page =
+               USABLE_ELEMS_PER_PAGE(elem_size, mode);
+       p_chain->capacity               = p_chain->usable_per_page * page_cnt;
+       p_chain->size                   = p_chain->elem_per_page * page_cnt;
+       p_chain->elem_per_page_mask     = p_chain->elem_per_page - 1;
+
+       p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+
+       p_chain->next_page_mask = (p_chain->usable_per_page &
+                                  p_chain->elem_per_page_mask);
+
+       if (mode == QED_CHAIN_MODE_NEXT_PTR) {
+               struct qed_chain_next   *p_next;
+               u16                     i;
+
+               for (i = 0; i < page_cnt - 1; i++) {
+                       /* Increment mem_phy to the next page. */
+                       p_phys_addr += QED_CHAIN_PAGE_SIZE;
+
+                       /* Initialize the physical address of the next page. */
+                       p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+                                                          elem_size *
+                                                          p_chain->
+                                                          usable_per_page);
+
+                       p_next->next_phys.lo    = DMA_LO_LE(p_phys_addr);
+                       p_next->next_phys.hi    = DMA_HI_LE(p_phys_addr);
+
+                       /* Initialize the virtual address of the next page. */
+                       p_next->next_virt = (void *)((u8 *)p_virt_addr +
+                                                    QED_CHAIN_PAGE_SIZE);
+
+                       /* Move to the next page. */
+                       p_virt_addr = p_next->next_virt;
+               }
+
+               /* Last page's next should point to beginning of the chain */
+               p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+                                                  elem_size *
+                                                  p_chain->usable_per_page);
+
+               p_next->next_phys.lo    = DMA_LO_LE(p_chain->p_phys_addr);
+               p_next->next_phys.hi    = DMA_HI_LE(p_chain->p_phys_addr);
+               p_next->next_virt       = p_chain->p_virt_addr;
+       }
+       qed_chain_reset(p_chain);
+}
+
+/**
+ * @brief qed_chain_pbl_init - Initializes a basic pbl chain
+ *        struct
+ * @param p_chain
+ * @param p_virt_addr  virtual address of allocated buffer's beginning
+ * @param p_phys_addr  physical address of allocated buffer's beginning
+ * @param page_cnt     number of pages in the allocated buffer
+ * @param elem_size    size of each element in the chain
+ * @param use_mode
+ * @param p_phys_pbl   pointer to a pre-allocated side table
+ *                      which will hold physical page addresses.
+ * @param p_virt_pbl   pointer to a pre-allocated side table
+ *                      which will hold virtual page addresses.
+ */
+static inline void
+qed_chain_pbl_init(struct qed_chain *p_chain,
+                  void *p_virt_addr,
+                  dma_addr_t p_phys_addr,
+                  u16 page_cnt,
+                  u8 elem_size,
+                  enum qed_chain_use_mode use_mode,
+                  dma_addr_t p_phys_pbl,
+                  dma_addr_t *p_virt_pbl)
+{
+       dma_addr_t *p_pbl_dma = p_virt_pbl;
+       int i;
+
+       qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
+                      elem_size, use_mode, QED_CHAIN_MODE_PBL);
+
+       p_chain->pbl.p_phys_table = p_phys_pbl;
+       p_chain->pbl.p_virt_table = p_virt_pbl;
+
+       /* Fill the PBL with physical addresses */
+       for (i = 0; i < page_cnt; i++) {
+               *p_pbl_dma = p_phys_addr;
+               p_phys_addr += QED_CHAIN_PAGE_SIZE;
+               p_pbl_dma++;
+       }
+}
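A sketch of how a caller might allocate the buffers this function expects, assuming a coherent DMA device `dev` and ignoring error unwinding (all names local to this example):

	struct qed_chain chain;
	dma_addr_t phys, pbl_phys;
	dma_addr_t *pbl_virt;
	u16 page_cnt = 4;
	void *virt;

	virt = dma_alloc_coherent(dev, (size_t)page_cnt * QED_CHAIN_PAGE_SIZE,
				  &phys, GFP_KERNEL);
	pbl_virt = dma_alloc_coherent(dev,
				      (size_t)page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
				      &pbl_phys, GFP_KERNEL);
	if (virt && pbl_virt)
		qed_chain_pbl_init(&chain, virt, phys, page_cnt, 16,
				   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
				   pbl_phys, pbl_virt);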
+
+/**
+ * @brief qed_chain_set_prod - sets the prod to the given
+ *        value
+ *
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+                                     u16 prod_idx,
+                                     void *p_prod_elem)
+{
+       p_chain->prod_idx       = prod_idx;
+       p_chain->p_prod_elem    = p_prod_elem;
+}
+
+/**
+ * @brief qed_chain_sge_get_elem -
+ *
+ * get a pointer to an element represented by absolute idx
+ *
+ * @param p_chain
+ * @assumption p_chain->size is a power of 2
+ *
+ * @return void*, a pointer to the element at idx, or NULL when out of range
+ */
+static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
+                                          u16 idx)
+{
+       void *ret = NULL;
+
+       if (idx >= p_chain->size)
+               return NULL;
+
+       ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
+
+       return ret;
+}
+
+/**
+ * @brief qed_chain_sge_inc_cons_prod
+ *
+ * For SGE chains the producer isn't increased serially; the ring is
+ * expected to be full at all times. Once elements are consumed, they
+ * are immediately produced again.
+ *
+ * @param p_chain
+ * @param cnt
+ */
+static inline void
+qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
+                           u16 cnt)
+{
+       p_chain->prod_idx += cnt;
+       p_chain->cons_idx += cnt;
+}
+
+#endif
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
new file mode 100644 (file)
index 0000000..81ab178
--- /dev/null
@@ -0,0 +1,165 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_ETH_IF_H
+#define _QED_ETH_IF_H
+
+#include <linux/list.h>
+#include <linux/if_link.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+
+struct qed_dev_eth_info {
+       struct qed_dev_info common;
+
+       u8      num_queues;
+       u8      num_tc;
+
+       u8      port_mac[ETH_ALEN];
+       u8      num_vlan_filters;
+};
+
+struct qed_update_vport_rss_params {
+       u16     rss_ind_table[128];
+       u32     rss_key[10];
+};
+
+struct qed_update_vport_params {
+       u8 vport_id;
+       u8 update_vport_active_flg;
+       u8 vport_active_flg;
+       u8 update_rss_flg;
+       struct qed_update_vport_rss_params rss_params;
+};
+
+struct qed_stop_rxq_params {
+       u8 rss_id;
+       u8 rx_queue_id;
+       u8 vport_id;
+       bool eq_completion_only;
+};
+
+struct qed_stop_txq_params {
+       u8 rss_id;
+       u8 tx_queue_id;
+};
+
+enum qed_filter_rx_mode_type {
+       QED_FILTER_RX_MODE_TYPE_REGULAR,
+       QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+       QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+enum qed_filter_xcast_params_type {
+       QED_FILTER_XCAST_TYPE_ADD,
+       QED_FILTER_XCAST_TYPE_DEL,
+       QED_FILTER_XCAST_TYPE_REPLACE,
+};
+
+struct qed_filter_ucast_params {
+       enum qed_filter_xcast_params_type type;
+       u8 vlan_valid;
+       u16 vlan;
+       u8 mac_valid;
+       unsigned char mac[ETH_ALEN];
+};
+
+struct qed_filter_mcast_params {
+       enum qed_filter_xcast_params_type type;
+       u8 num;
+       unsigned char mac[64][ETH_ALEN];
+};
+
+union qed_filter_type_params {
+       enum qed_filter_rx_mode_type accept_flags;
+       struct qed_filter_ucast_params ucast;
+       struct qed_filter_mcast_params mcast;
+};
+
+enum qed_filter_type {
+       QED_FILTER_TYPE_UCAST,
+       QED_FILTER_TYPE_MCAST,
+       QED_FILTER_TYPE_RX_MODE,
+       QED_MAX_FILTER_TYPES,
+};
+
+struct qed_filter_params {
+       enum qed_filter_type type;
+       union qed_filter_type_params filter;
+};
+
+struct qed_queue_start_common_params {
+       u8 rss_id;
+       u8 queue_id;
+       u8 vport_id;
+       u16 sb;
+       u16 sb_idx;
+};
+
+struct qed_eth_cb_ops {
+       struct qed_common_cb_ops common;
+};
+
+struct qed_eth_ops {
+       const struct qed_common_ops *common;
+
+       int (*fill_dev_info)(struct qed_dev *cdev,
+                            struct qed_dev_eth_info *info);
+
+       void (*register_ops)(struct qed_dev *cdev,
+                            struct qed_eth_cb_ops *ops,
+                            void *cookie);
+
+       int (*vport_start)(struct qed_dev *cdev,
+                          u8 vport_id, u16 mtu,
+                          u8 drop_ttl0_flg,
+                          u8 inner_vlan_removal_en_flg);
+
+       int (*vport_stop)(struct qed_dev *cdev,
+                         u8 vport_id);
+
+       int (*vport_update)(struct qed_dev *cdev,
+                           struct qed_update_vport_params *params);
+
+       int (*q_rx_start)(struct qed_dev *cdev,
+                         struct qed_queue_start_common_params *params,
+                         u16 bd_max_bytes,
+                         dma_addr_t bd_chain_phys_addr,
+                         dma_addr_t cqe_pbl_addr,
+                         u16 cqe_pbl_size,
+                         void __iomem **pp_prod);
+
+       int (*q_rx_stop)(struct qed_dev *cdev,
+                        struct qed_stop_rxq_params *params);
+
+       int (*q_tx_start)(struct qed_dev *cdev,
+                         struct qed_queue_start_common_params *params,
+                         dma_addr_t pbl_addr,
+                         u16 pbl_size,
+                         void __iomem **pp_doorbell);
+
+       int (*q_tx_stop)(struct qed_dev *cdev,
+                        struct qed_stop_txq_params *params);
+
+       int (*filter_config)(struct qed_dev *cdev,
+                            struct qed_filter_params *params);
+
+       int (*fastpath_stop)(struct qed_dev *cdev);
+
+       int (*eth_cqe_completion)(struct qed_dev *cdev,
+                                 u8 rss_id,
+                                 struct eth_slow_path_rx_cqe *cqe);
+
+       void (*get_vport_stats)(struct qed_dev *cdev,
+                               struct qed_eth_stats *stats);
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version);
+void qed_put_eth_ops(void);
+
+#endif
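A sketch of the binding sequence a protocol driver might use; the version constant here is a placeholder for whatever the caller and qed agree on via qed_get_protocol_version():

	const struct qed_eth_ops *ops;

	ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION); /* placeholder */
	if (!ops)
		return -EINVAL;

	/* ... use ops->common->probe(), ops->fill_dev_info(), etc. ... */

	qed_put_eth_ops();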
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
new file mode 100644 (file)
index 0000000..dc9a135
--- /dev/null
@@ -0,0 +1,498 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_IF_H
+#define _QED_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_chain.h>
+
+#define DIRECT_REG_WR(reg_addr, val) writel((u32)(val), \
+                                           (void __iomem *)(reg_addr))
+
+#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
+
+#define QED_COALESCE_MAX 0xFF
+
+/* forward */
+struct qed_dev;
+
+struct qed_eth_pf_params {
+       /* The following parameters are used during HW-init
+        * and these parameters need to be passed as arguments
+        * to update_pf_params routine invoked before slowpath start
+        */
+       u16 num_cons;
+};
+
+struct qed_pf_params {
+       struct qed_eth_pf_params eth_pf_params;
+};
+
+enum qed_int_mode {
+       QED_INT_MODE_INTA,
+       QED_INT_MODE_MSIX,
+       QED_INT_MODE_MSI,
+       QED_INT_MODE_POLL,
+};
+
+struct qed_sb_info {
+       struct status_block     *sb_virt;
+       dma_addr_t              sb_phys;
+       u32                     sb_ack; /* Last given ack */
+       u16                     igu_sb_id;
+       void __iomem            *igu_addr;
+       u8                      flags;
+#define QED_SB_INFO_INIT        0x1
+#define QED_SB_INFO_SETUP       0x2
+
+       struct qed_dev          *cdev;
+};
+
+struct qed_dev_info {
+       unsigned long   pci_mem_start;
+       unsigned long   pci_mem_end;
+       unsigned int    pci_irq;
+       u8              num_hwfns;
+
+       u8              hw_mac[ETH_ALEN];
+       bool            is_mf;
+
+       /* FW version */
+       u16             fw_major;
+       u16             fw_minor;
+       u16             fw_rev;
+       u16             fw_eng;
+
+       /* MFW version */
+       u32             mfw_rev;
+
+       u32             flash_size;
+       u8              mf_mode;
+};
+
+enum qed_sb_type {
+       QED_SB_TYPE_L2_QUEUE,
+};
+
+enum qed_protocol {
+       QED_PROTOCOL_ETH,
+};
+
+struct qed_link_params {
+       bool    link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG         BIT(0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
+       u32     override_flags;
+       bool    autoneg;
+       u32     adv_speeds;
+       u32     forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE           BIT(0)
+#define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
+#define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
+       u32     pause_config;
+};
+
+struct qed_link_output {
+       bool    link_up;
+
+       u32     supported_caps;         /* In SUPPORTED defs */
+       u32     advertised_caps;        /* In ADVERTISED defs */
+       u32     lp_caps;                /* In ADVERTISED defs */
+       u32     speed;                  /* In Mb/s */
+       u8      duplex;                 /* In DUPLEX defs */
+       u8      port;                   /* In PORT defs */
+       bool    autoneg;
+       u32     pause_config;
+};
+
+#define QED_DRV_VER_STR_SIZE 12
+struct qed_slowpath_params {
+       u32     int_mode;
+       u8      drv_major;
+       u8      drv_minor;
+       u8      drv_rev;
+       u8      drv_eng;
+       u8      name[QED_DRV_VER_STR_SIZE];
+};
+
+#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
+
+struct qed_int_info {
+       struct msix_entry       *msix;
+       u8                      msix_cnt;
+
+       /* This should be updated by the protocol driver */
+       u8                      used_cnt;
+};
+
+struct qed_common_cb_ops {
+       void    (*link_update)(void                     *dev,
+                              struct qed_link_output   *link);
+};
+
+struct qed_common_ops {
+       struct qed_dev* (*probe)(struct pci_dev *dev,
+                                enum qed_protocol protocol,
+                                u32 dp_module, u8 dp_level);
+
+       void            (*remove)(struct qed_dev *cdev);
+
+       int             (*set_power_state)(struct qed_dev *cdev,
+                                          pci_power_t state);
+
+       void            (*set_id)(struct qed_dev *cdev,
+                                 char name[],
+                                 char ver_str[]);
+
+       /* Client drivers need to make this call before slowpath_start.
+        * The PF params required for the call before slowpath_start are
+        * documented within the qed_pf_params structure definition.
+        */
+       void            (*update_pf_params)(struct qed_dev *cdev,
+                                           struct qed_pf_params *params);
+       int             (*slowpath_start)(struct qed_dev *cdev,
+                                         struct qed_slowpath_params *params);
+
+       int             (*slowpath_stop)(struct qed_dev *cdev);
+
+       /* Requests to use `cnt' interrupts for fastpath.
+        * Upon success, returns the number of interrupts allocated for fastpath.
+        */
+       int             (*set_fp_int)(struct qed_dev *cdev,
+                                     u16 cnt);
+
+       /* Fills `info' with pointers required for utilizing interrupts */
+       int             (*get_fp_int)(struct qed_dev *cdev,
+                                     struct qed_int_info *info);
+
+       u32             (*sb_init)(struct qed_dev *cdev,
+                                  struct qed_sb_info *sb_info,
+                                  void *sb_virt_addr,
+                                  dma_addr_t sb_phy_addr,
+                                  u16 sb_id,
+                                  enum qed_sb_type type);
+
+       u32             (*sb_release)(struct qed_dev *cdev,
+                                     struct qed_sb_info *sb_info,
+                                     u16 sb_id);
+
+       void            (*simd_handler_config)(struct qed_dev *cdev,
+                                              void *token,
+                                              int index,
+                                              void (*handler)(void *));
+
+       void            (*simd_handler_clean)(struct qed_dev *cdev,
+                                             int index);
+/**
+ * @brief set_link - set links according to params
+ *
+ * @param cdev
+ * @param params - values used to override the default link configuration
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int             (*set_link)(struct qed_dev *cdev,
+                                   struct qed_link_params *params);
+
+/**
+ * @brief get_link - returns the current link state.
+ *
+ * @param cdev
+ * @param if_link - structure to be filled with current link configuration.
+ */
+       void            (*get_link)(struct qed_dev *cdev,
+                                   struct qed_link_output *if_link);
+
+/**
+ * @brief - drains chip in case Tx completions fail to arrive due to pause.
+ *
+ * @param cdev
+ */
+       int             (*drain)(struct qed_dev *cdev);
+
+/**
+ * @brief update_msglvl - update module debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+       void            (*update_msglvl)(struct qed_dev *cdev,
+                                        u32 dp_module,
+                                        u8 dp_level);
+
+       int             (*chain_alloc)(struct qed_dev *cdev,
+                                      enum qed_chain_use_mode intended_use,
+                                      enum qed_chain_mode mode,
+                                      u16 num_elems,
+                                      size_t elem_size,
+                                      struct qed_chain *p_chain);
+
+       void            (*chain_free)(struct qed_dev *cdev,
+                                     struct qed_chain *p_chain);
+};
+
+/**
+ * @brief qed_get_protocol_version
+ *
+ * @param protocol
+ *
+ * @return version supported by qed for given protocol driver
+ */
+u32 qed_get_protocol_version(enum qed_protocol protocol);
+
+#define MASK_FIELD(_name, _value) \
+       ((_value) &= (_name ## _MASK))
+
+#define FIELD_VALUE(_name, _value) \
+       ((_value & _name ## _MASK) << _name ## _SHIFT)
+
+#define SET_FIELD(value, name, flag)                          \
+       do {                                                   \
+               (value) &= ~(name ## _MASK << name ## _SHIFT); \
+               (value) |= (((u64)flag) << (name ## _SHIFT));  \
+       } while (0)
+
+#define GET_FIELD(value, name) \
+       (((value) >> (name ## _SHIFT)) & name ## _MASK)
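For instance, the doorbell aggregation fields defined in eth_common.h earlier in this patch can be packed and unpacked like this:

	u8 agg_flags = 0;

	SET_FIELD(agg_flags, ETH_DB_DATA_AGG_VAL_SEL, 2);
	WARN_ON(GET_FIELD(agg_flags, ETH_DB_DATA_AGG_VAL_SEL) != 2);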
+
+/* Debug print definitions */
+#define DP_ERR(cdev, fmt, ...)                                              \
+               pr_err("[%s:%d(%s)]" fmt,                                    \
+                      __func__, __LINE__,                                   \
+                      DP_NAME(cdev) ? DP_NAME(cdev) : "",                   \
+                      ## __VA_ARGS__)                                       \
+
+#define DP_NOTICE(cdev, fmt, ...)                                    \
+       do {                                                          \
+               if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
+                       pr_notice("[%s:%d(%s)]" fmt,                  \
+                                 __func__, __LINE__,                 \
+                                 DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+                                 ## __VA_ARGS__);                    \
+                                                                     \
+               }                                                     \
+       } while (0)
+
+#define DP_INFO(cdev, fmt, ...)                                              \
+       do {                                                          \
+               if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {   \
+                       pr_notice("[%s:%d(%s)]" fmt,                  \
+                                 __func__, __LINE__,                 \
+                                 DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+                                 ## __VA_ARGS__);                    \
+               }                                                     \
+       } while (0)
+
+#define DP_VERBOSE(cdev, module, fmt, ...)                             \
+       do {                                                            \
+               if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
+                            ((cdev)->dp_module & module))) {           \
+                       pr_notice("[%s:%d(%s)]" fmt,                    \
+                                 __func__, __LINE__,                   \
+                                 DP_NAME(cdev) ? DP_NAME(cdev) : "",   \
+                                 ## __VA_ARGS__);                      \
+               }                                                       \
+       } while (0)
+
+enum DP_LEVEL {
+       QED_LEVEL_VERBOSE       = 0x0,
+       QED_LEVEL_INFO          = 0x1,
+       QED_LEVEL_NOTICE        = 0x2,
+       QED_LEVEL_ERR           = 0x3,
+};
+
+#define QED_LOG_LEVEL_SHIFT     (30)
+#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
+#define QED_LOG_INFO_MASK       (0x40000000)
+#define QED_LOG_NOTICE_MASK     (0x80000000)
+
+enum DP_MODULE {
+       QED_MSG_SPQ     = 0x10000,
+       QED_MSG_STATS   = 0x20000,
+       QED_MSG_DCB     = 0x40000,
+       QED_MSG_IOV     = 0x80000,
+       QED_MSG_SP      = 0x100000,
+       QED_MSG_STORAGE = 0x200000,
+       QED_MSG_CXT     = 0x800000,
+       QED_MSG_ILT     = 0x2000000,
+       QED_MSG_ROCE    = 0x4000000,
+       QED_MSG_DEBUG   = 0x8000000,
+       /* to be added...up to 0x8000000 */
+};
+
+struct qed_eth_stats {
+       u64     no_buff_discards;
+       u64     packet_too_big_discard;
+       u64     ttl0_discard;
+       u64     rx_ucast_bytes;
+       u64     rx_mcast_bytes;
+       u64     rx_bcast_bytes;
+       u64     rx_ucast_pkts;
+       u64     rx_mcast_pkts;
+       u64     rx_bcast_pkts;
+       u64     mftag_filter_discards;
+       u64     mac_filter_discards;
+       u64     tx_ucast_bytes;
+       u64     tx_mcast_bytes;
+       u64     tx_bcast_bytes;
+       u64     tx_ucast_pkts;
+       u64     tx_mcast_pkts;
+       u64     tx_bcast_pkts;
+       u64     tx_err_drop_pkts;
+       u64     tpa_coalesced_pkts;
+       u64     tpa_coalesced_events;
+       u64     tpa_aborts_num;
+       u64     tpa_not_coalesced_pkts;
+       u64     tpa_coalesced_bytes;
+
+       /* port */
+       u64     rx_64_byte_packets;
+       u64     rx_127_byte_packets;
+       u64     rx_255_byte_packets;
+       u64     rx_511_byte_packets;
+       u64     rx_1023_byte_packets;
+       u64     rx_1518_byte_packets;
+       u64     rx_1522_byte_packets;
+       u64     rx_2047_byte_packets;
+       u64     rx_4095_byte_packets;
+       u64     rx_9216_byte_packets;
+       u64     rx_16383_byte_packets;
+       u64     rx_crc_errors;
+       u64     rx_mac_crtl_frames;
+       u64     rx_pause_frames;
+       u64     rx_pfc_frames;
+       u64     rx_align_errors;
+       u64     rx_carrier_errors;
+       u64     rx_oversize_packets;
+       u64     rx_jabbers;
+       u64     rx_undersize_packets;
+       u64     rx_fragments;
+       u64     tx_64_byte_packets;
+       u64     tx_65_to_127_byte_packets;
+       u64     tx_128_to_255_byte_packets;
+       u64     tx_256_to_511_byte_packets;
+       u64     tx_512_to_1023_byte_packets;
+       u64     tx_1024_to_1518_byte_packets;
+       u64     tx_1519_to_2047_byte_packets;
+       u64     tx_2048_to_4095_byte_packets;
+       u64     tx_4096_to_9216_byte_packets;
+       u64     tx_9217_to_16383_byte_packets;
+       u64     tx_pause_frames;
+       u64     tx_pfc_frames;
+       u64     tx_lpi_entry_count;
+       u64     tx_total_collisions;
+       u64     brb_truncates;
+       u64     brb_discards;
+       u64     rx_mac_bytes;
+       u64     rx_mac_uc_packets;
+       u64     rx_mac_mc_packets;
+       u64     rx_mac_bc_packets;
+       u64     rx_mac_frames_ok;
+       u64     tx_mac_bytes;
+       u64     tx_mac_uc_packets;
+       u64     tx_mac_mc_packets;
+       u64     tx_mac_bc_packets;
+       u64     tx_mac_ctrl_frames;
+};
+
+#define QED_SB_IDX              0x0002
+
+#define RX_PI           0
+#define TX_PI(tc)       (RX_PI + 1 + tc)
+
+static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
+{
+       u32 prod = 0;
+       u16 rc = 0;
+
+       prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
+              STATUS_BLOCK_PROD_INDEX_MASK;
+       if (sb_info->sb_ack != prod) {
+               sb_info->sb_ack = prod;
+               rc |= QED_SB_IDX;
+       }
+
+       /* Let SB update */
+       mmiowb();
+       return rc;
+}
+
+/**
+ *
+ * @brief This function creates an update command for interrupts that is
+ *        written to the IGU.
+ *
+ * @param sb_info       - This is the structure allocated and
+ *                 initialized per status block. Assumption is
+ *                 that it was initialized using qed_sb_init
+ * @param int_cmd       - Enable/Disable/Nop
+ * @param upd_flg       - whether igu consumer should be
+ *                 updated.
+ */
+static inline void qed_sb_ack(struct qed_sb_info *sb_info,
+                             enum igu_int_cmd int_cmd,
+                             u8 upd_flg)
+{
+       struct igu_prod_cons_update igu_ack = { 0 };
+
+       igu_ack.sb_id_and_flags =
+               ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+                (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+                (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+                (IGU_SEG_ACCESS_REG <<
+                 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+       DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
+
+       /* Both segments (interrupts & acks) are written to the same address;
+        * Need to guarantee all commands will be received (in-order) by HW.
+        */
+       mmiowb();
+       barrier();
+}
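A hedged sketch of how a fastpath handler might pair these two helpers; IGU_INT_ENABLE is assumed to be one of the igu_int_cmd enumerators from common_hsi.h:

	if (qed_sb_update_sb_idx(sb_info) & QED_SB_IDX) {
		/* New producer value observed; ack it and re-enable ints. */
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
	}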
+
+static inline void __internal_ram_wr(void *p_hwfn,
+                                    void __iomem *addr,
+                                    int size,
+                                    u32 *data)
+
+{
+       unsigned int i;
+
+       for (i = 0; i < size / sizeof(*data); i++)
+               DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
+}
+
+static inline void internal_ram_wr(void __iomem *addr,
+                                  int size,
+                                  u32 *data)
+{
+       __internal_ram_wr(NULL, addr, size, data);
+}
+
+#endif
index f4265039a94c8f655a6a2d340bfbda547eb97704..2296e6b2f690760e5b1c2ab33091349be8d9ef58 100644 (file)
@@ -95,4 +95,15 @@ static inline void get_seccomp_filter(struct task_struct *tsk)
        return;
 }
 #endif /* CONFIG_SECCOMP_FILTER */
+
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
+extern long seccomp_get_filter(struct task_struct *task,
+                              unsigned long filter_off, void __user *data);
+#else
+static inline long seccomp_get_filter(struct task_struct *task,
+                                     unsigned long n, void __user *data)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
 #endif /* _LINUX_SECCOMP_H */
index 4398411236f16c3f87691162909dc6197fb62b08..24f4dfd94c517b3b387682509180dee161e0912d 100644 (file)
@@ -463,6 +463,15 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
        return delta_us;
 }
 
+static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
+                                   const struct skb_mstamp *t0)
+{
+       s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
+
+       if (!diff)
+               diff = t1->stamp_us - t0->stamp_us;
+       return diff > 0;
+}
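The signed 32-bit subtraction makes this comparison robust across a jiffies counter wrap; a sketch with illustrative values:

	struct skb_mstamp t0, t1;

	t0.stamp_jiffies = 0xffffffff;	t0.stamp_us = 0;
	t1.stamp_jiffies = 0x00000001;	t1.stamp_us = 0;

	/* (s32)(0x00000001 - 0xffffffff) == 2 > 0: t1 is "after" t0. */
	WARN_ON(!skb_mstamp_after(&t1, &t0));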
 
 /** 
  *     struct sk_buff - socket buffer
index a8d90db9c4b058626b2d5ddcc84f3fa5d16561aa..9ef7795e65e40c5dfbee53726909fbcb2ce341b0 100644 (file)
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
 #ifndef __HAVE_ARCH_STRLCPY
 size_t strlcpy(char *, const char *, size_t);
 #endif
+#ifndef __HAVE_ARCH_STRSCPY
+ssize_t __must_check strscpy(char *, const char *, size_t);
+#endif
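strscpy() differs from strlcpy() in returning the number of characters copied (excluding the NUL) or -E2BIG on truncation; a short usage sketch, with buf and name as illustrative names:

	char buf[8];

	if (strscpy(buf, name, sizeof(buf)) < 0)
		pr_warn("input truncated\n");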
 #ifndef __HAVE_ARCH_STRCAT
 extern char * strcat(char *, const char *);
 #endif
index 86a7edaa679764013fc4c92eadf38de236d887ce..c906f453458119324414f984ee60056194093988 100644 (file)
@@ -194,6 +194,12 @@ struct tcp_sock {
        u32     window_clamp;   /* Maximal window to advertise          */
        u32     rcv_ssthresh;   /* Current window clamp                 */
 
+       /* Information of the most recently (s)acked skb */
+       struct tcp_rack {
+               struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+               u8 advanced; /* mstamp advanced since last lost marking */
+               u8 reord;    /* reordering detected */
+       } rack;
        u16     advmss;         /* Advertised MSS                       */
        u8      unused;
        u8      nonagle     : 4,/* Disable Nagle algorithm?             */
@@ -217,6 +223,9 @@ struct tcp_sock {
        u32     mdev_max_us;    /* maximal mdev for the last rtt period */
        u32     rttvar_us;      /* smoothed mdev_max                    */
        u32     rtt_seq;        /* sequence number to update rttvar     */
+       struct rtt_meas {
+               u32 rtt, ts;    /* RTT in usec and sampling time in jiffies. */
+       } rtt_min[3];
 
        u32     packets_out;    /* Packets which are "in flight"        */
        u32     retrans_out;    /* Retransmitted packets out            */
@@ -280,8 +289,6 @@ struct tcp_sock {
        int     lost_cnt_hint;
        u32     retransmit_high;        /* L-bits may be on up to this seqno */
 
-       u32     lost_retrans_low;       /* Sent seq after any rxmit (lowest) */
-
        u32     prior_ssthresh; /* ssthresh saved at recovery start     */
        u32     high_seq;       /* snd_nxt at onset of congestion       */
 
@@ -385,8 +392,9 @@ static inline bool tcp_passive_fastopen(const struct sock *sk)
 static inline void fastopen_queue_tune(struct sock *sk, int backlog)
 {
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+       int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
 
-       queue->fastopenq.max_qlen = backlog;
+       queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
 }
 
 static inline void tcp_saved_syn_free(struct tcp_sock *tp)
index 3dd5a781da99f163930e8a0611f269d1946daf6b..bfb74723f151512780ceb404255cb43ef92c764d 100644 (file)
@@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param {
         */
        int pio_dma_border; /* default is 64byte */
 
-       u32 type;
+       uintptr_t type;
        u32 enable_gpio;
 
        /*
index 07db532696df2d5c71ff5993ce3f4c03a2818940..cf3bc564ac033f76773e9c6ec12599df1776da8e 100644 (file)
 #include <net/ipv6.h>
 #include <net/net_namespace.h>
 
-#define UIP_802154_SHORTADDR_LEN       2  /* compressed ipv6 address length */
-#define UIP_IPH_LEN                    40 /* ipv6 fixed header size */
-#define UIP_PROTO_UDP                  17 /* ipv6 next header value for UDP */
-#define UIP_FRAGH_LEN                  8  /* ipv6 fragment header size */
-
 #define EUI64_ADDR_LEN         8
 
 #define LOWPAN_NHC_MAX_ID_LEN  1
+/* Maximum next header compression length which we currently support,
+ * including possible inline data.
+ */
+#define LOWPAN_NHC_MAX_HDR_LEN (sizeof(struct udphdr))
 /* Max IPHC Header len without IPv6 hdr specific inline data.
  * Useful for getting the "extra" bytes we need at worst case compression.
  *
  * LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN
  */
 #define LOWPAN_IPHC_MAX_HEADER_LEN     (2 + 1 + LOWPAN_NHC_MAX_ID_LEN)
-
-/*
- * ipv6 address based on mac
- * second bit-flip (Universe/Local) is done according RFC2464
- */
-#define is_addr_mac_addr_based(a, m) \
-       ((((a)->s6_addr[8])  == (((m)[0]) ^ 0x02)) &&   \
-        (((a)->s6_addr[9])  == (m)[1]) &&              \
-        (((a)->s6_addr[10]) == (m)[2]) &&              \
-        (((a)->s6_addr[11]) == (m)[3]) &&              \
-        (((a)->s6_addr[12]) == (m)[4]) &&              \
-        (((a)->s6_addr[13]) == (m)[5]) &&              \
-        (((a)->s6_addr[14]) == (m)[6]) &&              \
-        (((a)->s6_addr[15]) == (m)[7]))
-
-/*
- * check whether we can compress the IID to 16 bits,
- * it's possible for unicast adresses with first 49 bits are zero only.
- */
-#define lowpan_is_iid_16_bit_compressable(a)   \
-       ((((a)->s6_addr16[4]) == 0) &&          \
-        (((a)->s6_addr[10]) == 0) &&           \
-        (((a)->s6_addr[11]) == 0xff) &&        \
-        (((a)->s6_addr[12]) == 0xfe) &&        \
-        (((a)->s6_addr[13]) == 0))
-
-/* check whether the 112-bit gid of the multicast address is mappable to: */
-
-/* 48 bits, FFXX::00XX:XXXX:XXXX */
-#define lowpan_is_mcast_addr_compressable48(a) \
-       ((((a)->s6_addr16[1]) == 0) &&          \
-        (((a)->s6_addr16[2]) == 0) &&          \
-        (((a)->s6_addr16[3]) == 0) &&          \
-        (((a)->s6_addr16[4]) == 0) &&          \
-        (((a)->s6_addr[10]) == 0))
-
-/* 32 bits, FFXX::00XX:XXXX */
-#define lowpan_is_mcast_addr_compressable32(a) \
-       ((((a)->s6_addr16[1]) == 0) &&          \
-        (((a)->s6_addr16[2]) == 0) &&          \
-        (((a)->s6_addr16[3]) == 0) &&          \
-        (((a)->s6_addr16[4]) == 0) &&          \
-        (((a)->s6_addr16[5]) == 0) &&          \
-        (((a)->s6_addr[12]) == 0))
-
-/* 8 bits, FF02::00XX */
-#define lowpan_is_mcast_addr_compressable8(a)  \
-       ((((a)->s6_addr[1])  == 2) &&           \
-        (((a)->s6_addr16[1]) == 0) &&          \
-        (((a)->s6_addr16[2]) == 0) &&          \
-        (((a)->s6_addr16[3]) == 0) &&          \
-        (((a)->s6_addr16[4]) == 0) &&          \
-        (((a)->s6_addr16[5]) == 0) &&          \
-        (((a)->s6_addr16[6]) == 0) &&          \
-        (((a)->s6_addr[14]) == 0))
-
-#define lowpan_is_addr_broadcast(a)    \
-       ((((a)[0]) == 0xFF) &&  \
-        (((a)[1]) == 0xFF) &&  \
-        (((a)[2]) == 0xFF) &&  \
-        (((a)[3]) == 0xFF) &&  \
-        (((a)[4]) == 0xFF) &&  \
-        (((a)[5]) == 0xFF) &&  \
-        (((a)[6]) == 0xFF) &&  \
-        (((a)[7]) == 0xFF))
+/* Maximum worst case IPHC header buffer size */
+#define LOWPAN_IPHC_MAX_HC_BUF_LEN     (sizeof(struct ipv6hdr) +       \
+                                        LOWPAN_IPHC_MAX_HEADER_LEN +   \
+                                        LOWPAN_NHC_MAX_HDR_LEN)
 
 #define LOWPAN_DISPATCH_IPV6           0x41 /* 01000001 = 65 */
 #define LOWPAN_DISPATCH_IPHC           0x60 /* 011xxxxx = ... */
@@ -150,69 +88,6 @@ static inline bool lowpan_is_iphc(u8 dispatch)
        return (dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC;
 }
 
-#define LOWPAN_FRAG_TIMEOUT    (HZ * 60)       /* time-out 60 sec */
-
-#define LOWPAN_FRAG1_HEAD_SIZE 0x4
-#define LOWPAN_FRAGN_HEAD_SIZE 0x5
-
-/*
- * Values of fields within the IPHC encoding first byte
- * (C stands for compressed and I for inline)
- */
-#define LOWPAN_IPHC_TF         0x18
-
-#define LOWPAN_IPHC_FL_C       0x10
-#define LOWPAN_IPHC_TC_C       0x08
-#define LOWPAN_IPHC_NH_C       0x04
-#define LOWPAN_IPHC_TTL_1      0x01
-#define LOWPAN_IPHC_TTL_64     0x02
-#define LOWPAN_IPHC_TTL_255    0x03
-#define LOWPAN_IPHC_TTL_I      0x00
-
-
-/* Values of fields within the IPHC encoding second byte */
-#define LOWPAN_IPHC_CID                0x80
-
-#define LOWPAN_IPHC_ADDR_00    0x00
-#define LOWPAN_IPHC_ADDR_01    0x01
-#define LOWPAN_IPHC_ADDR_02    0x02
-#define LOWPAN_IPHC_ADDR_03    0x03
-
-#define LOWPAN_IPHC_SAC                0x40
-#define LOWPAN_IPHC_SAM                0x30
-
-#define LOWPAN_IPHC_SAM_BIT    4
-
-#define LOWPAN_IPHC_M          0x08
-#define LOWPAN_IPHC_DAC                0x04
-#define LOWPAN_IPHC_DAM_00     0x00
-#define LOWPAN_IPHC_DAM_01     0x01
-#define LOWPAN_IPHC_DAM_10     0x02
-#define LOWPAN_IPHC_DAM_11     0x03
-
-#define LOWPAN_IPHC_DAM_BIT    0
-/*
- * LOWPAN_UDP encoding (works together with IPHC)
- */
-#define LOWPAN_NHC_UDP_MASK            0xF8
-#define LOWPAN_NHC_UDP_ID              0xF0
-#define LOWPAN_NHC_UDP_CHECKSUMC       0x04
-#define LOWPAN_NHC_UDP_CHECKSUMI       0x00
-
-#define LOWPAN_NHC_UDP_4BIT_PORT       0xF0B0
-#define LOWPAN_NHC_UDP_4BIT_MASK       0xFFF0
-#define LOWPAN_NHC_UDP_8BIT_PORT       0xF000
-#define LOWPAN_NHC_UDP_8BIT_MASK       0xFF00
-
-/* values for port compression, _with checksum_ ie bit 5 set to 0 */
-#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
-#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
-                                       dest = 0xF0 + 8 bit inline */
-#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
-                                       dest = 16 bit inline */
-#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
-#define LOWPAN_NHC_UDP_CS_C    0x04 /* checksum elided */
-
 #define LOWPAN_PRIV_SIZE(llpriv_size)  \
        (sizeof(struct lowpan_priv) + llpriv_size)
 
@@ -250,7 +125,7 @@ struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
-                                  unsigned char *buf, int len)
+                                  const unsigned char *buf, int len)
 {
        if (msg)
                pr_debug("%s():%s: ", caller, msg);
@@ -265,7 +140,7 @@ static inline void raw_dump_inline(const char *caller, char *msg,
  * ...
  */
 static inline void raw_dump_table(const char *caller, char *msg,
-                                 unsigned char *buf, int len)
+                                 const unsigned char *buf, int len)
 {
        if (msg)
                pr_debug("%s():%s:\n", caller, msg);
@@ -274,24 +149,25 @@ static inline void raw_dump_table(const char *caller, char *msg,
 }
 #else
 static inline void raw_dump_table(const char *caller, char *msg,
-                                 unsigned char *buf, int len) { }
+                                 const unsigned char *buf, int len) { }
 static inline void raw_dump_inline(const char *caller, char *msg,
-                                  unsigned char *buf, int len) { }
+                                  const unsigned char *buf, int len) { }
 #endif
 
-static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
-{
-       if (unlikely(!pskb_may_pull(skb, 1)))
-               return -EINVAL;
-
-       *val = skb->data[0];
-       skb_pull(skb, 1);
-
-       return 0;
-}
-
-static inline bool lowpan_fetch_skb(struct sk_buff *skb,
-               void *data, const unsigned int len)
+/**
+ * lowpan_fetch_skb - fetch inline data from the 6LoWPAN header
+ *
+ * This function pulls data from the sk buffer and copies it into data,
+ * consuming the 6LoWPAN inline data. It returns true if the sk buffer
+ * is too small to pull the amount of data specified by len.
+ *
+ * @skb: the buffer where the inline data should be pulled from.
+ * @data: destination buffer for the inline data.
+ * @len: amount of data which should be pulled in bytes.
+ */
+static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data,
+                                   unsigned int len)
 {
        if (unlikely(!pskb_may_pull(skb, len)))
                return true;
@@ -311,14 +187,42 @@ static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
 
 void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
 
-int
-lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
-                        const u8 *saddr, const u8 saddr_type,
-                        const u8 saddr_len, const u8 *daddr,
-                        const u8 daddr_type, const u8 daddr_len,
-                        u8 iphc0, u8 iphc1);
-int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
-                       unsigned short type, const void *_daddr,
-                       const void *_saddr, unsigned int len);
+/**
+ * lowpan_header_decompress - replace 6LoWPAN header with IPv6 header
+ *
+ * This function replaces the IPHC 6LoWPAN header, which should be pointed
+ * at by skb->data and skb_network_header, with the IPv6 header.
+ * Ideally the caller provides enough headroom for the IPv6 header and the
+ * largest transport-layer header; this avoids the overhead of
+ * reallocating headroom.
+ *
+ * @skb: the buffer which should be manipulated.
+ * @dev: the lowpan net device pointer.
+ * @daddr: destination lladdr of mac header which is used for compression
+ *     methods.
+ * @saddr: source lladdr of mac header which is used for compression
+ *     methods.
+ */
+int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
+                            const void *daddr, const void *saddr);
+
+/**
+ * lowpan_header_compress - replace IPv6 header with 6LoWPAN header
+ *
+ * This function replaces the IPv6 header, which should be pointed at by
+ * skb->data and skb_network_header, with the IPHC 6LoWPAN header.
+ * The caller needs to make sure that the sk buffer is not shared and has
+ * at least LOWPAN_IPHC_MAX_HEADER_LEN bytes of headroom, which is the
+ * worst-case IPHC overhead beyond the IPv6 header.
+ *
+ * @skb: the buffer which should be manipulated.
+ * @dev: the lowpan net device pointer.
+ * @daddr: destination lladdr of mac header which is used for compression
+ *     methods.
+ * @saddr: source lladdr of mac header which is used for compression
+ *     methods.
+ */
+int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
+                          const void *daddr, const void *saddr);
 
 #endif /* __6LOWPAN_H__ */
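A sketch of the lowpan_fetch_skb() contract described above, pulling the one-byte dispatch value off an incoming frame (the error path is illustrative):

	u8 dispatch;

	if (lowpan_fetch_skb(skb, &dispatch, sizeof(dispatch)))
		return -EINVAL;		/* frame too short */

	if (lowpan_is_iphc(dispatch)) {
		/* hand off to the IPHC decompressor ... */
	}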
index cb1b9bbda332116b6e2173b011ff9fd58f83f431..b36d837c701ec9fe94280a91df3cf1e359ad50af 100644 (file)
@@ -64,7 +64,7 @@ struct unix_sock {
        struct socket_wq        peer_wq;
 };
 
-static inline struct unix_sock *unix_sk(struct sock *sk)
+static inline struct unix_sock *unix_sk(const struct sock *sk)
 {
        return (struct unix_sock *)sk;
 }
index a26ff28ca878930f663db6b681dadbe138cf97b0..0205b80cc90b2d6d569c1ce517266fcb96ff2bf7 100644 (file)
@@ -46,6 +46,7 @@
 #define HCI_DEV_RESUME                 6
 #define HCI_DEV_OPEN                   7
 #define HCI_DEV_CLOSE                  8
+#define HCI_DEV_SETUP                  9
 
 /* HCI notify events */
 #define HCI_NOTIFY_CONN_ADD            1
@@ -170,6 +171,15 @@ enum {
         * during the hdev->setup vendor callback.
         */
        HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+
+       /* When this quirk is set, the enabling of diagnostic mode is
+        * not persistent over HCI Reset. Every time the controller
+        * is brought up it needs to be reprogrammed.
+        *
+        * This quirk can be set before hci_register_dev is called or
+        * during the hdev->setup vendor callback.
+        */
+       HCI_QUIRK_NON_PERSISTENT_DIAG,
 };
 
 /* HCI device flags */
index f28470e5968202bd2ad7646bbefa7d9ebd646f88..1878d0a96333cef013fbad0b71defa0bb274d58b 100644 (file)
@@ -398,6 +398,7 @@ struct hci_dev {
        int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
        void (*hw_error)(struct hci_dev *hdev, u8 code);
+       int (*post_init)(struct hci_dev *hdev);
        int (*set_diag)(struct hci_dev *hdev, bool enable);
        int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
 };
@@ -470,6 +471,7 @@ struct hci_conn {
        struct delayed_work auto_accept_work;
        struct delayed_work idle_work;
        struct delayed_work le_conn_timeout;
+       struct work_struct  le_scan_cleanup;
 
        struct device   dev;
        struct dentry   *debugfs;
@@ -792,6 +794,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
        return NULL;
 }
 
+static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
+                                                      bdaddr_t *ba,
+                                                      __u8 ba_type)
+{
+       struct hci_conn_hash *h = &hdev->conn_hash;
+       struct hci_conn  *c;
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(c, &h->list, list) {
+               if (c->type != LE_LINK)
+                      continue;
+
+               if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
+                       rcu_read_unlock();
+                       return c;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return NULL;
+}
+
 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
                                                        __u8 type, __u16 state)
 {
@@ -1016,9 +1042,6 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
                                                  bdaddr_t *addr,
                                                  u8 addr_type);
-struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
-                                                   bdaddr_t *addr,
-                                                   u8 addr_type);
 
 void hci_uuids_clear(struct hci_dev *hdev);
 
@@ -1458,7 +1481,7 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
 bool mgmt_powering_down(struct hci_dev *hdev);
 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
-void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
                   bool persistent);
 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
index e00588625bc298874fd7011026e7dcf6ab60e3c0..98ccbdef646f9acde302f0b87c8a3ac309be6d6e 100644 (file)
@@ -198,6 +198,7 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
 }
 
 struct switchdev_trans;
+struct switchdev_obj;
 struct switchdev_obj_port_fdb;
 
 struct dsa_switch_driver {
@@ -327,9 +328,9 @@ struct dsa_switch_driver {
                                struct switchdev_trans *trans);
        int     (*port_fdb_del)(struct dsa_switch *ds, int port,
                                const struct switchdev_obj_port_fdb *fdb);
-       int     (*port_fdb_getnext)(struct dsa_switch *ds, int port,
-                                   unsigned char *addr, u16 *vid,
-                                   bool *is_static);
+       int     (*port_fdb_dump)(struct dsa_switch *ds, int port,
+                                struct switchdev_obj_port_fdb *fdb,
+                                int (*cb)(struct switchdev_obj *obj));
 };
 
 void register_switch_driver(struct dsa_switch_driver *type);
index af9d5382f6cbae8c38d45106f5702bd1e66c5671..ce009710120ca8b541615b237a329ee089ec357b 100644 (file)
@@ -60,6 +60,38 @@ static inline struct metadata_dst *tun_rx_dst(int md_size)
        return tun_dst;
 }
 
+static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
+{
+       struct metadata_dst *md_dst = skb_metadata_dst(skb);
+       int md_size;
+       struct metadata_dst *new_md;
+
+       if (!md_dst)
+               return ERR_PTR(-EINVAL);
+
+       md_size = md_dst->u.tun_info.options_len;
+
+       new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
+       if (!new_md)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
+              sizeof(struct ip_tunnel_info) + md_size);
+       skb_dst_drop(skb);
+       dst_hold(&new_md->dst);
+       skb_dst_set(skb, &new_md->dst);
+       return new_md;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
+{
+       struct metadata_dst *dst;
+
+       dst = tun_dst_unclone(skb);
+       if (IS_ERR(dst))
+               return NULL;
+
+       return &dst->u.tun_info;
+}
+
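
A usage sketch for the new helpers (the calling context is illustrative): take a private, writable copy of the tunnel metadata before editing it, since the original dst may be shared with other skbs:

        struct ip_tunnel_info *info;

        info = skb_tunnel_info_unclone(skb);
        if (!info)
                return -ENOMEM;         /* no metadata, or the copy failed */

        info->key.ttl = 64;             /* now safe to modify the private copy */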
 static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
                                                 __be16 flags,
                                                 __be64 tunnel_id,
index 3208a65d1c280aaa893e0084108c21f62912e79b..481fe1c9044cfd8b49585139e24df16b0716debf 100644 (file)
@@ -43,7 +43,9 @@ struct inet_connection_sock_af_ops {
        int         (*conn_request)(struct sock *sk, struct sk_buff *skb);
        struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
                                      struct request_sock *req,
-                                     struct dst_entry *dst);
+                                     struct dst_entry *dst,
+                                     struct request_sock *req_unhash,
+                                     bool *own_req);
        u16         net_header_len;
        u16         net_frag_header_len;
        u16         sockaddr_len;
@@ -268,15 +270,13 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req);
 
-static inline void inet_csk_reqsk_queue_add(struct sock *sk,
-                                           struct request_sock *req,
-                                           struct sock *child)
-{
-       reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
-}
-
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+                             struct sock *child);
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout);
+struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+                                        struct request_sock *req,
+                                        bool own_req);
 
 static inline void inet_csk_reqsk_queue_added(struct sock *sk)
 {
@@ -299,6 +299,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 }
 
 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
 
 void inet_csk_destroy_sock(struct sock *sk);
 void inet_csk_prepare_forced_close(struct sock *sk);
@@ -312,7 +313,7 @@ static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
                        (POLLIN | POLLRDNORM) : 0;
 }
 
-int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
+int inet_csk_listen_start(struct sock *sk, int backlog);
 void inet_csk_listen_stop(struct sock *sk);
 
 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
index 6683ada25fefae509e95d57bfd3dcee2a6845640..de2e3ade61028cc9937861a6218e3f26ff4a1321 100644 (file)
@@ -205,8 +205,8 @@ void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
-int inet_ehash_insert(struct sock *sk, struct sock *osk);
-void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+bool inet_ehash_insert(struct sock *sk, struct sock *osk);
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
 void __inet_hash(struct sock *sk, struct sock *osk);
 void inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);
index e581fc69129dceebe796bf7e27ccaa48dd7af0ff..c9b3eb70f340d48ffe60105622bee367c7ea848f 100644 (file)
@@ -113,12 +113,12 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
                          bool rearm);
 
-static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
 {
        __inet_twsk_schedule(tw, timeo, false);
 }
 
-static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
 {
        __inet_twsk_schedule(tw, timeo, true);
 }
index 44a19a1711048edf24e56c4f298f86a7cdd77789..774d85b2d5d97734b79eadea20f090ebf684b057 100644 (file)
  * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device
  *
  * @l3mdev_get_saddr: Get source address for a flow
+ *
+ * @l3mdev_get_rt6_dst: Get cached IPv6 rt6_info (dst_entry) for device
  */
 
 struct l3mdev_ops {
        u32             (*l3mdev_fib_table)(const struct net_device *dev);
+
+       /* IPv4 ops */
        struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev,
                                             const struct flowi4 *fl4);
        void            (*l3mdev_get_saddr)(struct net_device *dev,
                                            struct flowi4 *fl4);
+
+       /* IPv6 ops */
+       struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev,
+                                                const struct flowi6 *fl6);
 };
 
 #ifdef CONFIG_NET_L3_MASTER_DEV
@@ -123,6 +131,31 @@ static inline void l3mdev_get_saddr(struct net *net, int ifindex,
        }
 }
 
+static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
+                                                  const struct flowi6 *fl6)
+{
+       if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst)
+               return dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
+
+       return NULL;
+}
+
+static inline
+struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
+                                       const struct flowi6 *fl6)
+{
+       struct dst_entry *dst = NULL;
+       struct net_device *dev;
+
+       dev = dev_get_by_index(net, fl6->flowi6_oif);
+       if (dev) {
+               dst = l3mdev_get_rt6_dst(dev, fl6);
+               dev_put(dev);
+       }
+
+       return dst;
+}
+
 #else
 
 static inline int l3mdev_master_ifindex_rcu(struct net_device *dev)
@@ -171,6 +204,19 @@ static inline void l3mdev_get_saddr(struct net *net, int ifindex,
                                    struct flowi4 *fl4)
 {
 }
+
+static inline
+struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
+                                    const struct flowi6 *fl6)
+{
+       return NULL;
+}
+static inline
+struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
+                                       const struct flowi6 *fl6)
+{
+       return NULL;
+}
 #endif
 
 #endif /* _NET_L3MDEV_H_ */
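
A sketch of how an IPv6 output path might consult the new hook ahead of a normal route lookup; the surrounding function and its net, sk and fl6 arguments are assumed:

        struct dst_entry *dst;

        /* prefer the l3mdev (e.g. VRF) route when flowi6_oif names a master */
        dst = l3mdev_rt6_dst_by_oif(net, fl6);
        if (!dst)
                dst = ip6_route_output(net, sk, fl6);   /* ordinary lookup */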
index 5718765cbd95ff7d8d608a4bda3639b0b6e1c7df..da574bbdc33393fc927ba7297f7d67d0cf59b06e 100644 (file)
@@ -276,6 +276,16 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
        __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
 }
 
+/**
+ * ieee802154_le16_to_be16 - copies and converts le16 to be16
+ * @be16_dst: be16 destination pointer
+ * @le16_src: le16 source pointer
+ */
+static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
+{
+       __put_unaligned_memmove16(swab16p(le16_src), be16_dst);
+}
+
 /**
  * ieee802154_alloc_hw - Allocate a new hardware device
  *
index 4757997f76edf4f8871a1b2d8407954b5c17abb5..179253f9dcfd986ef806331044bc4973f1cc7d6e 100644 (file)
@@ -18,7 +18,7 @@
 
 struct mpls_iptunnel_encap {
        u32     label[MAX_NEW_LABELS];
-       u32     labels;
+       u8      labels;
 };
 
 static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
index d642f68a7c73708a99e0fd7a12388ac55313dee5..fde4068eec0b2963ca7155503ecae631e925fba9 100644 (file)
@@ -183,10 +183,6 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
 
 void nf_ct_free_hashtable(void *hash, unsigned int size);
 
-struct nf_conntrack_tuple_hash *
-__nf_conntrack_find(struct net *net, u16 zone,
-                   const struct nf_conntrack_tuple *tuple);
-
 int nf_conntrack_hash_check_insert(struct nf_conn *ct);
 bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
 
index 62308713dd7fa1704409e7db22fca725cc0200b9..f72be38860a747f39c6f4166365ab6768e69b88f 100644 (file)
@@ -20,10 +20,20 @@ struct ctnl_timeout {
 };
 
 struct nf_conn_timeout {
-       struct ctnl_timeout     *timeout;
+       struct ctnl_timeout __rcu *timeout;
 };
 
-#define NF_CT_TIMEOUT_EXT_DATA(__t) (unsigned int *) &((__t)->timeout->data)
+static inline unsigned int *
+nf_ct_timeout_data(struct nf_conn_timeout *t)
+{
+       struct ctnl_timeout *timeout;
+
+       timeout = rcu_dereference(t->timeout);
+       if (timeout == NULL)
+               return NULL;
+
+       return (unsigned int *)timeout->data;
+}
 
 static inline
 struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
@@ -47,7 +57,7 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
        if (timeout_ext == NULL)
                return NULL;
 
-       timeout_ext->timeout = timeout;
+       rcu_assign_pointer(timeout_ext->timeout, timeout);
 
        return timeout_ext;
 #else
@@ -64,10 +74,13 @@ nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
        unsigned int *timeouts;
 
        timeout_ext = nf_ct_timeout_find(ct);
-       if (timeout_ext)
-               timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
-       else
+       if (timeout_ext) {
+               timeouts = nf_ct_timeout_data(timeout_ext);
+               if (unlikely(!timeouts))
+                       timeouts = l4proto->get_timeouts(net);
+       } else {
                timeouts = l4proto->get_timeouts(net);
+       }
 
        return timeouts;
 #else
index e8635854a55bd8771d4290adbcdd7e05417e8c69..9c5638ad872e39d2d01cca6f86c1620e499c916c 100644 (file)
@@ -32,7 +32,7 @@ void nf_register_queue_handler(const struct nf_queue_handler *qh);
 void nf_unregister_queue_handler(void);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
-bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
 
 static inline void init_hashrandom(u32 *jhash_initval)
diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h
deleted file mode 100644 (file)
index aff88ba..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _NET_NFNL_QUEUE_H_
-#define _NET_NFNL_QUEUE_H_
-
-#include <linux/netfilter/nf_conntrack_common.h>
-
-struct nf_conn;
-
-#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
-struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
-                            enum ip_conntrack_info *ctinfo);
-struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
-                              const struct nlattr *attr,
-                              enum ip_conntrack_info *ctinfo);
-int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
-                enum ip_conntrack_info ctinfo);
-void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
-                        enum ip_conntrack_info ctinfo, int diff);
-int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
-                       u32 portid, u32 report);
-#else
-inline struct nf_conn *
-nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
-{
-       return NULL;
-}
-
-inline struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
-                                     const struct nlattr *attr,
-                                     enum ip_conntrack_info *ctinfo)
-{
-       return NULL;
-}
-
-inline int
-nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo)
-{
-       return 0;
-}
-
-inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
-                               enum ip_conntrack_info ctinfo, int diff)
-{
-}
-
-inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
-                              u32 portid, u32 report)
-{
-       return 0;
-}
-#endif /* NF_CONNTRACK */
-#endif
index 46d336abca925b21938a96bf4c4ee972b21a722e..c68926b4899c36e77c38b6244dfb8d126685b905 100644 (file)
@@ -74,7 +74,6 @@ struct netns_ipv4 {
        int sysctl_icmp_ratelimit;
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
-       int sysctl_icmp_redirects_use_orig_daddr;
 
        struct local_ports ip_local_ports;
 
index 2e73748956d590ffe9cfd536dea119c189ee197f..a0dde04eb178015ac97d17a6243cbe50cb83a885 100644 (file)
@@ -186,25 +186,6 @@ static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
        return queue->rskq_accept_head == NULL;
 }
 
-static inline void reqsk_queue_add(struct request_sock_queue *queue,
-                                  struct request_sock *req,
-                                  struct sock *parent,
-                                  struct sock *child)
-{
-       spin_lock(&queue->rskq_lock);
-       req->sk = child;
-       sk_acceptq_added(parent);
-
-       if (queue->rskq_accept_head == NULL)
-               queue->rskq_accept_head = req;
-       else
-               queue->rskq_accept_tail->dl_next = req;
-
-       queue->rskq_accept_tail = req;
-       req->dl_next = NULL;
-       spin_unlock(&queue->rskq_lock);
-}
-
 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
                                                      struct sock *parent)
 {
index aff6ceb891a98e3f7cf7d0c5959b90012db43621..2f87c1ba13de639df7b733ad3905c7e47b8d901b 100644 (file)
@@ -124,7 +124,8 @@ struct rtnl_af_ops {
        int                     (*fill_link_af)(struct sk_buff *skb,
                                                const struct net_device *dev,
                                                u32 ext_filter_mask);
-       size_t                  (*get_link_af_size)(const struct net_device *dev);
+       size_t                  (*get_link_af_size)(const struct net_device *dev,
+                                                   u32 ext_filter_mask);
 
        int                     (*validate_link_af)(const struct net_device *dev,
                                                    const struct nlattr *attr);
index 64a75458d22cc2fc4e972601972cf9ff9cf0a57a..aeed5c95f3caedcdb4c10668c67764d8557e9369 100644 (file)
@@ -843,6 +843,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
        if (sk_rcvqueues_full(sk, limit))
                return -ENOBUFS;
 
+       /*
+        * If the skb was allocated from pfmemalloc reserves, only
+        * allow SOCK_MEMALLOC sockets to use it as this socket is
+        * helping free memory
+        */
+       if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+               return -ENOMEM;
+
        __sk_add_backlog(sk, skb);
        sk->sk_backlog.len += skb->truesize;
        return 0;
index 1ce70830357d7b94af5904877cec6c59e67f350e..bc865e244efea0c4757a182274ad7ca9f9cc3266 100644 (file)
 #include <linux/netdevice.h>
 #include <linux/notifier.h>
 #include <linux/list.h>
+#include <net/ip_fib.h>
 
 #define SWITCHDEV_F_NO_RECURSE         BIT(0)
 #define SWITCHDEV_F_SKIP_EOPNOTSUPP    BIT(1)
+#define SWITCHDEV_F_DEFER              BIT(2)
 
 struct switchdev_trans_item {
        struct list_head list;
@@ -58,8 +60,6 @@ struct switchdev_attr {
        } u;
 };
 
-struct fib_info;
-
 enum switchdev_obj_id {
        SWITCHDEV_OBJ_ID_UNDEFINED,
        SWITCHDEV_OBJ_ID_PORT_VLAN,
@@ -69,6 +69,7 @@ enum switchdev_obj_id {
 
 struct switchdev_obj {
        enum switchdev_obj_id id;
+       u32 flags;
 };
 
 /* SWITCHDEV_OBJ_ID_PORT_VLAN */
@@ -87,7 +88,7 @@ struct switchdev_obj_ipv4_fib {
        struct switchdev_obj obj;
        u32 dst;
        int dst_len;
-       struct fib_info *fi;
+       struct fib_info fi;
        u8 tos;
        u8 type;
        u32 nlflags;
@@ -100,7 +101,7 @@ struct switchdev_obj_ipv4_fib {
 /* SWITCHDEV_OBJ_ID_PORT_FDB */
 struct switchdev_obj_port_fdb {
        struct switchdev_obj obj;
-       const unsigned char *addr;
+       unsigned char addr[ETH_ALEN];
        u16 vid;
        u16 ndm_state;
 };
@@ -132,7 +133,7 @@ struct switchdev_ops {
        int     (*switchdev_port_attr_get)(struct net_device *dev,
                                           struct switchdev_attr *attr);
        int     (*switchdev_port_attr_set)(struct net_device *dev,
-                                          struct switchdev_attr *attr,
+                                          const struct switchdev_attr *attr,
                                           struct switchdev_trans *trans);
        int     (*switchdev_port_obj_add)(struct net_device *dev,
                                          const struct switchdev_obj *obj,
@@ -167,10 +168,11 @@ switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
 
 #ifdef CONFIG_NET_SWITCHDEV
 
+void switchdev_deferred_process(void);
 int switchdev_port_attr_get(struct net_device *dev,
                            struct switchdev_attr *attr);
 int switchdev_port_attr_set(struct net_device *dev,
-                           struct switchdev_attr *attr);
+                           const struct switchdev_attr *attr);
 int switchdev_port_obj_add(struct net_device *dev,
                           const struct switchdev_obj *obj);
 int switchdev_port_obj_del(struct net_device *dev,
@@ -208,6 +210,10 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
 
 #else
 
+static inline void switchdev_deferred_process(void)
+{
+}
+
 static inline int switchdev_port_attr_get(struct net_device *dev,
                                          struct switchdev_attr *attr)
 {
@@ -215,7 +221,7 @@ static inline int switchdev_port_attr_get(struct net_device *dev,
 }
 
 static inline int switchdev_port_attr_set(struct net_device *dev,
-                                         struct switchdev_attr *attr)
+                                         const struct switchdev_attr *attr)
 {
        return -EOPNOTSUPP;
 }
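
An illustrative sketch of the deferred flow enabled by SWITCHDEV_F_DEFER: the set call only queues the operation, and the queued work is executed later in process context:

        struct switchdev_attr attr = {
                .id    = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
                .flags = SWITCHDEV_F_DEFER,     /* don't apply inline */
                .u.stp_state = BR_STATE_FORWARDING,
        };
        int err;

        err = switchdev_port_attr_set(dev, &attr);      /* 0 once queued */
        /* ... later, with rtnl held, flush the queued operations */
        switchdev_deferred_process();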
index a6be56d5f0e3757cb0b0f6f5d6caf2c63ea66203..f80e74c5ad18b22c274ecd7e75b6a23ffe7268b4 100644 (file)
@@ -279,6 +279,7 @@ extern int sysctl_tcp_limit_output_bytes;
 extern int sysctl_tcp_challenge_ack_limit;
 extern unsigned int sysctl_tcp_notsent_lowat;
 extern int sysctl_tcp_min_tso_segs;
+extern int sysctl_tcp_min_rtt_wlen;
 extern int sysctl_tcp_autocorking;
 extern int sysctl_tcp_invalid_ratelimit;
 extern int sysctl_tcp_pacing_ss_ratio;
@@ -456,7 +457,9 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
-                                 struct dst_entry *dst);
+                                 struct dst_entry *dst,
+                                 struct request_sock *req_unhash,
+                                 bool *own_req);
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int tcp_connect(struct sock *sk);
@@ -566,6 +569,7 @@ void tcp_resume_early_retransmit(struct sock *sk);
 void tcp_rearm_rto(struct sock *sk);
 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 void tcp_reset(struct sock *sk);
+void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 
 /* tcp_timer.c */
 void tcp_init_xmit_timers(struct sock *);
@@ -671,6 +675,12 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
        return dst_metric_locked(dst, RTAX_CC_ALGO);
 }
 
+/* Minimum RTT in usec. ~0 means not available. */
+static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
+{
+       return tp->rtt_min[0].rtt;
+}
+
 /* Compute the actual receive window we are currently advertising.
  * Rcv_nxt can be after the window if our peer pushes more data
  * than the offered window.
@@ -1716,7 +1726,7 @@ struct tcp_request_sock_ops {
        __u32 (*init_seq)(const struct sk_buff *skb);
        int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
                           struct flowi *fl, struct request_sock *req,
-                          u16 queue_mapping, struct tcp_fastopen_cookie *foc,
+                          struct tcp_fastopen_cookie *foc,
                           bool attach_req);
 };
 
@@ -1743,6 +1753,19 @@ int tcpv4_offload_init(void);
 void tcp_v4_init(void);
 void tcp_init(void);
 
+/* tcp_recovery.c */
+
+/* Flags to enable various loss recovery features. See below */
+extern int sysctl_tcp_recovery;
+
+/* Use TCP RACK to detect (some) tail and retransmit losses */
+#define TCP_RACK_LOST_RETRANS  0x1
+
+extern int tcp_rack_mark_lost(struct sock *sk);
+
+extern void tcp_rack_advance(struct tcp_sock *tp,
+                            const struct skb_mstamp *xmit_time, u8 sacked);
+
 /*
  * Save and compile IPv4 options, return a pointer to it
  */
index 47e5444f7d15bd971a337ad78960b094b771ec46..b7be852bfe9d5719aa86df7d788224a64fbce775 100644 (file)
@@ -8,6 +8,7 @@ struct tso_t {
        void *data;
        size_t size;
        u16 ip_id;
+       bool ipv6;
        u32 tcp_seq;
 };
 
index 9df61f1edb0f8048472bee5136f81097e47ac569..3094618d382f4d661dd14f9ceb4f4180a8f2c39d 100644 (file)
  *     SA_RESTORER     0x04000000
  */
 
+#if !defined MINSIGSTKSZ || !defined SIGSTKSZ
 #define MINSIGSTKSZ    2048
 #define SIGSTKSZ       8192
+#endif
 
 #ifndef __ASSEMBLY__
 typedef struct {
index f7b2db44eb4b07a910d0097e63a657c1e4a37816..70d89230b6416c6e59858d6936695adf71770e85 100644 (file)
@@ -263,6 +263,7 @@ header-y += minix_fs.h
 header-y += mman.h
 header-y += mmtimer.h
 header-y += mpls.h
+header-y += mpls_iptunnel.h
 header-y += mqueue.h
 header-y += mroute6.h
 header-y += mroute.h
index 564f1f091991b7f1f7b40f06c433b7a663787103..2e032426cfb78c34c3e795d230e9120e1c6a168b 100644 (file)
@@ -287,6 +287,17 @@ enum bpf_func_id {
         * Return: realm if != 0
         */
        BPF_FUNC_get_route_realm,
+
+       /**
+        * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample
+        * @ctx: struct pt_regs*
+        * @map: pointer to perf_event_array map
+        * @index: index of event in the map
+        * @data: data on stack to be output as raw data
+        * @size: size of data
+        * Return: 0 on success
+        */
+       BPF_FUNC_perf_event_output,
        __BPF_FUNC_MAX_ID,
 };
 
index 89ddb9dc9bdf7ca8bd191c9dedf7019f24573931..7a291dc1ff15a01bb97e304ca6dfeb27a9796794 100644 (file)
 #include <linux/types.h>
 #include <linux/can.h>
 
+struct bcm_timeval {
+       long tv_sec;
+       long tv_usec;
+};
+
 /**
  * struct bcm_msg_head - head of messages to/from the broadcast manager
  * @opcode:    opcode, see enum below.
@@ -62,7 +67,7 @@ struct bcm_msg_head {
        __u32 opcode;
        __u32 flags;
        __u32 count;
-       struct timeval ival1, ival2;
+       struct bcm_timeval ival1, ival2;
        canid_t can_id;
        __u32 nframes;
        struct can_frame frames[0];
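
For reference, a userspace sketch of the fixed-layout head (values illustrative); as is conventional for CAN_BCM, the frame follows the head in the same write():

        struct {
                struct bcm_msg_head head;
                struct can_frame frame;
        } msg = {
                .head = {
                        .opcode  = TX_SETUP,
                        .flags   = SETTIMER | STARTTIMER,
                        .ival2   = { .tv_sec = 0, .tv_usec = 500000 }, /* 500 ms */
                        .can_id  = 0x123,
                        .nframes = 1,
                },
                .frame = {
                        .can_id  = 0x123,
                        .can_dlc = 2,
                        .data    = { 0xde, 0xad },
                },
        };

        write(bcm_sock, &msg, sizeof(msg));     /* bcm_sock: connected CAN_BCM fd */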
index e3b6217f34f1138644bc6d2ffc20448d68301a89..a7aea8418abbbc3ff46c39f9d1b955c3814cc5ce 100644 (file)
@@ -550,6 +550,7 @@ enum {
                                 * on/off switch
                                 */
        IFLA_VF_STATS,          /* network device statistics */
+       IFLA_VF_TRUST,          /* Trust VF */
        __IFLA_VF_MAX,
 };
 
@@ -611,6 +612,11 @@ enum {
 
 #define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1)
 
+struct ifla_vf_trust {
+       __u32 vf;
+       __u32 setting;
+};
+
 /* VF ports management section
  *
  *     Nested layout of set/get msg is:
index 90c2c9575bac371e95a69874c4b217d66a8b9ea6..fb21f0c717a12ef0dad7dc4ccbb33e2f10e8288b 100644 (file)
@@ -51,6 +51,8 @@ enum nfulnl_attr_type {
        NFULA_HWTYPE,                   /* hardware type */
        NFULA_HWHEADER,                 /* hardware header */
        NFULA_HWLEN,                    /* hardware header length */
+       NFULA_CT,                       /* nf_conntrack_netlink.h */
+       NFULA_CT_INFO,                  /* enum ip_conntrack_info */
 
        __NFULA_MAX
 };
@@ -93,5 +95,6 @@ enum nfulnl_attr_config {
 
 #define NFULNL_CFG_F_SEQ       0x0001
 #define NFULNL_CFG_F_SEQ_GLOBAL        0x0002
+#define NFULNL_CFG_F_CONNTRACK 0x0004
 
 #endif /* _NFNETLINK_LOG_H */
index 4036e1b1980ff2b113104315bd1bf49db30a5fcb..28ccedd000f5720f6f41660ea6f049b9fd9f145c 100644 (file)
@@ -323,10 +323,10 @@ enum ovs_key_attr {
        OVS_KEY_ATTR_MPLS,      /* array of struct ovs_key_mpls.
                                 * The implementation may restrict
                                 * the accepted length of the array. */
-       OVS_KEY_ATTR_CT_STATE,  /* u8 bitmask of OVS_CS_F_* */
+       OVS_KEY_ATTR_CT_STATE,  /* u32 bitmask of OVS_CS_F_* */
        OVS_KEY_ATTR_CT_ZONE,   /* u16 connection tracking zone. */
        OVS_KEY_ATTR_CT_MARK,   /* u32 connection tracking mark */
-       OVS_KEY_ATTR_CT_LABEL /* 16-octet connection tracking label */
+       OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */
 
 #ifdef __KERNEL__
        OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
@@ -441,9 +441,9 @@ struct ovs_key_nd {
        __u8    nd_tll[ETH_ALEN];
 };
 
-#define OVS_CT_LABEL_LEN       16
-struct ovs_key_ct_label {
-       __u8    ct_label[OVS_CT_LABEL_LEN];
+#define OVS_CT_LABELS_LEN      16
+struct ovs_key_ct_labels {
+       __u8    ct_labels[OVS_CT_LABELS_LEN];
 };
 
 /* OVS_KEY_ATTR_CT_STATE flags */
@@ -451,9 +451,9 @@ struct ovs_key_ct_label {
 #define OVS_CS_F_ESTABLISHED       0x02 /* Part of an existing connection. */
 #define OVS_CS_F_RELATED           0x04 /* Related to an established
                                         * connection. */
-#define OVS_CS_F_INVALID           0x20 /* Could not track connection. */
-#define OVS_CS_F_REPLY_DIR         0x40 /* Flow is in the reply direction. */
-#define OVS_CS_F_TRACKED           0x80 /* Conntrack has occurred. */
+#define OVS_CS_F_REPLY_DIR         0x08 /* Flow is in the reply direction. */
+#define OVS_CS_F_INVALID           0x10 /* Could not track connection. */
+#define OVS_CS_F_TRACKED           0x20 /* Conntrack has occurred. */
 
 /**
  * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
@@ -620,22 +620,25 @@ struct ovs_action_hash {
 
 /**
  * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
- * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags.
+ * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
+ * table. This allows future packets for the same connection to be identified
+ * as 'established' or 'related'. The flow key for the current packet will
+ * retain the pre-commit connection state.
  * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
  * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
  * mask, the corresponding bit in the value is copied to the connection
  * tracking mark field in the connection.
- * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN
+ * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
  * mask. For each bit set in the mask, the corresponding bit in the value is
  * copied to the connection tracking label field in the connection.
  * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
  */
 enum ovs_ct_attr {
        OVS_CT_ATTR_UNSPEC,
-       OVS_CT_ATTR_FLAGS,      /* u8 bitmask of OVS_CT_F_*. */
+       OVS_CT_ATTR_COMMIT,     /* No argument, commits connection. */
        OVS_CT_ATTR_ZONE,       /* u16 zone id. */
        OVS_CT_ATTR_MARK,       /* mark to associate with this connection. */
-       OVS_CT_ATTR_LABEL,      /* label to associate with this connection. */
+       OVS_CT_ATTR_LABELS,     /* labels to associate with this connection. */
        OVS_CT_ATTR_HELPER,     /* netlink helper to assist detection of
                                   related connections. */
        __OVS_CT_ATTR_MAX
@@ -643,14 +646,6 @@ enum ovs_ct_attr {
 
 #define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
 
-/*
- * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
- * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
- * future packets for the same connection to be identified as 'established'
- * or 'related'.
- */
-#define OVS_CT_F_COMMIT                0x01
-
 /**
  * enum ovs_action_attr - Action types.
  *
@@ -707,7 +702,7 @@ enum ovs_action_attr {
                                       * data immediately followed by a mask.
                                       * The data must be zero for the unmasked
                                       * bits. */
-       OVS_ACTION_ATTR_CT,           /* One nested OVS_CT_ATTR_* . */
+       OVS_ACTION_ATTR_CT,           /* Nested OVS_CT_ATTR_* . */
 
        __OVS_ACTION_ATTR_MAX,        /* Nothing past this will be accepted
                                       * from userspace. */
index 2881145cda86cda91621da082ebec83ae490f3c5..d3c4176153611c25a3f3baf091fde5738b7078ee 100644 (file)
@@ -110,6 +110,7 @@ enum perf_sw_ids {
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,
        PERF_COUNT_SW_DUMMY                     = 9,
+       PERF_COUNT_SW_BPF_OUTPUT                = 10,
 
        PERF_COUNT_SW_MAX,                      /* non-ABI */
 };
index a7a69798661440b33f13c74e6560aabdb335ed1a..fb810650900029cf1a933a7eeec033e6dc7269d6 100644 (file)
@@ -64,6 +64,8 @@ struct ptrace_peeksiginfo_args {
 #define PTRACE_GETSIGMASK      0x420a
 #define PTRACE_SETSIGMASK      0x420b
 
+#define PTRACE_SECCOMP_GET_FILTER      0x420c
+
 /* Read signals from a shared (process wide) queue */
 #define PTRACE_PEEKSIGINFO_SHARED      (1 << 0)
 
index 4db0b3ccb497ec7558b36b13ff28792ad3d61ee9..123a5af4e8bb54b0cf33d9558d7fa0b1bb8a31f6 100644 (file)
@@ -160,7 +160,7 @@ struct rtattr {
 
 /* Macros to handle rtattributes */
 
-#define RTA_ALIGNTO    4
+#define RTA_ALIGNTO    4U
 #define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
 #define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \
                         (rta)->rta_len >= sizeof(struct rtattr) && \
index 9ce083960a2575df0bd2a4e31ac3c8b881012880..f18490985fc8e5f39d10ed442d302293ac0e7699 100644 (file)
@@ -107,5 +107,13 @@ struct sched_watchdog {
 #define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
 #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
 #define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
+/*
+ * The domain asked us to perform a 'soft reset' for it. The expected behavior
+ * is to reset internal Xen state for the domain, returning it to the point
+ * where it was created, but to leave the domain's memory contents and vCPU
+ * contexts intact. This allows the domain to start over and set up all
+ * Xen-specific interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
 
 #endif /* __XEN_PUBLIC_SCHED_H__ */
index f2d9e698c7538e61fa8dec26465608643770e06a..3f4c99e06c6bbf8a5e28f882633e8161f3974929 100644 (file)
@@ -292,14 +292,23 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
 
        attr = perf_event_attrs(event);
        if (IS_ERR(attr))
-               return (void *)attr;
+               goto err;
 
-       if (attr->type != PERF_TYPE_RAW &&
-           attr->type != PERF_TYPE_HARDWARE) {
-               perf_event_release_kernel(event);
-               return ERR_PTR(-EINVAL);
-       }
-       return event;
+       if (attr->inherit)
+               goto err;
+
+       if (attr->type == PERF_TYPE_RAW)
+               return event;
+
+       if (attr->type == PERF_TYPE_HARDWARE)
+               return event;
+
+       if (attr->type == PERF_TYPE_SOFTWARE &&
+           attr->config == PERF_COUNT_SW_BPF_OUTPUT)
+               return event;
+err:
+       perf_event_release_kernel(event);
+       return ERR_PTR(-EINVAL);
 }
 
 static void perf_event_fd_array_put_ptr(void *ptr)
index f640e5f7afbd7cece735fcbc1fe34a6d0355337a..687dd6ca574d4c3267b1290cc9f12702ad9346c1 100644 (file)
@@ -520,6 +520,7 @@ void bpf_prog_put(struct bpf_prog *prog)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
                free_used_maps(prog->aux);
+               bpf_prog_uncharge_memlock(prog);
                bpf_prog_free(prog);
        }
 }
index 1d6b97be79e1dd6000e626a74bf001e6cab5835d..b56cf51f8d426ceec23ef5e03f24fd19e58814c7 100644 (file)
@@ -245,6 +245,7 @@ static const struct {
 } func_limit[] = {
        {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
        {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+       {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
 };
 
 static void print_verifier_state(struct verifier_env *env)
@@ -910,7 +911,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
                 * don't allow any other map type to be passed into
                 * the special func;
                 */
-               if (bool_map != bool_func)
+               if (bool_func && bool_map != bool_func)
                        return -EINVAL;
        }
 
index b11756f9b6dcfdf2673b2a396ac0e0de5c980101..64754bfecd700dd4a0fc7f6738f2d07a718100c6 100644 (file)
@@ -5286,9 +5286,15 @@ void perf_output_sample(struct perf_output_handle *handle,
 
        if (sample_type & PERF_SAMPLE_RAW) {
                if (data->raw) {
-                       perf_output_put(handle, data->raw->size);
-                       __output_copy(handle, data->raw->data,
-                                          data->raw->size);
+                       u32 raw_size = data->raw->size;
+                       u32 real_size = round_up(raw_size + sizeof(u32),
+                                                sizeof(u64)) - sizeof(u32);
+                       u64 zero = 0;
+
+                       perf_output_put(handle, real_size);
+                       __output_copy(handle, data->raw->data, raw_size);
+                       if (real_size - raw_size)
+                               __output_copy(handle, &zero, real_size - raw_size);
                } else {
                        struct {
                                u32     size;
@@ -5420,8 +5426,7 @@ void perf_prepare_sample(struct perf_event_header *header,
                else
                        size += sizeof(u32);
 
-               WARN_ON_ONCE(size & (sizeof(u64)-1));
-               header->size += size;
+               header->size += round_up(size, sizeof(u64));
        }
 
        if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
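
The padding arithmetic above, worked for a concrete (illustrative) raw size:

        /* raw_size = 5:
         *   real_size = round_up(5 + sizeof(u32), sizeof(u64)) - sizeof(u32)
         *             = round_up(9, 8) - 4 = 12
         * perf_output_put() emits the u32 length (4 bytes), __output_copy()
         * emits the 5 payload bytes, then 12 - 5 = 7 zero bytes follow, so
         * the record spans 16 bytes and stays u64-aligned in the ring buffer.
         */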
index de41a68fc038df70b8578e6ec1174fbcf53a1f0f..e25a83b67ccea18568f932636b72bd0e9ba182d0 100644 (file)
@@ -22,7 +22,6 @@
 
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
- * @irq:       the interrupt number
  * @desc:      description of the interrupt
  *
  * Handles spurious and unhandled IRQs. It also prints a debug message.
@@ -35,6 +34,7 @@ void handle_bad_irq(struct irq_desc *desc)
        kstat_incr_irqs_this_cpu(desc);
        ack_bad_irq(irq);
 }
+EXPORT_SYMBOL_GPL(handle_bad_irq);
 
 /*
  * Special, empty irq handler:
index 7e6512b9dc1ff2682394cdd0fea9a8c6d01cf6e9..be9149f62eb86e63ac06194d4eeaa4065823ea62 100644 (file)
@@ -228,11 +228,7 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
 {
        struct irq_chip *chip = info->chip;
 
-       BUG_ON(!chip);
-       if (!chip->irq_mask)
-               chip->irq_mask = pci_msi_mask_irq;
-       if (!chip->irq_unmask)
-               chip->irq_unmask = pci_msi_unmask_irq;
+       BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
        if (!chip->irq_set_affinity)
                chip->irq_set_affinity = msi_domain_set_affinity;
 }
index e3a8c9577ba641c38747a0925a7896eca7ef4d79..a50ddc9417ff5fbdccf837f9f43b716200ceb013 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/mutex.h>
 
 #include "internals.h"
 
@@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
 
 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
+       static DEFINE_MUTEX(register_lock);
        char name [MAX_NAMELEN];
 
-       if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
+       if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
                return;
 
+       /*
+        * irq directories are registered only when a handler is
+        * added, not when the descriptor is created, so multiple
+        * tasks might try to register at the same time.
+        */
+       mutex_lock(&register_lock);
+
+       if (desc->dir)
+               goto out_unlock;
+
        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);
 
        /* create /proc/irq/1234 */
        desc->dir = proc_mkdir(name, root_irq_dir);
        if (!desc->dir)
-               return;
+               goto out_unlock;
 
 #ifdef CONFIG_SMP
        /* create /proc/irq/<irq>/smp_affinity */
@@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 
        proc_create_data("spurious", 0444, desc->dir,
                         &irq_spurious_proc_fops, (void *)(long)irq);
+
+out_unlock:
+       mutex_unlock(&register_lock);
 }
 
 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
index 787320de68e02425e8506363681b8875b6ee6d57..b760bae64cf123b16863b6180a68a099abca51d1 100644 (file)
@@ -1016,6 +1016,11 @@ int ptrace_request(struct task_struct *child, long request,
                break;
        }
 #endif
+
+       case PTRACE_SECCOMP_GET_FILTER:
+               ret = seccomp_get_filter(child, addr, datavp);
+               break;
+
        default:
                break;
        }
index 615953141951747dba2715ac32fed23b5d256627..10a8faa1b0d4a5f737bd9008eb8f9a7e817b6ec9 100644 (file)
@@ -2517,11 +2517,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
         * schedule one last time. The schedule call will never return, and
         * the scheduled task must drop that reference.
-        * The test for TASK_DEAD must occur while the runqueue locks are
-        * still held, otherwise prev could be scheduled on another cpu, die
-        * there before we look at prev->state, and then the reference would
-        * be dropped twice.
-        *              Manfred Spraul <manfred@colorfullife.com>
+        *
+        * We must observe prev->state before clearing prev->on_cpu (in
+        * finish_lock_switch), otherwise a concurrent wakeup can get prev
+        * running on another CPU and we could race with its RUNNING -> DEAD
+        * transition, resulting in a double drop.
         */
        prev_state = prev->state;
        vtime_task_switch(prev);
index 68cda117574c3aed0e1849a6c07eb28e1f3369c9..6d2a119c7ad9f63338ffb0e9e92efcbc269c2141 100644 (file)
@@ -1078,9 +1078,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         * After ->on_cpu is cleared, the task can be moved to a different CPU.
         * We must ensure this doesn't happen until the switch is completely
         * finished.
+        *
+        * Pairs with the control dependency and rmb in try_to_wake_up().
         */
-       smp_wmb();
-       prev->on_cpu = 0;
+       smp_store_release(&prev->on_cpu, 0);
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
index 06858a74bb9c14795a2dc847cac484e88c75b40d..580ac2d4024ffbdb29960ccdd86424fa730b1e78 100644 (file)
@@ -347,6 +347,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
 {
        struct seccomp_filter *sfilter;
        int ret;
+       const bool save_orig = config_enabled(CONFIG_CHECKPOINT_RESTORE);
 
        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
                return ERR_PTR(-EINVAL);
@@ -370,7 +371,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
                return ERR_PTR(-ENOMEM);
 
        ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
-                                       seccomp_check_filter, false);
+                                       seccomp_check_filter, save_orig);
        if (ret < 0) {
                kfree(sfilter);
                return ERR_PTR(ret);
@@ -867,3 +868,76 @@ long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
        /* prctl interface doesn't have flags, so they are always zero. */
        return do_seccomp(op, 0, uargs);
 }
+
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
+long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
+                       void __user *data)
+{
+       struct seccomp_filter *filter;
+       struct sock_fprog_kern *fprog;
+       long ret;
+       unsigned long count = 0;
+
+       if (!capable(CAP_SYS_ADMIN) ||
+           current->seccomp.mode != SECCOMP_MODE_DISABLED) {
+               return -EACCES;
+       }
+
+       spin_lock_irq(&task->sighand->siglock);
+       if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       filter = task->seccomp.filter;
+       while (filter) {
+               filter = filter->prev;
+               count++;
+       }
+
+       if (filter_off >= count) {
+               ret = -ENOENT;
+               goto out;
+       }
+       count -= filter_off;
+
+       filter = task->seccomp.filter;
+       while (filter && count > 1) {
+               filter = filter->prev;
+               count--;
+       }
+
+       if (WARN_ON(count != 1 || !filter)) {
+               /* The filter tree shouldn't shrink while we're using it. */
+               ret = -ENOENT;
+               goto out;
+       }
+
+       fprog = filter->prog->orig_prog;
+       if (!fprog) {
+               /* This must be a new non-cBPF filter, since we save
+                * every cBPF filter's orig_prog above when
+                * CONFIG_CHECKPOINT_RESTORE is enabled.
+                */
+               ret = -EMEDIUMTYPE;
+               goto out;
+       }
+
+       ret = fprog->len;
+       if (!data)
+               goto out;
+
+       get_seccomp_filter(task);
+       spin_unlock_irq(&task->sighand->siglock);
+
+       if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
+               ret = -EFAULT;
+
+       put_seccomp_filter(task);
+       return ret;
+
+out:
+       spin_unlock_irq(&task->sighand->siglock);
+       return ret;
+}
+#endif
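
A userspace sketch of driving the new request; assumes the tracee is attached and stopped, the tracer has CAP_SYS_ADMIN and is not itself under seccomp, and the kernel has CONFIG_CHECKPOINT_RESTORE (error handling trimmed):

        #include <stdlib.h>
        #include <sys/ptrace.h>
        #include <linux/filter.h>

        #ifndef PTRACE_SECCOMP_GET_FILTER
        #define PTRACE_SECCOMP_GET_FILTER 0x420c
        #endif

        /* addr selects the filter index; a NULL buffer returns the length */
        long cnt = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, NULL);
        struct sock_filter *insns = calloc(cnt, sizeof(*insns));

        /* second call copies the cBPF program of filter 0 into the buffer */
        ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, insns);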
index 841b72f720e88041a99ded8852381555c307fb43..3a38775b50c2243ea83341a9034a0eb4e3a502a3 100644 (file)
@@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data)
                        continue;
 
                /* Check the deviation from the watchdog clocksource. */
-               if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
+               if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
                        pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
                                cs->name);
                        pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
index 3739ac6aa47355e7234cf0ee2fc60ebc3adce979..44d2cc0436f4968a32fb61772e19ca701fccbfb0 100644 (file)
@@ -1251,7 +1251,7 @@ void __init timekeeping_init(void)
        set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
        tk_set_wall_to_mono(tk, tmp);
 
-       timekeeping_update(tk, TK_MIRROR);
+       timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
index 0fe96c7c8803c759b963411198fa18153a539f46..4228fd3682c3d45894f0f3bbc13a715331a24e7c 100644 (file)
@@ -199,6 +199,11 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
        if (!event)
                return -ENOENT;
 
+       /* make sure event is local and doesn't have pmu::count */
+       if (event->oncpu != smp_processor_id() ||
+           event->pmu->count)
+               return -EINVAL;
+
        /*
         * we don't know if the function is run successfully by the
         * return value. It can be judged in other places, such as
@@ -207,14 +212,58 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
        return perf_event_read_local(event);
 }
 
-const struct bpf_func_proto bpf_perf_event_read_proto = {
+static const struct bpf_func_proto bpf_perf_event_read_proto = {
        .func           = bpf_perf_event_read,
-       .gpl_only       = false,
+       .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
 };
 
+static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+{
+       struct pt_regs *regs = (struct pt_regs *) (long) r1;
+       struct bpf_map *map = (struct bpf_map *) (long) r2;
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       void *data = (void *) (long) r4;
+       struct perf_sample_data sample_data;
+       struct perf_event *event;
+       struct perf_raw_record raw = {
+               .size = size,
+               .data = data,
+       };
+
+       if (unlikely(index >= array->map.max_entries))
+               return -E2BIG;
+
+       event = (struct perf_event *)array->ptrs[index];
+       if (unlikely(!event))
+               return -ENOENT;
+
+       if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
+                    event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
+               return -EINVAL;
+
+       if (unlikely(event->oncpu != smp_processor_id()))
+               return -EOPNOTSUPP;
+
+       perf_sample_data_init(&sample_data, 0, 0);
+       sample_data.raw = &raw;
+       perf_event_output(event, &sample_data, regs);
+       return 0;
+}
+
+static const struct bpf_func_proto bpf_perf_event_output_proto = {
+       .func           = bpf_perf_event_output,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_CONST_MAP_PTR,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_PTR_TO_STACK,
+       .arg5_type      = ARG_CONST_STACK_SIZE,
+};
+
 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
 {
        switch (func_id) {
@@ -242,6 +291,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
                return &bpf_get_smp_processor_id_proto;
        case BPF_FUNC_perf_event_read:
                return &bpf_perf_event_read_proto;
+       case BPF_FUNC_perf_event_output:
+               return &bpf_perf_event_output_proto;
        default:
                return NULL;
        }
index ca71582fcfab29ec708746eab636c325b9caef15..bcb14cafe007148b15edb5cfed5adc041a6d966c 100644 (file)
@@ -1458,13 +1458,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        timer_stats_timer_set_start_info(&dwork->timer);
 
        dwork->wq = wq;
+       /* timer isn't guaranteed to run in this cpu, record earlier */
+       if (cpu == WORK_CPU_UNBOUND)
+               cpu = raw_smp_processor_id();
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
-       if (unlikely(cpu != WORK_CPU_UNBOUND))
-               add_timer_on(timer, cpu);
-       else
-               add_timer(timer);
+       add_timer_on(timer, cpu);
 }
 
 /**
index 2e491ac15622a559c88ba12a4067eeb5ca704115..f0df318104e7272ef97641421c938069a047a85e 100644 (file)
@@ -220,6 +220,7 @@ config ZLIB_INFLATE
 
 config ZLIB_DEFLATE
        tristate
+       select BITREVERSE
 
 config LZO_COMPRESS
        tristate
index 13d1e84ddb80e983a325011fb8dd8b6ff9ce0600..84775ba873b9efd978fa006be56e58057b34031f 100644 (file)
 #include <linux/bug.h>
 #include <linux/errno.h>
 
+#include <asm/byteorder.h>
+#include <asm/word-at-a-time.h>
+#include <asm/page.h>
+
 #ifndef __HAVE_ARCH_STRNCASECMP
 /**
  * strncasecmp - Case insensitive, length-limited string comparison
@@ -146,6 +150,91 @@ size_t strlcpy(char *dest, const char *src, size_t size)
 EXPORT_SYMBOL(strlcpy);
 #endif
 
+#ifndef __HAVE_ARCH_STRSCPY
+/**
+ * strscpy - Copy a C-string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer.
+ * The routine returns the number of characters copied (not including
+ * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
+ * The behavior is undefined if the string buffers overlap.
+ * The destination buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Preferred to strlcpy() since the API doesn't require reading memory
+ * from the src string beyond the specified "count" bytes, and since
+ * the return value is easier to error-check than strlcpy()'s.
+ * In addition, the implementation is robust to the string changing out
+ * from underneath it, unlike the current strlcpy() implementation.
+ *
+ * Preferred to strncpy() since it always returns a valid string, and
+ * doesn't unnecessarily force the tail of the destination buffer to be
+ * zeroed.  If the zeroing is desired, it's likely cleaner to use strscpy()
+ * with an overflow test, then just memset() the tail of the dest buffer.
+ */
+ssize_t strscpy(char *dest, const char *src, size_t count)
+{
+       const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       size_t max = count;
+       long res = 0;
+
+       if (count == 0)
+               return -E2BIG;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       /*
+        * If src is unaligned, don't cross a page boundary,
+        * since we don't know if the next page is mapped.
+        */
+       if ((long)src & (sizeof(long) - 1)) {
+               size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1));
+               if (limit < max)
+                       max = limit;
+       }
+#else
+       /* If src or dest is unaligned, don't do word-at-a-time. */
+       if (((long) dest | (long) src) & (sizeof(long) - 1))
+               max = 0;
+#endif
+
+       while (max >= sizeof(unsigned long)) {
+               unsigned long c, data;
+
+               c = *(unsigned long *)(src+res);
+               if (has_zero(c, &data, &constants)) {
+                       data = prep_zero_mask(c, data, &constants);
+                       data = create_zero_mask(data);
+                       *(unsigned long *)(dest+res) = c & zero_bytemask(data);
+                       return res + find_zero(data);
+               }
+               *(unsigned long *)(dest+res) = c;
+               res += sizeof(unsigned long);
+               count -= sizeof(unsigned long);
+               max -= sizeof(unsigned long);
+       }
+
+       while (count) {
+               char c;
+
+               c = src[res];
+               dest[res] = c;
+               if (!c)
+                       return res;
+               res++;
+               count--;
+       }
+
+       /* Hit buffer length without finding a NUL; force NUL-termination. */
+       if (res)
+               dest[res-1] = '\0';
+
+       return -E2BIG;
+}
+EXPORT_SYMBOL(strscpy);
+#endif
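
A usage sketch of the zero-the-tail idiom suggested in the comment above (name is an assumed source string):

        char buf[16];
        ssize_t len = strscpy(buf, name, sizeof(buf));

        if (len < 0)                    /* -E2BIG: source was truncated */
                len = sizeof(buf) - 1;
        memset(buf + len, 0, sizeof(buf) - len);        /* zero NUL and tail */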
+
 #ifndef __HAVE_ARCH_STRCAT
 /**
  * strcat - Append one %NUL-terminated string to another
index 72940fb38666811b80c146bc085a1c84fc0e7ecc..1cc5467cf36ce7852f7a0474d5fd3237b3dfff10 100644 (file)
@@ -2473,6 +2473,21 @@ ssize_t generic_perform_write(struct file *file,
                                                iov_iter_count(i));
 
 again:
+               /*
+                * Bring in the user page that we will copy from _first_.
+                * Otherwise there's a nasty deadlock on copying from the
+                * same page as we're writing to, without it being marked
+                * up-to-date.
+                *
+                * Not only is this an optimisation, but it is also required
+                * to check that the address is actually valid, when atomic
+                * usercopies are used, below.
+                */
+               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+                       status = -EFAULT;
+                       break;
+               }
+
                status = a_ops->write_begin(file, mapping, pos, bytes, flags,
                                                &page, &fsdata);
                if (unlikely(status < 0))
@@ -2480,17 +2495,8 @@ again:
 
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);
-               /*
-                * 'page' is now locked.  If we are trying to copy from a
-                * mapping of 'page' in userspace, the copy might fault and
-                * would need PageUptodate() to complete.  But, page can not be
-                * made Uptodate without acquiring the page lock, which we hold.
-                * Deadlock.  Avoid with pagefault_disable().  Fix up below with
-                * iov_iter_fault_in_readable().
-                */
-               pagefault_disable();
+
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-               pagefault_enable();
                flush_dcache_page(page);
 
                status = a_ops->write_end(file, mapping, pos, bytes, copied,
@@ -2513,14 +2519,6 @@ again:
                         */
                        bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
                                                iov_iter_single_seg_count(i));
-                       /*
-                        * This is the fallback to recover if the copy from
-                        * userspace above faults.
-                        */
-                       if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-                               status = -EFAULT;
-                               break;
-                       }
                        goto again;
                }
                pos += copied;
index 1fedbde68f595c2b83d5aa84962a667c1ada120a..d9b5c817dce8e0a99aab1853eafc572e5b70015f 100644 (file)
@@ -3387,6 +3387,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
        ret = page_counter_memparse(args, "-1", &threshold);
        if (ret)
                return ret;
+       threshold <<= PAGE_SHIFT;
 
        mutex_lock(&memcg->thresholds_lock);
 
index 9cb27470fee991cb874676bb0cbc0f694b5e1d36..deb679c31f2ab897cafebf72643aec4f66233308 100644 (file)
@@ -2426,6 +2426,8 @@ void unmap_mapping_range(struct address_space *mapping,
        if (details.last_index < details.first_index)
                details.last_index = ULONG_MAX;
 
+
+       /* DAX uses i_mmap_lock to serialise file truncate vs page fault */
        i_mmap_lock_write(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
index 60cd846a9a4401f73a6a51539507ecc22fb308d4..24682f6f4cfd1d84d7245faea0da78e7fd17e716 100644 (file)
@@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
-               if (add_to_page_cache_lru(page, mapping,
-                                       page->index, GFP_KERNEL)) {
+               if (add_to_page_cache_lru(page, mapping, page->index,
+                               GFP_KERNEL & mapping_gfp_mask(mapping))) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
@@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
-               if (!add_to_page_cache_lru(page, mapping,
-                                       page->index, GFP_KERNEL)) {
+               if (!add_to_page_cache_lru(page, mapping, page->index,
+                               GFP_KERNEL & mapping_gfp_mask(mapping))) {
                        mapping->a_ops->readpage(filp, page);
                }
                page_cache_release(page);
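
Both readahead paths now AND GFP_KERNEL with the mapping's gfp mask, so a filesystem that has cleared bits such as __GFP_FS in its mapping cannot be re-entered from a page-cache allocation. A standalone sketch of the masking; the flag values below are illustrative only, not the kernel's actual bit assignments:

#include <stdio.h>

#define __GFP_IO	0x01u	/* illustrative values, not the real bits */
#define __GFP_FS	0x02u
#define __GFP_RECLAIM	0x04u
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)

int main(void)
{
	unsigned int mapping_gfp = GFP_NOFS;	/* fs forbids FS re-entry */
	unsigned int gfp = GFP_KERNEL & mapping_gfp;

	/* __GFP_FS has been dropped from the allocation mask */
	printf("gfp=%#x fs-allowed=%d\n", gfp, !!(gfp & __GFP_FS));
	return 0;
}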
index 4f5cd974e11a0adbb8a601cc92b9866ab6d67d55..fbf14485a0498bf181e81f43bc69a0522e67afd5 100644 (file)
@@ -1363,15 +1363,16 @@ static cpumask_var_t cpu_stat_off;
 
 static void vmstat_update(struct work_struct *w)
 {
-       if (refresh_cpu_vm_stats())
+       if (refresh_cpu_vm_stats()) {
                /*
                 * Counters were updated so we expect more updates
                 * to occur in the future. Keep on running the
                 * update worker thread.
                 */
-               schedule_delayed_work(this_cpu_ptr(&vmstat_work),
+               schedule_delayed_work_on(smp_processor_id(),
+                       this_cpu_ptr(&vmstat_work),
                        round_jiffies_relative(sysctl_stat_interval));
-       else {
+       } else {
                /*
                 * We did not update any counters so the app may be in
                 * a mode where it does not cause counter updates.
index 78c8a495b57174565aaeda4e6a626b16bb82c240..346b5c1a91851efd16b22695a7a1259c1cca9139 100644 (file)
 #include <linux/bitops.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
+
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
-#include <net/af_ieee802154.h>
+
+/* special link-layer handling */
+#include <net/mac802154.h>
 
 #include "nhc.h"
 
+/* Values of fields within the IPHC encoding first byte */
+#define LOWPAN_IPHC_TF_MASK    0x18
+#define LOWPAN_IPHC_TF_00      0x00
+#define LOWPAN_IPHC_TF_01      0x08
+#define LOWPAN_IPHC_TF_10      0x10
+#define LOWPAN_IPHC_TF_11      0x18
+
+#define LOWPAN_IPHC_NH         0x04
+
+#define LOWPAN_IPHC_HLIM_MASK  0x03
+#define LOWPAN_IPHC_HLIM_00    0x00
+#define LOWPAN_IPHC_HLIM_01    0x01
+#define LOWPAN_IPHC_HLIM_10    0x02
+#define LOWPAN_IPHC_HLIM_11    0x03
+
+/* Values of fields within the IPHC encoding second byte */
+#define LOWPAN_IPHC_CID                0x80
+
+#define LOWPAN_IPHC_SAC                0x40
+
+#define LOWPAN_IPHC_SAM_MASK   0x30
+#define LOWPAN_IPHC_SAM_00     0x00
+#define LOWPAN_IPHC_SAM_01     0x10
+#define LOWPAN_IPHC_SAM_10     0x20
+#define LOWPAN_IPHC_SAM_11     0x30
+
+#define LOWPAN_IPHC_M          0x08
+
+#define LOWPAN_IPHC_DAC                0x04
+
+#define LOWPAN_IPHC_DAM_MASK   0x03
+#define LOWPAN_IPHC_DAM_00     0x00
+#define LOWPAN_IPHC_DAM_01     0x01
+#define LOWPAN_IPHC_DAM_10     0x02
+#define LOWPAN_IPHC_DAM_11     0x03
+
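
With these masks every IPHC field can be tested by a plain AND, no shifting: TF, NH and HLIM live in the first encoding byte, CID/SAC/SAM/M/DAC/DAM in the second. A standalone sketch of decoding the two bytes, with the mask values copied locally for the demo:

#include <stdint.h>
#include <stdio.h>

#define TF_MASK		0x18	/* local copies of the defines above */
#define NH		0x04
#define HLIM_MASK	0x03
#define SAM_MASK	0x30
#define M		0x08
#define DAM_MASK	0x03

int main(void)
{
	/* TF=11 (elided), NH set, HLIM=10 (64 hops), SAM=01, DAM=10 */
	uint8_t iphc0 = 0x18 | 0x04 | 0x02;
	uint8_t iphc1 = 0x10 | 0x02;

	printf("tf=%#x nh=%d hlim=%#x sam=%#x mcast=%d dam=%#x\n",
	       iphc0 & TF_MASK, !!(iphc0 & NH), iphc0 & HLIM_MASK,
	       iphc1 & SAM_MASK, !!(iphc1 & M), iphc1 & DAM_MASK);
	return 0;
}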
+/* ipv6 address based on the mac address;
+ * the second bit-flip (Universal/Local) is done according to RFC 2464
+ */
+#define is_addr_mac_addr_based(a, m) \
+       ((((a)->s6_addr[8])  == (((m)[0]) ^ 0x02)) &&   \
+        (((a)->s6_addr[9])  == (m)[1]) &&              \
+        (((a)->s6_addr[10]) == (m)[2]) &&              \
+        (((a)->s6_addr[11]) == (m)[3]) &&              \
+        (((a)->s6_addr[12]) == (m)[4]) &&              \
+        (((a)->s6_addr[13]) == (m)[5]) &&              \
+        (((a)->s6_addr[14]) == (m)[6]) &&              \
+        (((a)->s6_addr[15]) == (m)[7]))
+
+/* check whether we can compress the IID to 16 bits; this is possible
+ * only for unicast addresses whose IID has the form 0000:00ff:fe00:XXXX.
+ */
+#define lowpan_is_iid_16_bit_compressable(a)   \
+       ((((a)->s6_addr16[4]) == 0) &&          \
+        (((a)->s6_addr[10]) == 0) &&           \
+        (((a)->s6_addr[11]) == 0xff) &&        \
+        (((a)->s6_addr[12]) == 0xfe) &&        \
+        (((a)->s6_addr[13]) == 0))
+
+/* check whether the 112-bit gid of the multicast address is mappable to: */
+
+/* 48 bits, FFXX::00XX:XXXX:XXXX */
+#define lowpan_is_mcast_addr_compressable48(a) \
+       ((((a)->s6_addr16[1]) == 0) &&          \
+        (((a)->s6_addr16[2]) == 0) &&          \
+        (((a)->s6_addr16[3]) == 0) &&          \
+        (((a)->s6_addr16[4]) == 0) &&          \
+        (((a)->s6_addr[10]) == 0))
+
+/* 32 bits, FFXX::00XX:XXXX */
+#define lowpan_is_mcast_addr_compressable32(a) \
+       ((((a)->s6_addr16[1]) == 0) &&          \
+        (((a)->s6_addr16[2]) == 0) &&          \
+        (((a)->s6_addr16[3]) == 0) &&          \
+        (((a)->s6_addr16[4]) == 0) &&          \
+        (((a)->s6_addr16[5]) == 0) &&          \
+        (((a)->s6_addr[12]) == 0))
+
+/* 8 bits, FF02::00XX */
+#define lowpan_is_mcast_addr_compressable8(a)  \
+       ((((a)->s6_addr[1])  == 2) &&           \
+        (((a)->s6_addr16[1]) == 0) &&          \
+        (((a)->s6_addr16[2]) == 0) &&          \
+        (((a)->s6_addr16[3]) == 0) &&          \
+        (((a)->s6_addr16[4]) == 0) &&          \
+        (((a)->s6_addr16[5]) == 0) &&          \
+        (((a)->s6_addr16[6]) == 0) &&          \
+        (((a)->s6_addr[14]) == 0))
+
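
The three multicast macros test progressively larger all-zero spans of the 112-bit group ID, most compressed form first. A byte-wise standalone restatement of the same classification (the kernel macros use the s6_addr16 view instead):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Number of inline octets the DAM encoding needs for a multicast
 * address: 1, 4, 6 or 16, matching the macros above byte-for-byte.
 */
static int mcast_inline_octets(const uint8_t a[16])
{
	static const uint8_t zero[16];

	if (a[1] == 2 && !memcmp(&a[2], zero, 12) && a[14] == 0)
		return 1;			/* FF02::00XX */
	if (!memcmp(&a[2], zero, 10) && a[12] == 0)
		return 4;			/* FFXX::00XX:XXXX */
	if (!memcmp(&a[2], zero, 8) && a[10] == 0)
		return 6;			/* FFXX::00XX:XXXX:XXXX */
	return 16;				/* carried in full */
}

int main(void)
{
	uint8_t all_nodes[16] = { 0xff, 0x02 };	/* ff02::1 */

	all_nodes[15] = 0x01;
	printf("inline octets: %d\n", mcast_inline_octets(all_nodes));
	return 0;
}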
+static inline void iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
+                                               const void *lladdr)
+{
+       /* fe:80::XXXX:XXXX:XXXX:XXXX
+        *        \_________________/
+        *              hwaddr
+        */
+       ipaddr->s6_addr[0] = 0xFE;
+       ipaddr->s6_addr[1] = 0x80;
+       memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
+       /* the second bit-flip (Universal/Local)
+        * is done according to RFC 2464
+        */
+       ipaddr->s6_addr[8] ^= 0x02;
+}
+
+static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
+                                                const void *lladdr)
+{
+       const struct ieee802154_addr *addr = lladdr;
+       u8 eui64[EUI64_ADDR_LEN] = { };
+
+       switch (addr->mode) {
+       case IEEE802154_ADDR_LONG:
+               ieee802154_le64_to_be64(eui64, &addr->extended_addr);
+               iphc_uncompress_eui64_lladdr(ipaddr, eui64);
+               break;
+       case IEEE802154_ADDR_SHORT:
+               /* fe:80::ff:fe00:XXXX
+                *                \__/
+                *             short_addr
+                *
+                * Universe/Local bit is zero.
+                */
+               ipaddr->s6_addr[0] = 0xFE;
+               ipaddr->s6_addr[1] = 0x80;
+               ipaddr->s6_addr[11] = 0xFF;
+               ipaddr->s6_addr[12] = 0xFE;
+               ieee802154_le16_to_be16(&ipaddr->s6_addr16[7],
+                                       &addr->short_addr);
+               break;
+       default:
+               /* should never be reached; such frames are filtered by 802154 6lowpan */
+               WARN_ON_ONCE(1);
+               break;
+       }
+}
+
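
iphc_uncompress_eui64_lladdr() above is the standard RFC 4291/RFC 2464 construction: place the EUI-64 behind an fe80::/64 prefix and flip the Universal/Local bit. A standalone sketch with a made-up EUI-64:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EUI64_ADDR_LEN 8

static void eui64_to_linklocal(uint8_t ip[16],
			       const uint8_t lladdr[EUI64_ADDR_LEN])
{
	memset(ip, 0, 16);
	ip[0] = 0xfe;			/* fe80::/64 prefix */
	ip[1] = 0x80;
	memcpy(&ip[8], lladdr, EUI64_ADDR_LEN);
	ip[8] ^= 0x02;			/* flip the Universal/Local bit */
}

int main(void)
{
	/* hypothetical EUI-64 of a neighbour */
	static const uint8_t eui64[EUI64_ADDR_LEN] =
		{ 0x00, 0x12, 0x4b, 0x00, 0x01, 0x02, 0x03, 0x04 };
	uint8_t ip[16];
	int i;

	eui64_to_linklocal(ip, eui64);
	for (i = 0; i < 16; i += 2)
		printf("%02x%02x%s", ip[i], ip[i + 1], i < 14 ? ":" : "\n");
	return 0;
}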
 /* Uncompress address function for source and
  * destination address(non-multicast).
  *
- * address_mode is sam value or dam value.
+ * address_mode is the masked SAM or DAM value.
  */
-static int uncompress_addr(struct sk_buff *skb,
-                          struct in6_addr *ipaddr, const u8 address_mode,
-                          const u8 *lladdr, const u8 addr_type,
-                          const u8 addr_len)
+static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev,
+                          struct in6_addr *ipaddr, u8 address_mode,
+                          const void *lladdr)
 {
        bool fail;
 
        switch (address_mode) {
-       case LOWPAN_IPHC_ADDR_00:
+       /* SAM and DAM are the same here */
+       case LOWPAN_IPHC_DAM_00:
                /* for global link addresses */
                fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
                break;
-       case LOWPAN_IPHC_ADDR_01:
+       case LOWPAN_IPHC_SAM_01:
+       case LOWPAN_IPHC_DAM_01:
                /* fe:80::XXXX:XXXX:XXXX:XXXX */
                ipaddr->s6_addr[0] = 0xFE;
                ipaddr->s6_addr[1] = 0x80;
                fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
                break;
-       case LOWPAN_IPHC_ADDR_02:
+       case LOWPAN_IPHC_SAM_10:
+       case LOWPAN_IPHC_DAM_10:
                /* fe:80::ff:fe00:XXXX */
                ipaddr->s6_addr[0] = 0xFE;
                ipaddr->s6_addr[1] = 0x80;
@@ -86,38 +228,16 @@ static int uncompress_addr(struct sk_buff *skb,
                ipaddr->s6_addr[12] = 0xFE;
                fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
                break;
-       case LOWPAN_IPHC_ADDR_03:
+       case LOWPAN_IPHC_SAM_11:
+       case LOWPAN_IPHC_DAM_11:
                fail = false;
-               switch (addr_type) {
-               case IEEE802154_ADDR_LONG:
-                       /* fe:80::XXXX:XXXX:XXXX:XXXX
-                        *        \_________________/
-                        *              hwaddr
-                        */
-                       ipaddr->s6_addr[0] = 0xFE;
-                       ipaddr->s6_addr[1] = 0x80;
-                       memcpy(&ipaddr->s6_addr[8], lladdr, addr_len);
-                       /* second bit-flip (Universe/Local)
-                        * is done according RFC2464
-                        */
-                       ipaddr->s6_addr[8] ^= 0x02;
-                       break;
-               case IEEE802154_ADDR_SHORT:
-                       /* fe:80::ff:fe00:XXXX
-                        *                \__/
-                        *             short_addr
-                        *
-                        * Universe/Local bit is zero.
-                        */
-                       ipaddr->s6_addr[0] = 0xFE;
-                       ipaddr->s6_addr[1] = 0x80;
-                       ipaddr->s6_addr[11] = 0xFF;
-                       ipaddr->s6_addr[12] = 0xFE;
-                       ipaddr->s6_addr16[7] = htons(*((u16 *)lladdr));
+               switch (lowpan_priv(dev)->lltype) {
+               case LOWPAN_LLTYPE_IEEE802154:
+                       iphc_uncompress_802154_lladdr(ipaddr, lladdr);
                        break;
                default:
-                       pr_debug("Invalid addr_type set\n");
-                       return -EINVAL;
+                       iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
+                       break;
                }
                break;
        default:
@@ -141,24 +261,25 @@ static int uncompress_addr(struct sk_buff *skb,
  */
 static int uncompress_context_based_src_addr(struct sk_buff *skb,
                                             struct in6_addr *ipaddr,
-                                            const u8 sam)
+                                            u8 address_mode)
 {
-       switch (sam) {
-       case LOWPAN_IPHC_ADDR_00:
+       switch (address_mode) {
+       case LOWPAN_IPHC_SAM_00:
                /* unspec address ::
                 * Do nothing, address is already ::
                 */
                break;
-       case LOWPAN_IPHC_ADDR_01:
+       case LOWPAN_IPHC_SAM_01:
                /* TODO */
-       case LOWPAN_IPHC_ADDR_02:
+       case LOWPAN_IPHC_SAM_10:
                /* TODO */
-       case LOWPAN_IPHC_ADDR_03:
+       case LOWPAN_IPHC_SAM_11:
                /* TODO */
-               netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
+               netdev_warn(skb->dev, "SAM value 0x%x not supported\n",
+                           address_mode);
                return -EINVAL;
        default:
-               pr_debug("Invalid sam value: 0x%x\n", sam);
+               pr_debug("Invalid sam value: 0x%x\n", address_mode);
                return -EINVAL;
        }
 
@@ -174,11 +295,11 @@ static int uncompress_context_based_src_addr(struct sk_buff *skb,
  */
 static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
                                             struct in6_addr *ipaddr,
-                                            const u8 dam)
+                                            u8 address_mode)
 {
        bool fail;
 
-       switch (dam) {
+       switch (address_mode) {
        case LOWPAN_IPHC_DAM_00:
                /* 00:  128 bits.  The full address
                 * is carried in-line.
@@ -210,7 +331,7 @@ static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
                fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
                break;
        default:
-               pr_debug("DAM value has a wrong value: 0x%x\n", dam);
+               pr_debug("Invalid DAM value: 0x%x\n", address_mode);
                return -EINVAL;
        }
 
@@ -225,77 +346,142 @@ static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
        return 0;
 }
 
-/* TTL uncompression values */
-static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
-
-int
-lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
-                        const u8 *saddr, const u8 saddr_type,
-                        const u8 saddr_len, const u8 *daddr,
-                        const u8 daddr_type, const u8 daddr_len,
-                        u8 iphc0, u8 iphc1)
+/* get the ecn value from the iphc tf format and set it in the ipv6hdr */
+static inline void lowpan_iphc_tf_set_ecn(struct ipv6hdr *hdr, const u8 *tf)
 {
-       struct ipv6hdr hdr = {};
-       u8 tmp, num_context = 0;
-       int err;
+       /* the two highest bits are the ecn */
+       u8 ecn = tf[0] & 0xc0;
 
-       raw_dump_table(__func__, "raw skb data dump uncompressed",
-                      skb->data, skb->len);
+       /* ECN takes 0x30 in hdr->flow_lbl[0] */
+       hdr->flow_lbl[0] |= (ecn >> 2);
+}
 
-       /* another if the CID flag is set */
-       if (iphc1 & LOWPAN_IPHC_CID) {
-               pr_debug("CID flag is set, increase header with one\n");
-               if (lowpan_fetch_skb(skb, &num_context, sizeof(num_context)))
-                       return -EINVAL;
-       }
+/* get the dscp value from the iphc tf format and set it in the ipv6hdr */
+static inline void lowpan_iphc_tf_set_dscp(struct ipv6hdr *hdr, const u8 *tf)
+{
+       /* DSCP occupies the bits right after the ECN */
+       u8 dscp = tf[0] & 0x3f;
 
-       hdr.version = 6;
+       /* The four highest bits go into hdr->priority */
+       hdr->priority |= ((dscp & 0x3c) >> 2);
+       /* The two lowest bits are part of hdr->flow_lbl[0] */
+       hdr->flow_lbl[0] |= ((dscp & 0x03) << 6);
+}
 
-       /* Traffic Class and Flow Label */
-       switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
-       /* Traffic Class and FLow Label carried in-line
-        * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
+/* get the flow label from the iphc tf format and set it in the ipv6hdr */
+static inline void lowpan_iphc_tf_set_lbl(struct ipv6hdr *hdr, const u8 *lbl)
+{
+       /* The flow label always starts in the lower nibble of flow_lbl[0]
+        * and continues in the two bytes that follow; the higher nibble
+        * of flow_lbl[0] belongs to DSCP + ECN.
+        *
+        * Inside the inline data the flow_lbl position can differ, which
+        * is handled via the lbl pointer. E.g. in case "01" versus "00"
+        * the traffic class is shifted by 8 bits, and the different lbl
+        * pointer accounts for that.
+        */
-       case 0: /* 00b */
-               if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
+       hdr->flow_lbl[0] |= lbl[0] & 0x0f;
+       memcpy(&hdr->flow_lbl[1], &lbl[1], 2);
+}
+
+/* lowpan_iphc_tf_decompress - decompress the traffic class and flow label.
+ *     This function returns zero on success or a negative value on
+ *     failure.
+ */
+static int lowpan_iphc_tf_decompress(struct sk_buff *skb, struct ipv6hdr *hdr,
+                                    u8 val)
+{
+       u8 tf[4];
+
+       /* Traffic Class and Flow Label */
+       switch (val) {
+       case LOWPAN_IPHC_TF_00:
+               /* ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) */
+               if (lowpan_fetch_skb(skb, tf, 4))
                        return -EINVAL;
 
-               memcpy(&hdr.flow_lbl, &skb->data[0], 3);
-               skb_pull(skb, 3);
-               hdr.priority = ((tmp >> 2) & 0x0f);
-               hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
-                                       (hdr.flow_lbl[0] & 0x0f);
+               /*                      1                   2                   3
+                *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+                * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                * |ECN|   DSCP    |  rsv  |             Flow Label                |
+                * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                */
+               lowpan_iphc_tf_set_ecn(hdr, tf);
+               lowpan_iphc_tf_set_dscp(hdr, tf);
+               lowpan_iphc_tf_set_lbl(hdr, &tf[1]);
                break;
-       /* Traffic class carried in-line
-        * ECN + DSCP (1 byte), Flow Label is elided
-        */
-       case 2: /* 10b */
-               if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
+       case LOWPAN_IPHC_TF_01:
+               /* ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided. */
+               if (lowpan_fetch_skb(skb, tf, 3))
                        return -EINVAL;
 
-               hdr.priority = ((tmp >> 2) & 0x0f);
-               hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
+               /*                     1                   2
+                * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+                * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                * |ECN|rsv|             Flow Label                |
+                * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                */
+               lowpan_iphc_tf_set_ecn(hdr, tf);
+               lowpan_iphc_tf_set_lbl(hdr, &tf[0]);
                break;
-       /* Flow Label carried in-line
-        * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
-        */
-       case 1: /* 01b */
-               if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
+       case LOWPAN_IPHC_TF_10:
+               /* ECN + DSCP (1 byte), Flow Label is elided. */
+               if (lowpan_fetch_skb(skb, tf, 1))
                        return -EINVAL;
 
-               hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
-               memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
-               skb_pull(skb, 2);
+               /*  0 1 2 3 4 5 6 7
+                * +-+-+-+-+-+-+-+-+
+                * |ECN|   DSCP    |
+                * +-+-+-+-+-+-+-+-+
+                */
+               lowpan_iphc_tf_set_ecn(hdr, tf);
+               lowpan_iphc_tf_set_dscp(hdr, tf);
                break;
-       /* Traffic Class and Flow Label are elided */
-       case 3: /* 11b */
+       case LOWPAN_IPHC_TF_11:
+               /* Traffic Class and Flow Label are elided */
                break;
        default:
-               break;
+               WARN_ON_ONCE(1);
+               return -EINVAL;
        }
 
+       return 0;
+}
+
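
In the TF_10 case the whole traffic class travels as one inline byte, ECN in the top two bits and DSCP in the lower six, while the IPv6 header stores the same information as DSCP|ECN split across priority and flow_lbl[0]. A standalone round-trip sketch of that byte, mirroring lowpan_iphc_get_tc() and the tf_set helpers in this hunk:

#include <stdint.h>
#include <stdio.h>

/* pack: IPv6 header fields -> IPHC TF_10 byte |ECN(2)|DSCP(6)| */
static uint8_t tf10_pack(uint8_t priority, uint8_t flow_lbl0)
{
	uint8_t dscp = (uint8_t)((priority << 2) | (flow_lbl0 >> 6));
	uint8_t ecn = (uint8_t)((flow_lbl0 & 0x30) << 2);

	return ecn | dscp;
}

/* unpack: TF_10 byte -> priority (top 4 TC bits) and flow_lbl[0] */
static void tf10_unpack(uint8_t tf, uint8_t *priority, uint8_t *flow_lbl0)
{
	*priority = (tf & 0x3c) >> 2;
	*flow_lbl0 = (uint8_t)(((tf & 0x03) << 6) | ((tf & 0xc0) >> 2));
}

int main(void)
{
	uint8_t prio = 0x0b, lbl0 = 0x70, tf, p2, l2;	/* DSCP 0x2d, ECN 3 */

	tf = tf10_pack(prio, lbl0);
	tf10_unpack(tf, &p2, &l2);
	printf("tf=%#04x priority=%#x flow_lbl0=%#x\n", tf, p2, l2);
	return 0;
}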
+/* TTL uncompression values */
+static const u8 lowpan_ttl_values[] = {
+       [LOWPAN_IPHC_HLIM_01] = 1,
+       [LOWPAN_IPHC_HLIM_10] = 64,
+       [LOWPAN_IPHC_HLIM_11] = 255,
+};
+
+int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
+                            const void *daddr, const void *saddr)
+{
+       struct ipv6hdr hdr = {};
+       u8 iphc0, iphc1;
+       int err;
+
+       raw_dump_table(__func__, "raw skb data dump uncompressed",
+                      skb->data, skb->len);
+
+       if (lowpan_fetch_skb(skb, &iphc0, sizeof(iphc0)) ||
+           lowpan_fetch_skb(skb, &iphc1, sizeof(iphc1)))
+               return -EINVAL;
+
+       /* another byte follows if the CID flag is set */
+       if (iphc1 & LOWPAN_IPHC_CID)
+               return -ENOTSUPP;
+
+       hdr.version = 6;
+
+       err = lowpan_iphc_tf_decompress(skb, &hdr,
+                                       iphc0 & LOWPAN_IPHC_TF_MASK);
+       if (err < 0)
+               return err;
+
        /* Next Header */
-       if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
+       if (!(iphc0 & LOWPAN_IPHC_NH)) {
                /* Next header is carried inline */
                if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr)))
                        return -EINVAL;
@@ -305,35 +491,30 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
        }
 
        /* Hop Limit */
-       if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) {
-               hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
+       if ((iphc0 & LOWPAN_IPHC_HLIM_MASK) != LOWPAN_IPHC_HLIM_00) {
+               hdr.hop_limit = lowpan_ttl_values[iphc0 & LOWPAN_IPHC_HLIM_MASK];
        } else {
                if (lowpan_fetch_skb(skb, &hdr.hop_limit,
                                     sizeof(hdr.hop_limit)))
                        return -EINVAL;
        }
 
-       /* Extract SAM to the tmp variable */
-       tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
-
        if (iphc1 & LOWPAN_IPHC_SAC) {
                /* Source address context based uncompression */
                pr_debug("SAC bit is set. Handle context based source address.\n");
-               err = uncompress_context_based_src_addr(skb, &hdr.saddr, tmp);
+               err = uncompress_context_based_src_addr(skb, &hdr.saddr,
+                                                       iphc1 & LOWPAN_IPHC_SAM_MASK);
        } else {
                /* Source address uncompression */
                pr_debug("source address stateless compression\n");
-               err = uncompress_addr(skb, &hdr.saddr, tmp, saddr,
-                                     saddr_type, saddr_len);
+               err = uncompress_addr(skb, dev, &hdr.saddr,
+                                     iphc1 & LOWPAN_IPHC_SAM_MASK, saddr);
        }
 
        /* Check on error of previous branch */
        if (err)
                return -EINVAL;
 
-       /* Extract DAM to the tmp variable */
-       tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
-
        /* check for Multicast Compression */
        if (iphc1 & LOWPAN_IPHC_M) {
                if (iphc1 & LOWPAN_IPHC_DAC) {
@@ -341,22 +522,22 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
                        /* TODO: implement this */
                } else {
                        err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr,
-                                                               tmp);
+                                                               iphc1 & LOWPAN_IPHC_DAM_MASK);
 
                        if (err)
                                return -EINVAL;
                }
        } else {
-               err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
-                                     daddr_type, daddr_len);
+               err = uncompress_addr(skb, dev, &hdr.daddr,
+                                     iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
                pr_debug("dest: stateless compression mode %d dest %pI6c\n",
-                        tmp, &hdr.daddr);
+                        iphc1 & LOWPAN_IPHC_DAM_MASK, &hdr.daddr);
                if (err)
                        return -EINVAL;
        }
 
        /* Next header data uncompression */
-       if (iphc0 & LOWPAN_IPHC_NH_C) {
+       if (iphc0 & LOWPAN_IPHC_NH) {
                err = lowpan_nhc_do_uncompression(skb, dev, &hdr);
                if (err < 0)
                        return err;
@@ -397,42 +578,176 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(lowpan_header_decompress);
 
-static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift,
-                                 const struct in6_addr *ipaddr,
-                                 const unsigned char *lladdr)
+static const u8 lowpan_iphc_dam_to_sam_value[] = {
+       [LOWPAN_IPHC_DAM_00] = LOWPAN_IPHC_SAM_00,
+       [LOWPAN_IPHC_DAM_01] = LOWPAN_IPHC_SAM_01,
+       [LOWPAN_IPHC_DAM_10] = LOWPAN_IPHC_SAM_10,
+       [LOWPAN_IPHC_DAM_11] = LOWPAN_IPHC_SAM_11,
+};
+
+static u8 lowpan_compress_addr_64(u8 **hc_ptr, const struct in6_addr *ipaddr,
+                                 const unsigned char *lladdr, bool sam)
 {
-       u8 val = 0;
+       u8 dam = LOWPAN_IPHC_DAM_00;
 
        if (is_addr_mac_addr_based(ipaddr, lladdr)) {
-               val = 3; /* 0-bits */
+               dam = LOWPAN_IPHC_DAM_11; /* 0-bits */
                pr_debug("address compression 0 bits\n");
        } else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
                /* compress IID to 16 bits xxxx::XXXX */
                lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[7], 2);
-               val = 2; /* 16-bits */
+               dam = LOWPAN_IPHC_DAM_10; /* 16-bits */
                raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
                                *hc_ptr - 2, 2);
        } else {
                /* do not compress IID => xxxx::IID */
                lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[4], 8);
-               val = 1; /* 64-bits */
+               dam = LOWPAN_IPHC_DAM_01; /* 64-bits */
                raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
                                *hc_ptr - 8, 8);
        }
 
-       return rol8(val, shift);
+       if (sam)
+               return lowpan_iphc_dam_to_sam_value[dam];
+       else
+               return dam;
 }
 
-int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
-                          unsigned short type, const void *_daddr,
-                          const void *_saddr, unsigned int len)
+/* lowpan_iphc_get_tc - get the ECN + DSCP fields in hc format */
+static inline u8 lowpan_iphc_get_tc(const struct ipv6hdr *hdr)
 {
-       u8 tmp, iphc0, iphc1, *hc_ptr;
+       u8 dscp, ecn;
+
+       /* hdr->priority holds the higher bits of the dscp, the lower ones
+        * are part of flow_lbl[0]. ECN and DSCP are swapped in the ipv6 hdr.
+        */
+       dscp = (hdr->priority << 2) | ((hdr->flow_lbl[0] & 0xc0) >> 6);
+       /* ECN sits in the two lower bits of the first nibble of flow_lbl[0] */
+       ecn = (hdr->flow_lbl[0] & 0x30);
+       /* for readable debug output, shift ecn down to its actual value */
+       pr_debug("ecn 0x%02x dscp 0x%02x\n", ecn >> 4, dscp);
+       /* ECN is at 0x30 now; shift it up to form ECN + DSCP */
+       return (ecn << 2) | dscp;
+}
+
+/* lowpan_iphc_is_flow_lbl_zero - check if flow label is zero */
+static inline bool lowpan_iphc_is_flow_lbl_zero(const struct ipv6hdr *hdr)
+{
+       return ((!(hdr->flow_lbl[0] & 0x0f)) &&
+               !hdr->flow_lbl[1] && !hdr->flow_lbl[2]);
+}
+
+/* lowpan_iphc_tf_compress - compress the traffic class and flow label from
+ *     the ipv6hdr. Returns the TF format identifier that was used.
+ */
+static u8 lowpan_iphc_tf_compress(u8 **hc_ptr, const struct ipv6hdr *hdr)
+{
+       /* get the ecn and dscp data as a single byte: ECN(hi) + DSCP(lo) */
+       u8 tc = lowpan_iphc_get_tc(hdr), tf[4], val;
+
+       /* print out the traffic class in hc format */
+       pr_debug("tc 0x%02x\n", tc);
+
+       if (lowpan_iphc_is_flow_lbl_zero(hdr)) {
+               if (!tc) {
+                       /* 11:  Traffic Class and Flow Label are elided. */
+                       val = LOWPAN_IPHC_TF_11;
+               } else {
+                       /* 10:  ECN + DSCP (1 byte), Flow Label is elided.
+                        *
+                        *  0 1 2 3 4 5 6 7
+                        * +-+-+-+-+-+-+-+-+
+                        * |ECN|   DSCP    |
+                        * +-+-+-+-+-+-+-+-+
+                        */
+                       lowpan_push_hc_data(hc_ptr, &tc, sizeof(tc));
+                       val = LOWPAN_IPHC_TF_10;
+               }
+       } else {
+               /* check if dscp is zero; it occupies the bits after the first two */
+               if (!(tc & 0x3f)) {
+                       /* 01:  ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
+                        *
+                        *                     1                   2
+                        * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+                        * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                        * |ECN|rsv|             Flow Label                |
+                        * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                        */
+                       memcpy(&tf[0], &hdr->flow_lbl[0], 3);
+                       /* zero the highest 4 bits, which contain DSCP + ECN */
+                       tf[0] &= ~0xf0;
+                       /* set ECN */
+                       tf[0] |= (tc & 0xc0);
+
+                       lowpan_push_hc_data(hc_ptr, tf, 3);
+                       val = LOWPAN_IPHC_TF_01;
+               } else {
+                       /* 00:  ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
+                        *
+                        *                      1                   2                   3
+                        *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+                        * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                        * |ECN|   DSCP    |  rsv  |             Flow Label                |
+                        * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                        */
+                       memcpy(&tf[0], &tc, sizeof(tc));
+                       /* the highest nibble of flow_lbl[0] is part of
+                        * DSCP + ECN; it becomes the 4-bit pad and is
+                        * zeroed afterwards.
+                        */
+                       memcpy(&tf[1], &hdr->flow_lbl[0], 3);
+                       /* zero the 4-bit pad, which is reserved */
+                       tf[1] &= ~0xf0;
+
+                       lowpan_push_hc_data(hc_ptr, tf, 4);
+                       val = LOWPAN_IPHC_TF_00;
+               }
+       }
+
+       return val;
+}
+
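
Reduced to its format decision, lowpan_iphc_tf_compress() chooses among the four TF encodings using two predicates: whether the 20-bit flow label is zero, and whether the TC byte (or only its DSCP part) is zero. A standalone sketch of that decision tree:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TF_00 0x00	/* ECN + DSCP + pad + flow label, 4 bytes inline */
#define TF_01 0x08	/* ECN + pad + flow label, 3 bytes, DSCP elided  */
#define TF_10 0x10	/* ECN + DSCP, 1 byte, flow label elided         */
#define TF_11 0x18	/* traffic class and flow label both elided      */

static uint8_t tf_format(uint8_t tc, bool lbl_zero)
{
	if (lbl_zero)
		return tc ? TF_10 : TF_11;
	return (tc & 0x3f) ? TF_00 : TF_01;	/* DSCP zero -> elide it */
}

int main(void)
{
	printf("%#x %#x %#x %#x\n",
	       tf_format(0x00, true),		/* TF_11: nothing inline */
	       tf_format(0x2d, true),		/* TF_10: one TC byte    */
	       tf_format(0xc0, false),		/* TF_01: ECN + label    */
	       tf_format(0x2d, false));		/* TF_00: all inline     */
	return 0;
}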
+static u8 lowpan_iphc_mcast_addr_compress(u8 **hc_ptr,
+                                         const struct in6_addr *ipaddr)
+{
+       u8 val;
+
+       if (lowpan_is_mcast_addr_compressable8(ipaddr)) {
+               pr_debug("compressed to 1 octet\n");
+               /* use last byte */
+               lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[15], 1);
+               val = LOWPAN_IPHC_DAM_11;
+       } else if (lowpan_is_mcast_addr_compressable32(ipaddr)) {
+               pr_debug("compressed to 4 octets\n");
+               /* second byte + the last three */
+               lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[1], 1);
+               lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[13], 3);
+               val = LOWPAN_IPHC_DAM_10;
+       } else if (lowpan_is_mcast_addr_compressable48(ipaddr)) {
+               pr_debug("compressed to 6 octets\n");
+               /* second byte + the last five */
+               lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[1], 1);
+               lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[11], 5);
+               val = LOWPAN_IPHC_DAM_01;
+       } else {
+               pr_debug("using full address\n");
+               lowpan_push_hc_data(hc_ptr, ipaddr->s6_addr, 16);
+               val = LOWPAN_IPHC_DAM_00;
+       }
+
+       return val;
+}
+
+int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
+                          const void *daddr, const void *saddr)
+{
+       u8 iphc0, iphc1, *hc_ptr;
        struct ipv6hdr *hdr;
-       u8 head[100] = {};
+       u8 head[LOWPAN_IPHC_MAX_HC_BUF_LEN] = {};
        int ret, addr_type;
 
-       if (type != ETH_P_IPV6)
+       if (skb->protocol != htons(ETH_P_IPV6))
                return -EINVAL;
 
        hdr = ipv6_hdr(skb);
@@ -456,63 +771,26 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
 
        /* TODO: context lookup */
 
-       raw_dump_inline(__func__, "saddr",
-                       (unsigned char *)_saddr, IEEE802154_ADDR_LEN);
-       raw_dump_inline(__func__, "daddr",
-                       (unsigned char *)_daddr, IEEE802154_ADDR_LEN);
+       raw_dump_inline(__func__, "saddr", saddr, EUI64_ADDR_LEN);
+       raw_dump_inline(__func__, "daddr", daddr, EUI64_ADDR_LEN);
 
        raw_dump_table(__func__, "sending raw skb network uncompressed packet",
                       skb->data, skb->len);
 
-       /* Traffic class, flow label
-        * If flow label is 0, compress it. If traffic class is 0, compress it
-        * We have to process both in the same time as the offset of traffic
-        * class depends on the presence of version and flow label
-        */
-
-       /* hc format of TC is ECN | DSCP , original one is DSCP | ECN */
-       tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
-       tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
-
-       if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
-           (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
-               /* flow label can be compressed */
-               iphc0 |= LOWPAN_IPHC_FL_C;
-               if ((hdr->priority == 0) &&
-                   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
-                       /* compress (elide) all */
-                       iphc0 |= LOWPAN_IPHC_TC_C;
-               } else {
-                       /* compress only the flow label */
-                       *hc_ptr = tmp;
-                       hc_ptr += 1;
-               }
-       } else {
-               /* Flow label cannot be compressed */
-               if ((hdr->priority == 0) &&
-                   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
-                       /* compress only traffic class */
-                       iphc0 |= LOWPAN_IPHC_TC_C;
-                       *hc_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
-                       memcpy(hc_ptr + 1, &hdr->flow_lbl[1], 2);
-                       hc_ptr += 3;
-               } else {
-                       /* compress nothing */
-                       memcpy(hc_ptr, hdr, 4);
-                       /* replace the top byte with new ECN | DSCP format */
-                       *hc_ptr = tmp;
-                       hc_ptr += 4;
-               }
-       }
+       /* Traffic Class, Flow Label compression */
+       iphc0 |= lowpan_iphc_tf_compress(&hc_ptr, hdr);
 
        /* NOTE: payload length is always compressed */
 
        /* Check if we provide the nhc format for nexthdr and compression
         * functionality. If not nexthdr is handled inline and not compressed.
         */
-       ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr, &iphc0);
-       if (ret < 0)
-               return ret;
+       ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr);
+       if (ret == -ENOENT)
+               lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr,
+                                   sizeof(hdr->nexthdr));
+       else
+               iphc0 |= LOWPAN_IPHC_NH;
 
        /* Hop limit
         * if 1:   compress, encoding is 01
@@ -522,13 +800,13 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
         */
        switch (hdr->hop_limit) {
        case 1:
-               iphc0 |= LOWPAN_IPHC_TTL_1;
+               iphc0 |= LOWPAN_IPHC_HLIM_01;
                break;
        case 64:
-               iphc0 |= LOWPAN_IPHC_TTL_64;
+               iphc0 |= LOWPAN_IPHC_HLIM_10;
                break;
        case 255:
-               iphc0 |= LOWPAN_IPHC_TTL_255;
+               iphc0 |= LOWPAN_IPHC_HLIM_11;
                break;
        default:
                lowpan_push_hc_data(&hc_ptr, &hdr->hop_limit,
@@ -542,9 +820,8 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
                iphc1 |= LOWPAN_IPHC_SAC;
        } else {
                if (addr_type & IPV6_ADDR_LINKLOCAL) {
-                       iphc1 |= lowpan_compress_addr_64(&hc_ptr,
-                                                        LOWPAN_IPHC_SAM_BIT,
-                                                        &hdr->saddr, _saddr);
+                       iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->saddr,
+                                                        saddr, true);
                        pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n",
                                 &hdr->saddr, iphc1);
                } else {
@@ -558,38 +835,12 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        if (addr_type & IPV6_ADDR_MULTICAST) {
                pr_debug("destination address is multicast: ");
                iphc1 |= LOWPAN_IPHC_M;
-               if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
-                       pr_debug("compressed to 1 octet\n");
-                       iphc1 |= LOWPAN_IPHC_DAM_11;
-                       /* use last byte */
-                       lowpan_push_hc_data(&hc_ptr,
-                                           &hdr->daddr.s6_addr[15], 1);
-               } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
-                       pr_debug("compressed to 4 octets\n");
-                       iphc1 |= LOWPAN_IPHC_DAM_10;
-                       /* second byte + the last three */
-                       lowpan_push_hc_data(&hc_ptr,
-                                           &hdr->daddr.s6_addr[1], 1);
-                       lowpan_push_hc_data(&hc_ptr,
-                                           &hdr->daddr.s6_addr[13], 3);
-               } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
-                       pr_debug("compressed to 6 octets\n");
-                       iphc1 |= LOWPAN_IPHC_DAM_01;
-                       /* second byte + the last five */
-                       lowpan_push_hc_data(&hc_ptr,
-                                           &hdr->daddr.s6_addr[1], 1);
-                       lowpan_push_hc_data(&hc_ptr,
-                                           &hdr->daddr.s6_addr[11], 5);
-               } else {
-                       pr_debug("using full address\n");
-                       iphc1 |= LOWPAN_IPHC_DAM_00;
-                       lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16);
-               }
+               iphc1 |= lowpan_iphc_mcast_addr_compress(&hc_ptr, &hdr->daddr);
        } else {
                if (addr_type & IPV6_ADDR_LINKLOCAL) {
                        /* TODO: context lookup */
-                       iphc1 |= lowpan_compress_addr_64(&hc_ptr,
-                               LOWPAN_IPHC_DAM_BIT, &hdr->daddr, _daddr);
+                       iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->daddr,
+                                                        daddr, false);
                        pr_debug("dest address unicast link-local %pI6c "
                                 "iphc1 0x%02x\n", &hdr->daddr, iphc1);
                } else {
@@ -599,7 +850,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        }
 
        /* next header compression */
-       if (iphc0 & LOWPAN_IPHC_NH_C) {
+       if (iphc0 & LOWPAN_IPHC_NH) {
                ret = lowpan_nhc_do_compression(skb, hdr, &hc_ptr);
                if (ret < 0)
                        return ret;
index fd20fc51a7c49ffb9c2f0782973df08e1f8535c6..7008d53e455c5254db5879bb044b0a4b6d706775 100644 (file)
@@ -95,23 +95,20 @@ static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
 }
 
 int lowpan_nhc_check_compression(struct sk_buff *skb,
-                                const struct ipv6hdr *hdr, u8 **hc_ptr,
-                                u8 *iphc0)
+                                const struct ipv6hdr *hdr, u8 **hc_ptr)
 {
        struct lowpan_nhc *nhc;
+       int ret = 0;
 
        spin_lock_bh(&lowpan_nhc_lock);
 
        nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
-       if (nhc && nhc->compress)
-               *iphc0 |= LOWPAN_IPHC_NH_C;
-       else
-               lowpan_push_hc_data(hc_ptr, &hdr->nexthdr,
-                                   sizeof(hdr->nexthdr));
+       if (!(nhc && nhc->compress))
+               ret = -ENOENT;
 
        spin_unlock_bh(&lowpan_nhc_lock);
 
-       return 0;
+       return ret;
 }
 
 int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
@@ -157,7 +154,8 @@ out:
        return ret;
 }
 
-int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+int lowpan_nhc_do_uncompression(struct sk_buff *skb,
+                               const struct net_device *dev,
                                struct ipv6hdr *hdr)
 {
        struct lowpan_nhc *nhc;
index c249f17fa37b8d0c5654c5abde57b38ba977a453..8030414001361bbdd7e3bac62d468f1032f61fda 100644 (file)
@@ -86,19 +86,16 @@ struct lowpan_nhc *lowpan_nhc_by_nexthdr(u8 nexthdr);
 
 /**
  * lowpan_nhc_check_compression - checks if we support compression format. If
- *     we support the nhc by nexthdr field, the 6LoWPAN iphc NHC bit will be
- *     set. If we don't support nexthdr will be added as inline data to the
- *     6LoWPAN header.
+ *     we support the nhc by nexthdr field, the function will return 0. If we
+ *     don't support the nhc by nexthdr this function will return -ENOENT.
  *
  * @skb: skb of 6LoWPAN header to read nhc and replace header.
  * @hdr: ipv6hdr to check the nexthdr value
  * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of
  *         replaced header.
- * @iphc0: iphc0 pointer to set the 6LoWPAN NHC bit
  */
 int lowpan_nhc_check_compression(struct sk_buff *skb,
-                                const struct ipv6hdr *hdr, u8 **hc_ptr,
-                                u8 *iphc0);
+                                const struct ipv6hdr *hdr, u8 **hc_ptr);
 
 /**
  * lowpan_nhc_do_compression - calling compress callback for nhc
@@ -119,7 +116,8 @@ int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
  * @dev: netdevice for print logging information.
  * @hdr: ipv6hdr for setting nexthdr value.
  */
-int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+int lowpan_nhc_do_uncompression(struct sk_buff *skb,
+                               const struct net_device *dev,
                                struct ipv6hdr *hdr);
 
 /**
index 72d0b57eb6e591941cf21ebcacc0c1a5783d9f7a..69537a2eaab1317b33a535066a81c7c118aa3d96 100644 (file)
 
 #include "nhc.h"
 
-#define LOWPAN_NHC_UDP_IDLEN   1
+#define LOWPAN_NHC_UDP_MASK            0xF8
+#define LOWPAN_NHC_UDP_ID              0xF0
+#define LOWPAN_NHC_UDP_IDLEN           1
+
+#define LOWPAN_NHC_UDP_4BIT_PORT       0xF0B0
+#define LOWPAN_NHC_UDP_4BIT_MASK       0xFFF0
+#define LOWPAN_NHC_UDP_8BIT_PORT       0xF000
+#define LOWPAN_NHC_UDP_8BIT_MASK       0xFF00
+
+/* values for port compression, _with checksum_, i.e. bit 5 set to 0 */
+
+/* all inline */
+#define LOWPAN_NHC_UDP_CS_P_00 0xF0
+/* source 16bit inline, dest = 0xF0 + 8 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_01 0xF1
+/* source = 0xF0 + 8bit inline, dest = 16 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_10 0xF2
+/* source & dest = 0xF0B + 4bit inline */
+#define LOWPAN_NHC_UDP_CS_P_11 0xF3
+/* checksum elided */
+#define LOWPAN_NHC_UDP_CS_C    0x04
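
These constants implement the UDP port compression of RFC 6282: when both ports fall into 0xF0B0-0xF0BF they shrink to a nibble each, and a single port in 0xF000-0xF0FF shrinks to one byte. A standalone sketch computing the two P bits of the UDP NHC byte:

#include <stdint.h>
#include <stdio.h>

#define UDP_4BIT_PORT 0xF0B0	/* local copies of the defines above */
#define UDP_4BIT_MASK 0xFFF0
#define UDP_8BIT_PORT 0xF000
#define UDP_8BIT_MASK 0xFF00

static uint8_t udp_port_mode(uint16_t src, uint16_t dst)
{
	if ((src & UDP_4BIT_MASK) == UDP_4BIT_PORT &&
	    (dst & UDP_4BIT_MASK) == UDP_4BIT_PORT)
		return 0x03;	/* both ports as 4-bit nibbles */
	if ((dst & UDP_8BIT_MASK) == UDP_8BIT_PORT)
		return 0x01;	/* dst as 8 bits, src carried inline */
	if ((src & UDP_8BIT_MASK) == UDP_8BIT_PORT)
		return 0x02;	/* src as 8 bits, dst carried inline */
	return 0x00;		/* both ports carried inline */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       udp_port_mode(0xF0B1, 0xF0B2),	/* 3 */
	       udp_port_mode(0x1234, 0xF042),	/* 1 */
	       udp_port_mode(0xF042, 0x1234),	/* 2 */
	       udp_port_mode(0x1234, 0x5678));	/* 0 */
	return 0;
}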
 
 static int udp_uncompress(struct sk_buff *skb, size_t needed)
 {
index db73b8a1433f86bfadd60aa9daae1b59d42e6192..d85af2385486c60f7cf10f84ca2b4e08ff5dbeb2 100644 (file)
@@ -21,8 +21,6 @@
 #include <net/ip6_route.h>
 #include <net/addrconf.h>
 
-#include <net/af_ieee802154.h> /* to get the address type */
-
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
@@ -272,7 +270,6 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
                           struct l2cap_chan *chan)
 {
        const u8 *saddr, *daddr;
-       u8 iphc0, iphc1;
        struct lowpan_dev *dev;
        struct lowpan_peer *peer;
 
@@ -287,22 +284,7 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
        saddr = peer->eui64_addr;
        daddr = dev->netdev->dev_addr;
 
-       /* at least two bytes will be used for the encoding */
-       if (skb->len < 2)
-               return -EINVAL;
-
-       if (lowpan_fetch_skb_u8(skb, &iphc0))
-               return -EINVAL;
-
-       if (lowpan_fetch_skb_u8(skb, &iphc1))
-               return -EINVAL;
-
-       return lowpan_header_decompress(skb, netdev,
-                                       saddr, IEEE802154_ADDR_LONG,
-                                       EUI64_ADDR_LEN, daddr,
-                                       IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
-                                       iphc0, iphc1);
-
+       return lowpan_header_decompress(skb, netdev, daddr, saddr);
 }
 
 static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
@@ -314,15 +296,17 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
        if (!netif_running(dev))
                goto drop;
 
-       if (dev->type != ARPHRD_6LOWPAN)
+       if (dev->type != ARPHRD_6LOWPAN || !skb->len)
                goto drop;
 
+       skb_reset_network_header(skb);
+
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                goto drop;
 
        /* check that it's our buffer */
-       if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
+       if (lowpan_is_ipv6(*skb_network_header(skb))) {
                /* Copy the packet so that the IPv6 header is
                 * properly aligned.
                 */
@@ -334,7 +318,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;
 
-               skb_reset_network_header(local_skb);
                skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
 
                if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
@@ -347,38 +330,34 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
                consume_skb(local_skb);
                consume_skb(skb);
-       } else {
-               switch (skb->data[0] & 0xe0) {
-               case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
-                       local_skb = skb_clone(skb, GFP_ATOMIC);
-                       if (!local_skb)
-                               goto drop;
+       } else if (lowpan_is_iphc(*skb_network_header(skb))) {
+               local_skb = skb_clone(skb, GFP_ATOMIC);
+               if (!local_skb)
+                       goto drop;
 
-                       ret = iphc_decompress(local_skb, dev, chan);
-                       if (ret < 0) {
-                               kfree_skb(local_skb);
-                               goto drop;
-                       }
+               ret = iphc_decompress(local_skb, dev, chan);
+               if (ret < 0) {
+                       kfree_skb(local_skb);
+                       goto drop;
+               }
 
-                       local_skb->protocol = htons(ETH_P_IPV6);
-                       local_skb->pkt_type = PACKET_HOST;
-                       local_skb->dev = dev;
+               local_skb->protocol = htons(ETH_P_IPV6);
+               local_skb->pkt_type = PACKET_HOST;
+               local_skb->dev = dev;
 
-                       if (give_skb_to_upper(local_skb, dev)
-                                       != NET_RX_SUCCESS) {
-                               kfree_skb(local_skb);
-                               goto drop;
-                       }
+               if (give_skb_to_upper(local_skb, dev)
+                               != NET_RX_SUCCESS) {
+                       kfree_skb(local_skb);
+                       goto drop;
+               }
 
-                       dev->stats.rx_bytes += skb->len;
-                       dev->stats.rx_packets++;
+               dev->stats.rx_bytes += skb->len;
+               dev->stats.rx_packets++;
 
-                       consume_skb(local_skb);
-                       consume_skb(skb);
-                       break;
-               default:
-                       break;
-               }
+               consume_skb(local_skb);
+               consume_skb(skb);
+       } else {
+               goto drop;
        }
 
        return NET_RX_SUCCESS;
@@ -492,8 +471,7 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
                status = 1;
        }
 
-       lowpan_header_compress(skb, netdev, ETH_P_IPV6, daddr,
-                              dev->netdev->dev_addr, skb->len);
+       lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);
 
        err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
        if (err < 0)
@@ -1135,7 +1113,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
                return -ENOENT;
 
        hci_dev_lock(hdev);
-       hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+       hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
        hci_dev_unlock(hdev);
 
        if (!hcon)
index 70f9d945faf7b439ce7a5283e94bc9687e8e67af..c5571792921322560b48da05b59c626c305bb08a 100644 (file)
@@ -33,7 +33,7 @@
 
 #include "selftest.h"
 
-#define VERSION "2.20"
+#define VERSION "2.21"
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO   8
index b4548c739a6475446d643bd5b01ab8627ef1f08e..85b82f7adbd2dd96ec7163a6d3bc3c155dc2518e 100644 (file)
@@ -59,15 +59,11 @@ static const struct sco_param esco_param_msbc[] = {
        { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
 };
 
-static void hci_le_create_connection_cancel(struct hci_conn *conn)
-{
-       hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
-}
-
 /* This function requires the caller holds hdev->lock */
 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
 {
        struct hci_conn_params *params;
+       struct hci_dev *hdev = conn->hdev;
        struct smp_irk *irk;
        bdaddr_t *bdaddr;
        u8 bdaddr_type;
@@ -76,14 +72,15 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
        bdaddr_type = conn->dst_type;
 
        /* Check if we need to convert to identity address */
-       irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
+       irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
        if (irk) {
                bdaddr = &irk->bdaddr;
                bdaddr_type = irk->addr_type;
        }
 
-       params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
-       if (!params)
+       params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
+                                          bdaddr_type);
+       if (!params || !params->explicit_connect)
                return;
 
        /* The connection attempt was doing scan for new RPA, and is
@@ -91,19 +88,97 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
         * autoconnect action, remove them completely. If they are, just unmark
         * them as waiting for connection, by clearing explicit_connect field.
         */
-       if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
-               hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
-       else
-               params->explicit_connect = false;
+       params->explicit_connect = false;
+
+       list_del_init(&params->action);
+
+       switch (params->auto_connect) {
+       case HCI_AUTO_CONN_EXPLICIT:
+               hci_conn_params_del(hdev, bdaddr, bdaddr_type);
+               /* return instead of break to avoid duplicate scan update */
+               return;
+       case HCI_AUTO_CONN_DIRECT:
+       case HCI_AUTO_CONN_ALWAYS:
+               list_add(&params->action, &hdev->pend_le_conns);
+               break;
+       case HCI_AUTO_CONN_REPORT:
+               list_add(&params->action, &hdev->pend_le_reports);
+               break;
+       default:
+               break;
+       }
+
+       hci_update_background_scan(hdev);
+}
+
+static void hci_conn_cleanup(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+
+       if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
+               hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
+
+       hci_chan_list_flush(conn);
+
+       hci_conn_hash_del(hdev, conn);
+
+       if (hdev->notify)
+               hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+
+       hci_conn_del_sysfs(conn);
+
+       debugfs_remove_recursive(conn->debugfs);
+
+       hci_dev_put(hdev);
+
+       hci_conn_put(conn);
+}
+
+static void le_scan_cleanup(struct work_struct *work)
+{
+       struct hci_conn *conn = container_of(work, struct hci_conn,
+                                            le_scan_cleanup);
+       struct hci_dev *hdev = conn->hdev;
+       struct hci_conn *c = NULL;
+
+       BT_DBG("%s hcon %p", hdev->name, conn);
+
+       hci_dev_lock(hdev);
+
+       /* Check that the hci_conn is still around */
+       rcu_read_lock();
+       list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
+               if (c == conn)
+                       break;
+       }
+       rcu_read_unlock();
+
+       if (c == conn) {
+               hci_connect_le_scan_cleanup(conn);
+               hci_conn_cleanup(conn);
+       }
+
+       hci_dev_unlock(hdev);
+       hci_dev_put(hdev);
+       hci_conn_put(conn);
 }
 
-/* This function requires the caller holds hdev->lock */
 static void hci_connect_le_scan_remove(struct hci_conn *conn)
 {
-       hci_connect_le_scan_cleanup(conn);
+       BT_DBG("%s hcon %p", conn->hdev->name, conn);
+
+       /* We can't call hci_conn_del/hci_conn_cleanup here since that
+        * could deadlock with another hci_conn_del() call that's holding
+        * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
+        * Instead, grab temporary extra references to the hci_dev and
+        * hci_conn and perform the necessary cleanup in a separate work
+        * callback.
+        */
+
+       hci_dev_hold(conn->hdev);
+       hci_conn_get(conn);
 
-       hci_conn_hash_del(conn->hdev, conn);
-       hci_update_background_scan(conn->hdev);
+       schedule_work(&conn->le_scan_cleanup);
 }
 
 static void hci_acl_create_connection(struct hci_conn *conn)
@@ -149,33 +224,8 @@ static void hci_acl_create_connection(struct hci_conn *conn)
        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
 }
 
-static void hci_acl_create_connection_cancel(struct hci_conn *conn)
-{
-       struct hci_cp_create_conn_cancel cp;
-
-       BT_DBG("hcon %p", conn);
-
-       if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
-               return;
-
-       bacpy(&cp.bdaddr, &conn->dst);
-       hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
-}
-
-static void hci_reject_sco(struct hci_conn *conn)
-{
-       struct hci_cp_reject_sync_conn_req cp;
-
-       cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
-       bacpy(&cp.bdaddr, &conn->dst);
-
-       hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
-}
-
 int hci_disconnect(struct hci_conn *conn, __u8 reason)
 {
-       struct hci_cp_disconnect cp;
-
        BT_DBG("hcon %p", conn);
 
        /* When we are master of an established connection and it enters
@@ -183,7 +233,8 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason)
         * current clock offset.  Processing of the result is done
         * within the event handling and hci_clock_offset_evt function.
         */
-       if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
+       if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
+           (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
                struct hci_dev *hdev = conn->hdev;
                struct hci_cp_read_clock_offset clkoff_cp;
 
@@ -192,25 +243,7 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason)
                             &clkoff_cp);
        }
 
-       conn->state = BT_DISCONN;
-
-       cp.handle = cpu_to_le16(conn->handle);
-       cp.reason = reason;
-       return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
-}
-
-static void hci_amp_disconn(struct hci_conn *conn)
-{
-       struct hci_cp_disconn_phy_link cp;
-
-       BT_DBG("hcon %p", conn);
-
-       conn->state = BT_DISCONN;
-
-       cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
-       cp.reason = hci_proto_disconn_ind(conn);
-       hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
-                    sizeof(cp), &cp);
+       return hci_abort_conn(conn, reason);
 }
 
 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
@@ -376,35 +409,14 @@ static void hci_conn_timeout(struct work_struct *work)
        if (refcnt > 0)
                return;
 
-       switch (conn->state) {
-       case BT_CONNECT:
-       case BT_CONNECT2:
-               if (conn->out) {
-                       if (conn->type == ACL_LINK)
-                               hci_acl_create_connection_cancel(conn);
-                       else if (conn->type == LE_LINK) {
-                               if (test_bit(HCI_CONN_SCANNING, &conn->flags))
-                                       hci_connect_le_scan_remove(conn);
-                               else
-                                       hci_le_create_connection_cancel(conn);
-                       }
-               } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
-                       hci_reject_sco(conn);
-               }
-               break;
-       case BT_CONFIG:
-       case BT_CONNECTED:
-               if (conn->type == AMP_LINK) {
-                       hci_amp_disconn(conn);
-               } else {
-                       __u8 reason = hci_proto_disconn_ind(conn);
-                       hci_disconnect(conn, reason);
-               }
-               break;
-       default:
-               conn->state = BT_CLOSED;
-               break;
+       /* LE connections in scanning state need special handling */
+       if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
+           test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+               hci_connect_le_scan_remove(conn);
+               return;
        }
+
+       hci_abort_conn(conn, hci_proto_disconn_ind(conn));
 }
 
 /* Enter sniff mode */
@@ -472,7 +484,7 @@ static void le_conn_timeout(struct work_struct *work)
                return;
        }
 
-       hci_le_create_connection_cancel(conn);
+       hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 }
 
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -535,6 +547,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
        INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
        INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
        INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
+       INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
 
        atomic_set(&conn->refcnt, 0);
 
@@ -581,27 +594,17 @@ int hci_conn_del(struct hci_conn *conn)
                }
        }
 
-       hci_chan_list_flush(conn);
-
        if (conn->amp_mgr)
                amp_mgr_put(conn->amp_mgr);
 
-       hci_conn_hash_del(hdev, conn);
-       if (hdev->notify)
-               hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
-
        skb_queue_purge(&conn->data_q);
 
-       hci_conn_del_sysfs(conn);
-
-       debugfs_remove_recursive(conn->debugfs);
-
-       if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
-               hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
-
-       hci_dev_put(hdev);
-
-       hci_conn_put(conn);
+       /* Remove the connection from the list and clean up its remaining
+        * state. This is a separate function since for some cases like
+        * BT_CONNECT_SCAN we *only* want the cleanup part without the
+        * rest of hci_conn_del.
+        */
+       hci_conn_cleanup(conn);
 
        return 0;
 }
@@ -800,7 +803,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
         * attempt, we simply update pending_sec_level and auth_type fields
         * and return the object found.
         */
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+       conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
        conn_unfinished = NULL;
        if (conn) {
                if (conn->state == BT_CONNECT &&
@@ -950,13 +953,10 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
 {
        struct hci_conn *conn;
 
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+       conn = hci_conn_hash_lookup_le(hdev, addr, type);
        if (!conn)
                return false;
 
-       if (conn->dst_type != type)
-               return false;
-
        if (conn->state != BT_CONNECTED)
                return false;
 
@@ -973,15 +973,23 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
        if (is_connected(hdev, addr, addr_type))
                return -EISCONN;
 
-       params = hci_conn_params_add(hdev, addr, addr_type);
-       if (!params)
-               return -EIO;
+       params = hci_conn_params_lookup(hdev, addr, addr_type);
+       if (!params) {
+               params = hci_conn_params_add(hdev, addr, addr_type);
+               if (!params)
+                       return -ENOMEM;
 
-       /* If we created new params, or existing params were marked as disabled,
-        * mark them to be used just once to connect.
-        */
-       if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+               /* If we created new params, mark them to be deleted in
+                * hci_connect_le_scan_cleanup. This is a different case from
+                * existing disabled params, which will stay after cleanup.
+                */
                params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+       }
+
+       /* We're trying to connect, so make sure the params are on pend_le_conns */
+       if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+           params->auto_connect == HCI_AUTO_CONN_REPORT ||
+           params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
                list_del_init(&params->action);
                list_add(&params->action, &hdev->pend_le_conns);
        }
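hci_explicit_conn_params_set() leans on the kernel's intrusive list primitives: list_del_init() makes unlinking idempotent, so params->action can be moved onto pend_le_conns regardless of which action list it was on before. A freestanding sketch of that move, with minimal stand-ins for the two helpers:

/* Sketch of the list_del_init() + list_add() move used above. */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry;	/* points at itself: safe to delete again */
	entry->prev = entry;
}

int main(void)
{
	struct list_head pend_le_reports = LIST_HEAD_INIT(pend_le_reports);
	struct list_head pend_le_conns = LIST_HEAD_INIT(pend_le_conns);
	struct list_head action = LIST_HEAD_INIT(action); /* ~params->action */

	list_add(&action, &pend_le_reports);	/* e.g. HCI_AUTO_CONN_REPORT */

	/* The move performed for an explicit connect attempt: */
	list_del_init(&action);
	list_add(&action, &pend_le_conns);

	printf("on pend_le_conns: %s\n",
	       pend_le_conns.next == &action ? "yes" : "no");
	return 0;
}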
@@ -1021,7 +1029,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
         * attempt, we simply update pending_sec_level and auth_type fields
         * and return the object found.
         */
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+       conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
        if (conn) {
                if (conn->pending_sec_level < sec_level)
                        conn->pending_sec_level = sec_level;
index d2b3dd32d6cf1c6469b9fc728c62a625ac1c9b67..086ed9389da1f961a78c5eafe1f47e4702ec0914 100644 (file)
@@ -162,6 +162,16 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
        if (strtobool(buf, &enable))
                return -EINVAL;
 
+       /* When the diagnostic flags are not persistent and the transport
+        * is not active, there is no need for the vendor callback.
+        *
+        * Instead just store the desired value. If needed the setting
+        * will be programmed when the controller gets powered on.
+        */
+       if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+           !test_bit(HCI_RUNNING, &hdev->flags))
+               goto done;
+
        hci_req_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_unlock(hdev);
@@ -169,6 +179,7 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
        if (err < 0)
                return err;
 
+done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
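The HCI_QUIRK_NON_PERSISTENT_DIAG handling above, together with its counterpart in hci_dev_do_open() below, implements a store-now, program-later policy: while the transport is down only the desired flag is recorded, and the vendor callback runs once the controller has been initialized. A minimal sketch of the same pattern, with hypothetical names:

/* Sketch: defer a vendor setting until the device is actually running. */
#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool running;		/* ~HCI_RUNNING */
	bool diag_enabled;	/* ~HCI_VENDOR_DIAG desired state */
};

/* Stand-in for the vendor set_diag() callback. */
static int program_diag(struct dev *d, bool enable)
{
	printf("dev %p: programming diag %s\n", (void *)d,
	       enable ? "on" : "off");
	return 0;
}

static int set_diag(struct dev *d, bool enable)
{
	d->diag_enabled = enable;	/* always remember the desired value */
	if (!d->running)
		return 0;		/* apply later, at open time */
	return program_diag(d, enable);
}

static int dev_open(struct dev *d)
{
	d->running = true;
	/* Reprogram the non-persistent setting after init completes. */
	if (d->diag_enabled)
		return program_diag(d, true);
	return 0;
}

int main(void)
{
	struct dev d = { 0 };

	set_diag(&d, true);	/* stored only; transport not active */
	dev_open(&d);		/* setting programmed here */
	return 0;
}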
@@ -1450,6 +1461,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
        set_bit(HCI_INIT, &hdev->flags);
 
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
+               hci_sock_dev_event(hdev, HCI_DEV_SETUP);
+
                if (hdev->setup)
                        ret = hdev->setup(hdev);
 
@@ -1490,10 +1503,21 @@ static int hci_dev_do_open(struct hci_dev *hdev)
 
        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
-                   !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
+                   !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
+                       if (!ret && hdev->post_init)
+                               ret = hdev->post_init(hdev);
+               }
        }
 
+       /* If the HCI Reset command is clearing all diagnostic settings,
+        * then they need to be reprogrammed after the init procedure
+        * completed.
+        */
+       if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+           hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
+               ret = hdev->set_diag(hdev, true);
+
        clear_bit(HCI_INIT, &hdev->flags);
 
        if (!ret) {
@@ -2916,30 +2940,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
        return NULL;
 }
 
-/* This function requires the caller holds hdev->lock */
-struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
-                                                   bdaddr_t *addr,
-                                                   u8 addr_type)
-{
-       struct hci_conn_params *param;
-
-       list_for_each_entry(param, &hdev->pend_le_conns, action) {
-               if (bacmp(&param->addr, addr) == 0 &&
-                   param->addr_type == addr_type &&
-                   param->explicit_connect)
-                       return param;
-       }
-
-       list_for_each_entry(param, &hdev->pend_le_reports, action) {
-               if (bacmp(&param->addr, addr) == 0 &&
-                   param->addr_type == addr_type &&
-                   param->explicit_connect)
-                       return param;
-       }
-
-       return NULL;
-}
-
 /* This function requires the caller holds hdev->lock */
 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
                                            bdaddr_t *addr, u8 addr_type)
@@ -3562,14 +3562,15 @@ EXPORT_SYMBOL(hci_recv_frame);
 /* Receive diagnostic message from HCI drivers */
 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
 {
+       /* Mark as diagnostic packet */
+       bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
+
        /* Time stamp */
        __net_timestamp(skb);
 
-       /* Mark as diagnostic packet and send to monitor */
-       bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
-       hci_send_to_monitor(hdev, skb);
+       skb_queue_tail(&hdev->rx_q, skb);
+       queue_work(hdev->workqueue, &hdev->rx_work);
 
-       kfree_skb(skb);
        return 0;
 }
 EXPORT_SYMBOL(hci_recv_diag);
index 8acec932123ade906192ebbfd2daecee9b51f697..504892cfb25a8e2e1d126c3ecac13f3d949d1ad9 100644 (file)
@@ -55,7 +55,12 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
        wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
        hci_dev_lock(hdev);
-       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+       /* Set discovery state to stopped if we're not doing LE active
+        * scanning.
+        */
+       if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+           hdev->le_scan_type != LE_SCAN_ACTIVE)
+               hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
 
        hci_conn_check_pending(hdev);
@@ -1910,7 +1915,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
 
        hci_dev_lock(hdev);
 
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+       conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
+                                      cp->peer_addr_type);
        if (!conn)
                goto unlock;
 
@@ -4648,8 +4654,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
        /* If we're not connectable only connect devices that we have in
         * our pend_le_conns list.
         */
-       params = hci_explicit_connect_lookup(hdev, addr, addr_type);
-
+       params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
+                                          addr_type);
        if (!params)
                return NULL;
 
index b7369220c9efff616d13f3ad3aeec44c809bd4e0..739f966e5d67f3843015a127a7dba08e2961a238 100644 (file)
@@ -564,3 +564,96 @@ void hci_update_background_scan(struct hci_dev *hdev)
        if (err && err != -ENODATA)
                BT_ERR("Failed to run HCI request: err %d", err);
 }
+
+void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
+                     u8 reason)
+{
+       switch (conn->state) {
+       case BT_CONNECTED:
+       case BT_CONFIG:
+               if (conn->type == AMP_LINK) {
+                       struct hci_cp_disconn_phy_link cp;
+
+                       cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
+                       cp.reason = reason;
+                       hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
+                                   &cp);
+               } else {
+                       struct hci_cp_disconnect dc;
+
+                       dc.handle = cpu_to_le16(conn->handle);
+                       dc.reason = reason;
+                       hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+               }
+
+               conn->state = BT_DISCONN;
+
+               break;
+       case BT_CONNECT:
+               if (conn->type == LE_LINK) {
+                       if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+                               break;
+                       hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
+                                   0, NULL);
+               } else if (conn->type == ACL_LINK) {
+                       if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
+                               break;
+                       hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
+                                   6, &conn->dst);
+               }
+               break;
+       case BT_CONNECT2:
+               if (conn->type == ACL_LINK) {
+                       struct hci_cp_reject_conn_req rej;
+
+                       bacpy(&rej.bdaddr, &conn->dst);
+                       rej.reason = reason;
+
+                       hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
+                                   sizeof(rej), &rej);
+               } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
+                       struct hci_cp_reject_sync_conn_req rej;
+
+                       bacpy(&rej.bdaddr, &conn->dst);
+
+                       /* SCO rejection has its own limited set of
+                        * allowed error values (0x0D-0x0F), which isn't
+                        * compatible with most values passed to this
+                        * function. To be safe, hard-code a value that
+                        * is suitable for SCO.
+                        */
+                       rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
+
+                       hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
+                                   sizeof(rej), &rej);
+               }
+               break;
+       default:
+               conn->state = BT_CLOSED;
+               break;
+       }
+}
+
+static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+       if (status)
+               BT_DBG("Failed to abort connection: status 0x%2.2x", status);
+}
+
+int hci_abort_conn(struct hci_conn *conn, u8 reason)
+{
+       struct hci_request req;
+       int err;
+
+       hci_req_init(&req, conn->hdev);
+
+       __hci_abort_conn(&req, conn, reason);
+
+       err = hci_req_run(&req, abort_conn_complete);
+       if (err && err != -ENODATA) {
+               BT_ERR("Failed to run HCI request: err %d", err);
+               return err;
+       }
+
+       return 0;
+}
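__hci_abort_conn() collapses what used to be four ad-hoc paths (disconnect, create-connection cancel, connection reject, AMP phy-link disconnect) into one dispatch over connection state and link type, and hci_abort_conn() wraps it in a single request run. A condensed user-space sketch of that dispatch, returning which HCI operation the request would carry; the names and the helper itself are illustrative, not the kernel API:

/* Sketch of the state x link-type dispatch in __hci_abort_conn(). */
#include <stdio.h>

enum state { ST_CONNECTED, ST_CONFIG, ST_CONNECT, ST_CONNECT2, ST_OTHER };
enum link  { LINK_ACL, LINK_LE, LINK_SCO, LINK_AMP };

static const char *abort_op(enum state s, enum link l, int scanning)
{
	switch (s) {
	case ST_CONNECTED:
	case ST_CONFIG:
		return l == LINK_AMP ? "DISCONN_PHY_LINK" : "DISCONNECT";
	case ST_CONNECT:
		if (l == LINK_LE)
			return scanning ? "(none: still only scanning)"
					: "LE_CREATE_CONN_CANCEL";
		if (l == LINK_ACL)
			return "CREATE_CONN_CANCEL";
		return "(none)";
	case ST_CONNECT2:
		if (l == LINK_ACL)
			return "REJECT_CONN_REQ";
		if (l == LINK_SCO)
			return "REJECT_SYNC_CONN_REQ";
		return "(none)";
	default:
		return "(none: mark connection closed)";
	}
}

int main(void)
{
	printf("%s\n", abort_op(ST_CONNECTED, LINK_ACL, 0));
	printf("%s\n", abort_op(ST_CONNECT, LINK_LE, 1));
	printf("%s\n", abort_op(ST_CONNECT2, LINK_SCO, 0));
	return 0;
}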
index bf6df92f42dbf44be59134349f8e62ca953d9214..25c7f1305dcbcb273816c15c95097f2a1ed2c1f2 100644 (file)
@@ -55,3 +55,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
 
 void hci_update_background_scan(struct hci_dev *hdev);
 void __hci_update_background_scan(struct hci_request *req);
+
+int hci_abort_conn(struct hci_conn *conn, u8 reason);
+void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
+                     u8 reason);
index 9a100c1fd7b5ec13cdec4b852e70ca769700a067..b9327e8c2d34ee43b84ab7b23bf9132088560a09 100644 (file)
@@ -120,10 +120,7 @@ static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
        /* Apply filter */
        flt = &hci_pi(sk)->filter;
 
-       if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
-               flt_type = 0;
-       else
-               flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
+       flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
 
        if (!test_bit(flt_type, &flt->type_mask))
                return true;
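With the HCI_VENDOR_PKT special case gone, the raw-socket filter above is a plain bitmask test: the packet type, masked down to the filter-type bits, selects a bit in the socket's type_mask. In miniature (the mask constant and values are illustrative):

/* Sketch of the bitmask filter check; values are illustrative. */
#include <stdio.h>

#define FLT_TYPE_BITS 31	/* low five bits select the filter slot */

static int filtered_out(unsigned long type_mask, unsigned char pkt_type)
{
	unsigned int slot = pkt_type & FLT_TYPE_BITS;

	return !(type_mask & (1UL << slot));
}

int main(void)
{
	unsigned long mask = (1UL << 2) | (1UL << 4); /* allow types 2 and 4 */

	printf("type 2: %s\n", filtered_out(mask, 2) ? "drop" : "pass");
	printf("type 3: %s\n", filtered_out(mask, 3) ? "drop" : "pass");
	return 0;
}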
@@ -173,6 +170,11 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
                        continue;
 
                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
+                       if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+                           bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
+                           bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+                           bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
+                               continue;
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
@@ -333,6 +335,12 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;
 
+       case HCI_DEV_SETUP:
+               if (hdev->manufacturer == 0xffff)
+                       return NULL;
+
+               /* fall through */
+
        case HCI_DEV_UP:
                skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
                if (!skb)
@@ -401,15 +409,17 @@ static void send_monitor_replay(struct sock *sk)
                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
 
-               if (!test_bit(HCI_UP, &hdev->flags))
-                       continue;
-
-               skb = create_monitor_event(hdev, HCI_DEV_UP);
-               if (!skb)
-                       continue;
+               if (test_bit(HCI_UP, &hdev->flags))
+                       skb = create_monitor_event(hdev, HCI_DEV_UP);
+               else if (hci_dev_test_flag(hdev, HCI_SETUP))
+                       skb = create_monitor_event(hdev, HCI_DEV_SETUP);
+               else
+                       skb = NULL;
 
-               if (sock_queue_rcv_skb(sk, skb))
-                       kfree_skb(skb);
+               if (skb) {
+                       if (sock_queue_rcv_skb(sk, skb))
+                               kfree_skb(skb);
+               }
        }
 
        read_unlock(&hci_dev_list_lock);
@@ -1250,6 +1260,12 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                        goto drop;
                }
 
+               if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+                   bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+                       err = -EINVAL;
+                       goto drop;
+               }
+
                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }
index f1a117f8cad22a245044014eae118e09bcdfe271..0bec4588c3c8c3db45a564c81394c464a7faaf4a 100644 (file)
@@ -401,6 +401,20 @@ static void hidp_idle_timeout(unsigned long arg)
 {
        struct hidp_session *session = (struct hidp_session *) arg;
 
+       /* The HIDP user-space API only contains calls to add and remove
+        * devices. There is no way to forward events of any kind. Therefore,
+        * we have to forcefully disconnect a device on idle-timeouts. This is
+        * unfortunate and weird API design, but it is spec-compliant and
+        * required for backwards-compatibility. Hence, on idle-timeout, we
+        * signal driver-detach events, so poll() will be woken up with an
+        * error-condition on both sockets.
+        */
+
+       session->intr_sock->sk->sk_err = EUNATCH;
+       session->ctrl_sock->sk->sk_err = EUNATCH;
+       wake_up_interruptible(sk_sleep(session->intr_sock->sk));
+       wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));
+
        hidp_session_terminate(session);
 }
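The hidp_idle_timeout() hunk publishes a pending error on both sockets and then wakes their wait queues, so a blocked poll() returns immediately with an error condition. The same wake-with-error idea in a user-space model, with a mutex and condition variable standing in for the socket wait queue; all names are hypothetical:

/* Sketch: publish an error, then wake anyone sleeping on the queue. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct fake_sock {
	pthread_mutex_t lock;
	pthread_cond_t wq;	/* ~sk_sleep() wait queue */
	int sk_err;
};

static void *waiter(void *arg)
{
	struct fake_sock *sk = arg;

	pthread_mutex_lock(&sk->lock);
	while (!sk->sk_err)		/* ~poll() blocking */
		pthread_cond_wait(&sk->wq, &sk->lock);
	printf("woken with error %d\n", sk->sk_err);
	pthread_mutex_unlock(&sk->lock);
	return NULL;
}

int main(void)
{
	struct fake_sock sk = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wq = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, waiter, &sk);

	/* ~hidp_idle_timeout(): set the error, then wake the sleeper. */
	pthread_mutex_lock(&sk.lock);
	sk.sk_err = EUNATCH;
	pthread_cond_broadcast(&sk.wq);
	pthread_mutex_unlock(&sk.lock);

	pthread_join(t, NULL);
	return 0;
}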
 
index 586b3d580cfcba0422828cab1843363178dfe85c..1bb5515270449e8115a0c169ea5b6380594c40a4 100644 (file)
@@ -1111,53 +1111,76 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
        if (!sk)
                return 0;
 
+       lock_sock(sk);
+
+       if (sk->sk_shutdown)
+               goto shutdown_already;
+
+       BT_DBG("Handling sock shutdown");
+
        /* prevent sk structure from being freed whilst unlocked */
        sock_hold(sk);
 
        chan = l2cap_pi(sk)->chan;
        /* prevent chan structure from being freed whilst unlocked */
        l2cap_chan_hold(chan);
-       conn = chan->conn;
 
        BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
 
+       if (chan->mode == L2CAP_MODE_ERTM &&
+           chan->unacked_frames > 0 &&
+           chan->state == BT_CONNECTED) {
+               err = __l2cap_wait_ack(sk, chan);
+
+               /* After waiting for ACKs, check whether shutdown
+                * has already been actioned to close the L2CAP
+                * link such as by l2cap_disconnection_req().
+                */
+               if (sk->sk_shutdown)
+                       goto has_shutdown;
+       }
+
+       sk->sk_shutdown = SHUTDOWN_MASK;
+       release_sock(sk);
+
+       l2cap_chan_lock(chan);
+       conn = chan->conn;
+       if (conn)
+               /* prevent conn structure from being freed */
+               l2cap_conn_get(conn);
+       l2cap_chan_unlock(chan);
+
        if (conn)
+               /* mutex lock must be taken before l2cap_chan_lock() */
                mutex_lock(&conn->chan_lock);
 
        l2cap_chan_lock(chan);
-       lock_sock(sk);
+       l2cap_chan_close(chan, 0);
+       l2cap_chan_unlock(chan);
 
-       if (!sk->sk_shutdown) {
-               if (chan->mode == L2CAP_MODE_ERTM &&
-                   chan->unacked_frames > 0 &&
-                   chan->state == BT_CONNECTED)
-                       err = __l2cap_wait_ack(sk, chan);
+       if (conn) {
+               mutex_unlock(&conn->chan_lock);
+               l2cap_conn_put(conn);
+       }
 
-               sk->sk_shutdown = SHUTDOWN_MASK;
+       lock_sock(sk);
 
-               release_sock(sk);
-               l2cap_chan_close(chan, 0);
-               lock_sock(sk);
+       if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+           !(current->flags & PF_EXITING))
+               err = bt_sock_wait_state(sk, BT_CLOSED,
+                                        sk->sk_lingertime);
 
-               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
-                   !(current->flags & PF_EXITING))
-                       err = bt_sock_wait_state(sk, BT_CLOSED,
-                                                sk->sk_lingertime);
-       }
+has_shutdown:
+       l2cap_chan_put(chan);
+       sock_put(sk);
 
+shutdown_already:
        if (!err && sk->sk_err)
                err = -sk->sk_err;
 
        release_sock(sk);
-       l2cap_chan_unlock(chan);
-
-       if (conn)
-               mutex_unlock(&conn->chan_lock);
-
-       l2cap_chan_put(chan);
-       sock_put(sk);
 
-       BT_DBG("err: %d", err);
+       BT_DBG("Sock shutdown complete err: %d", err);
 
        return err;
 }
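The reworked l2cap_sock_shutdown() is mostly about lock ordering: it pins sk, chan and conn with references, drops the socket lock, and only then takes conn->chan_lock ahead of the channel lock, matching the documented hierarchy. A skeleton of that choreography with pthread mutexes; names are hypothetical and the real function additionally handles references and the linger wait:

/* Sketch of the shutdown lock choreography: drop the fine-grained lock,
 * then acquire the coarser locks in the documented order. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t conn_chan_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;	   /* inner */

static void shutdown_path(void)
{
	pthread_mutex_lock(&sock_lock);
	/* ... check and set the shutdown flag, wait for ACKs ... */
	pthread_mutex_unlock(&sock_lock); /* must not hold across the rest */

	/* conn->chan_lock must be taken before the channel lock */
	pthread_mutex_lock(&conn_chan_lock);
	pthread_mutex_lock(&chan_lock);
	printf("closing channel under both locks\n");
	pthread_mutex_unlock(&chan_lock);
	pthread_mutex_unlock(&conn_chan_lock);

	pthread_mutex_lock(&sock_lock);	/* re-take for linger handling */
	pthread_mutex_unlock(&sock_lock);
}

int main(void)
{
	shutdown_path();
	return 0;
}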
index ccaf5a436d8f7a70799729a04ffc17583d11913f..7f22119276f391067b424da490e1c667d7a6272b 100644 (file)
@@ -268,6 +268,14 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
                               HCI_SOCK_TRUSTED, skip_sk);
 }
 
+static u8 le_addr_type(u8 mgmt_addr_type)
+{
+       if (mgmt_addr_type == BDADDR_LE_PUBLIC)
+               return ADDR_LE_DEV_PUBLIC;
+       else
+               return ADDR_LE_DEV_RANDOM;
+}
+
 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
                        u16 data_len)
 {
@@ -1631,35 +1639,8 @@ static int clean_up_hci_state(struct hci_dev *hdev)
        discov_stopped = hci_stop_discovery(&req);
 
        list_for_each_entry(conn, &hdev->conn_hash.list, list) {
-               struct hci_cp_disconnect dc;
-               struct hci_cp_reject_conn_req rej;
-
-               switch (conn->state) {
-               case BT_CONNECTED:
-               case BT_CONFIG:
-                       dc.handle = cpu_to_le16(conn->handle);
-                       dc.reason = 0x15; /* Terminated due to Power Off */
-                       hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
-                       break;
-               case BT_CONNECT:
-                       if (conn->type == LE_LINK)
-                               hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
-                                           0, NULL);
-                       else if (conn->type == ACL_LINK)
-                               hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
-                                           6, &conn->dst);
-                       break;
-               case BT_CONNECT2:
-                       bacpy(&rej.bdaddr, &conn->dst);
-                       rej.reason = 0x15; /* Terminated due to Power Off */
-                       if (conn->type == ACL_LINK)
-                               hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
-                                           sizeof(rej), &rej);
-                       else if (conn->type == SCO_LINK)
-                               hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
-                                           sizeof(rej), &rej);
-                       break;
-               }
+               /* 0x15 == Terminated due to Power Off */
+               __hci_abort_conn(&req, conn, 0x15);
        }
 
        err = hci_req_run(&req, clean_up_hci_complete);
@@ -3044,9 +3025,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 {
        struct mgmt_cp_unpair_device *cp = data;
        struct mgmt_rp_unpair_device rp;
-       struct hci_cp_disconnect dc;
+       struct hci_conn_params *params;
        struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
+       u8 addr_type;
        int err;
 
        memset(&rp, 0, sizeof(rp));
@@ -3087,36 +3069,23 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                        conn = NULL;
 
                err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
-       } else {
-               u8 addr_type;
-
-               conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
-                                              &cp->addr.bdaddr);
-               if (conn) {
-                       /* Defer clearing up the connection parameters
-                        * until closing to give a chance of keeping
-                        * them if a repairing happens.
-                        */
-                       set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
-
-                       /* If disconnection is not requested, then
-                        * clear the connection variable so that the
-                        * link is not terminated.
-                        */
-                       if (!cp->disconnect)
-                               conn = NULL;
+               if (err < 0) {
+                       err = mgmt_cmd_complete(sk, hdev->id,
+                                               MGMT_OP_UNPAIR_DEVICE,
+                                               MGMT_STATUS_NOT_PAIRED, &rp,
+                                               sizeof(rp));
+                       goto unlock;
                }
 
-               if (cp->addr.type == BDADDR_LE_PUBLIC)
-                       addr_type = ADDR_LE_DEV_PUBLIC;
-               else
-                       addr_type = ADDR_LE_DEV_RANDOM;
+               goto done;
+       }
 
-               hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
+       /* LE address type */
+       addr_type = le_addr_type(cp->addr.type);
 
-               err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
-       }
+       hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
 
+       err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
        if (err < 0) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                        MGMT_STATUS_NOT_PAIRED, &rp,
@@ -3124,6 +3093,36 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                goto unlock;
        }
 
+       conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
+       if (!conn) {
+               hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
+               goto done;
+       }
+
+       /* Abort any ongoing SMP pairing */
+       smp_cancel_pairing(conn);
+
+       /* Defer clearing the connection parameters until closing to
+        * give a chance of keeping them if a re-pairing happens.
+        */
+       set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
+
+       /* Disable auto-connection parameters if present */
+       params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
+       if (params) {
+               if (params->explicit_connect)
+                       params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+               else
+                       params->auto_connect = HCI_AUTO_CONN_DISABLED;
+       }
+
+       /* If disconnection is not requested, then clear the connection
+        * variable so that the link is not terminated.
+        */
+       if (!cp->disconnect)
+               conn = NULL;
+
+done:
        /* If the connection variable is set, then termination of the
         * link is requested.
         */
@@ -3143,9 +3142,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 
        cmd->cmd_complete = addr_cmd_complete;
 
-       dc.handle = cpu_to_le16(conn->handle);
-       dc.reason = 0x13; /* Remote User Terminated Connection */
-       err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+       err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
        if (err < 0)
                mgmt_pending_remove(cmd);
 
@@ -3193,7 +3190,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
                                               &cp->addr.bdaddr);
        else
-               conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
+               conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
+                                              le_addr_type(cp->addr.type));
 
        if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
@@ -3544,14 +3542,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
                                       auth_type);
        } else {
-               u8 addr_type;
-
-               /* Convert from L2CAP channel address type to HCI address type
-                */
-               if (cp->addr.type == BDADDR_LE_PUBLIC)
-                       addr_type = ADDR_LE_DEV_PUBLIC;
-               else
-                       addr_type = ADDR_LE_DEV_RANDOM;
+               u8 addr_type = le_addr_type(cp->addr.type);
+               struct hci_conn_params *p;
 
                /* When pairing a new device, it is expected to remember
                 * this device for future connections. Adding the connection
@@ -3562,7 +3554,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                 * If connection parameters already exist, then they
                 * will be kept and this function does nothing.
                 */
-               hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+               p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+
+               if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+                       p->auto_connect = HCI_AUTO_CONN_DISABLED;
 
                conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
                                           addr_type, sec_level,
@@ -3693,7 +3688,8 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
        if (addr->type == BDADDR_BREDR)
                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
        else
-               conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
+               conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
+                                              le_addr_type(addr->type));
 
        if (!conn) {
                err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
@@ -5596,14 +5592,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
 
        for (i = 0; i < irk_count; i++) {
                struct mgmt_irk_info *irk = &cp->irks[i];
-               u8 addr_type;
 
-               if (irk->addr.type == BDADDR_LE_PUBLIC)
-                       addr_type = ADDR_LE_DEV_PUBLIC;
-               else
-                       addr_type = ADDR_LE_DEV_RANDOM;
-
-               hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
+               hci_add_irk(hdev, &irk->addr.bdaddr,
+                           le_addr_type(irk->addr.type), irk->val,
                            BDADDR_ANY);
        }
 
@@ -5683,12 +5674,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
 
        for (i = 0; i < key_count; i++) {
                struct mgmt_ltk_info *key = &cp->keys[i];
-               u8 type, addr_type, authenticated;
-
-               if (key->addr.type == BDADDR_LE_PUBLIC)
-                       addr_type = ADDR_LE_DEV_PUBLIC;
-               else
-                       addr_type = ADDR_LE_DEV_RANDOM;
+               u8 type, authenticated;
 
                switch (key->type) {
                case MGMT_LTK_UNAUTHENTICATED:
@@ -5714,9 +5700,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                        continue;
                }
 
-               hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
-                           authenticated, key->val, key->enc_size, key->ediv,
-                           key->rand);
+               hci_add_ltk(hdev, &key->addr.bdaddr,
+                           le_addr_type(key->addr.type), type, authenticated,
+                           key->val, key->enc_size, key->ediv, key->rand);
        }
 
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
@@ -6117,14 +6103,21 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
                __hci_update_background_scan(req);
                break;
        case HCI_AUTO_CONN_REPORT:
-               list_add(&params->action, &hdev->pend_le_reports);
+               if (params->explicit_connect)
+                       list_add(&params->action, &hdev->pend_le_conns);
+               else
+                       list_add(&params->action, &hdev->pend_le_reports);
                __hci_update_background_scan(req);
                break;
        case HCI_AUTO_CONN_DIRECT:
        case HCI_AUTO_CONN_ALWAYS:
                if (!is_connected(hdev, addr, addr_type)) {
                        list_add(&params->action, &hdev->pend_le_conns);
-                       __hci_update_background_scan(req);
+                       /* If we are in the scan phase of connecting, we were
+                        * already added to pend_le_conns and scanning.
+                        */
+                       if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
+                               __hci_update_background_scan(req);
                }
                break;
        }
@@ -6221,10 +6214,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
                goto added;
        }
 
-       if (cp->addr.type == BDADDR_LE_PUBLIC)
-               addr_type = ADDR_LE_DEV_PUBLIC;
-       else
-               addr_type = ADDR_LE_DEV_RANDOM;
+       addr_type = le_addr_type(cp->addr.type);
 
        if (cp->action == 0x02)
                auto_conn = HCI_AUTO_CONN_ALWAYS;
@@ -6353,10 +6343,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                        goto complete;
                }
 
-               if (cp->addr.type == BDADDR_LE_PUBLIC)
-                       addr_type = ADDR_LE_DEV_PUBLIC;
-               else
-                       addr_type = ADDR_LE_DEV_RANDOM;
+               addr_type = le_addr_type(cp->addr.type);
 
                /* Kernel internally uses conn_params with resolvable private
                 * address, but Remove Device allows only identity addresses.
@@ -6379,7 +6366,8 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                        goto unlock;
                }
 
-               if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+               if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+                   params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
                        err = cmd->cmd_complete(cmd,
                                                MGMT_STATUS_INVALID_PARAMS);
                        mgmt_pending_remove(cmd);
@@ -6415,6 +6403,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                        if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
                                continue;
                        device_removed(sk, hdev, &p->addr, p->addr_type);
+                       if (p->explicit_connect) {
+                               p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+                               continue;
+                       }
                        list_del(&p->action);
                        list_del(&p->list);
                        kfree(p);
@@ -7857,27 +7849,13 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
        mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
 {
        struct mgmt_ev_new_irk ev;
 
        memset(&ev, 0, sizeof(ev));
 
-       /* For identity resolving keys from devices that are already
-        * using a public address or static random address, do not
-        * ask for storing this key. The identity resolving key really
-        * is only mandatory for devices using resolvable random
-        * addresses.
-        *
-        * Storing all identity resolving keys has the downside that
-        * they will also be loaded on the next boot of the system. More
-        * identity resolving keys, means more time during scanning is
-        * needed to actually resolve these addresses.
-        */
-       if (bacmp(&irk->rpa, BDADDR_ANY))
-               ev.store_hint = 0x01;
-       else
-               ev.store_hint = 0x00;
+       ev.store_hint = persistent;
 
        bacpy(&ev.rpa, &irk->rpa);
        bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
index 25644e1bc47948ba19d438df983420b59b0cae9f..c91353841e40500790c13d2ce894460e1cbbe9e3 100644 (file)
@@ -811,7 +811,6 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason)
                smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
                             &reason);
 
-       clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags);
        mgmt_auth_failed(hcon, HCI_ERROR_AUTH_FAILURE);
 
        if (chan->data)
@@ -1046,8 +1045,24 @@ static void smp_notify_keys(struct l2cap_conn *conn)
        struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1];
        bool persistent;
 
+       if (hcon->type == ACL_LINK) {
+               if (hcon->key_type == HCI_LK_DEBUG_COMBINATION)
+                       persistent = false;
+               else
+                       persistent = !test_bit(HCI_CONN_FLUSH_KEY,
+                                              &hcon->flags);
+       } else {
+               /* The LTKs, IRKs and CSRKs should be persistent only if
+                * both sides had the bonding bit set in their
+                * authentication requests.
+                */
+               persistent = !!((req->auth_req & rsp->auth_req) &
+                               SMP_AUTH_BONDING);
+       }
+
        if (smp->remote_irk) {
-               mgmt_new_irk(hdev, smp->remote_irk);
+               mgmt_new_irk(hdev, smp->remote_irk, persistent);
+
                /* Now that user space can be considered to know the
                 * identity address track the connection based on it
                 * from now on (assuming this is an LE link).
@@ -1075,21 +1090,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
                }
        }
 
-       if (hcon->type == ACL_LINK) {
-               if (hcon->key_type == HCI_LK_DEBUG_COMBINATION)
-                       persistent = false;
-               else
-                       persistent = !test_bit(HCI_CONN_FLUSH_KEY,
-                                              &hcon->flags);
-       } else {
-               /* The LTKs and CSRKs should be persistent only if both sides
-                * had the bonding bit set in their authentication requests.
-                */
-               persistent = !!((req->auth_req & rsp->auth_req) &
-                               SMP_AUTH_BONDING);
-       }
-
-
        if (smp->csrk) {
                smp->csrk->bdaddr_type = hcon->dst_type;
                bacpy(&smp->csrk->bdaddr, &hcon->dst);
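smp_notify_keys() now computes a single persistence verdict before any key is handed to user space, so the new IRK store hint follows the same policy as LTKs and CSRKs. For LE the decision is one expression over the two pairing requests; as a standalone check (the constant matches the SMP bonding bit, but the helper itself is illustrative, not kernel API):

/* Sketch of the LE persistence decision from smp_notify_keys(). */
#include <stdbool.h>
#include <stdio.h>

#define SMP_AUTH_BONDING 0x01

static bool keys_persistent(unsigned char req_auth, unsigned char rsp_auth)
{
	/* Persist only if *both* sides set the bonding bit. */
	return (req_auth & rsp_auth & SMP_AUTH_BONDING) != 0;
}

int main(void)
{
	printf("%d\n", keys_persistent(0x01, 0x01)); /* 1: bonded both ways */
	printf("%d\n", keys_persistent(0x01, 0x00)); /* 0: one side only */
	return 0;
}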
@@ -2380,6 +2380,32 @@ unlock:
        return ret;
 }
 
+void smp_cancel_pairing(struct hci_conn *hcon)
+{
+       struct l2cap_conn *conn = hcon->l2cap_data;
+       struct l2cap_chan *chan;
+       struct smp_chan *smp;
+
+       if (!conn)
+               return;
+
+       chan = conn->smp;
+       if (!chan)
+               return;
+
+       l2cap_chan_lock(chan);
+
+       smp = chan->data;
+       if (smp) {
+               if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
+                       smp_failure(conn, 0);
+               else
+                       smp_failure(conn, SMP_UNSPECIFIED);
+       }
+
+       l2cap_chan_unlock(chan);
+}
+
 static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
 {
        struct smp_cmd_encrypt_info *rp = (void *) skb->data;
index 6cf872563ea71d425dd1798314765edb56d39a51..ffcc70b6b1997d3d109aeab142738a77ee31ceb9 100644 (file)
@@ -180,6 +180,7 @@ enum smp_key_pref {
 };
 
 /* SMP Commands */
+void smp_cancel_pairing(struct hci_conn *hcon);
 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
                             enum smp_key_pref key_pref);
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
index bdfb9544ca0379a88df9ac31d1e68789cf4aedba..5e88d3e17546618a728bb429fc61e20d5f8d79dc 100644 (file)
@@ -56,7 +56,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);
 
-       if (!br_allowed_ingress(br, br_vlan_group(br), skb, &vid))
+       if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
                goto out;
 
        if (is_broadcast_ether_addr(dest))
index f43ce05c66a6727f765fe76973d5c9b159955821..c88bd8e8937eac5c23c6fb0b6f8e807c337c2596 100644 (file)
@@ -134,11 +134,14 @@ static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
 static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
 {
        struct switchdev_obj_port_fdb fdb = {
-               .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
-               .addr = f->addr.addr,
+               .obj = {
+                       .id = SWITCHDEV_OBJ_ID_PORT_FDB,
+                       .flags = SWITCHDEV_F_DEFER,
+               },
                .vid = f->vlan_id,
        };
 
+       ether_addr_copy(fdb.addr, f->addr.addr);
        switchdev_port_obj_del(f->dst->dev, &fdb.obj);
 }
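The fdb hunk pairs SWITCHDEV_F_DEFER with a by-value ether_addr_copy(): once an operation may run later from deferred-work context, it cannot keep pointers into the caller's entry, which may be freed by then, so the address is embedded in the object itself. The ownership rule in miniature, with hypothetical names:

/* Sketch: deferred work must own its data by value, not by pointer. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct deferred_fdb {
	unsigned char addr[6];	/* embedded copy, not a pointer */
	int vid;
};

static void *process_deferred(void *arg)
{
	struct deferred_fdb *op = arg;

	/* The original fdb entry may be gone by now; our copy is safe. */
	printf("deleting %02x:..:%02x vid %d\n",
	       op->addr[0], op->addr[5], op->vid);
	free(op);
	return NULL;
}

int main(void)
{
	unsigned char entry_addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct deferred_fdb *op = malloc(sizeof(*op));
	pthread_t t;

	if (!op)
		return 1;
	memcpy(op->addr, entry_addr, sizeof(op->addr)); /* ~ether_addr_copy */
	op->vid = 10;

	memset(entry_addr, 0, sizeof(entry_addr)); /* caller's copy goes away */

	pthread_create(&t, NULL, process_deferred, op);
	pthread_join(t, NULL);
	return 0;
}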
 
index 6d5ed795c3e2ae2857edf404f6692b9d990e7c98..a9d424e20229c2c43a22240144ae7b973598871f 100644 (file)
@@ -32,7 +32,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
 {
        struct net_bridge_vlan_group *vg;
 
-       vg = nbp_vlan_group(p);
+       vg = nbp_vlan_group_rcu(p);
        return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
                br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
 }
@@ -80,7 +80,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
        struct net_bridge_vlan_group *vg;
 
-       vg = nbp_vlan_group(to);
+       vg = nbp_vlan_group_rcu(to);
        skb = br_handle_vlan(to->br, vg, skb);
        if (!skb)
                return;
@@ -112,7 +112,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
                return;
        }
 
-       vg = nbp_vlan_group(to);
+       vg = nbp_vlan_group_rcu(to);
        skb = br_handle_vlan(to->br, vg, skb);
        if (!skb)
                return;
index 934cae9fa317851baeb2045b751a41c94a270a4c..ec02f5869a780246dd22030dfe66efa6d5e5f8da 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <net/sock.h>
 #include <linux/if_vlan.h>
+#include <net/switchdev.h>
 
 #include "br_private.h"
 
@@ -248,7 +249,10 @@ static void del_nbp(struct net_bridge_port *p)
 
        list_del_rcu(&p->list);
 
+       nbp_vlan_flush(p);
        br_fdb_delete_by_port(br, p, 0, 1);
+       switchdev_deferred_process();
+
        nbp_update_port_count(br);
 
        netdev_upper_dev_unlink(dev, br->dev);
@@ -256,8 +260,6 @@ static void del_nbp(struct net_bridge_port *p)
        dev->priv_flags &= ~IFF_BRIDGE_PORT;
 
        netdev_rx_handler_unregister(dev);
-       /* use the synchronize_rcu done by netdev_rx_handler_unregister */
-       nbp_vlan_flush(p);
 
        br_multicast_del_port(p);
 
index f5c5a4500e2f676f8f301da265474ec1e175a7dc..f7fba74108a9377b12610c9cdfa033dc8c7c92a0 100644 (file)
@@ -44,7 +44,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
        brstats->rx_bytes += skb->len;
        u64_stats_update_end(&brstats->syncp);
 
-       vg = br_vlan_group(br);
+       vg = br_vlan_group_rcu(br);
        /* Bridge is just like any other port.  Make sure the
         * packet is allowed except in promisc mode when someone
         * may be running packet capture.
@@ -140,7 +140,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
        if (!p || p->state == BR_STATE_DISABLED)
                goto drop;
 
-       if (!br_allowed_ingress(p->br, nbp_vlan_group(p), skb, &vid))
+       if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
                goto out;
 
        /* insert into forwarding database after filtering to avoid spoofing */
index 370aa4d4cf4d3866624dbcdec6ad339ae302cd9a..7ddbe7ec81d61d4971b919c5988e7bed93436dec 100644 (file)
@@ -111,7 +111,6 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
 /* largest possible L2 header, see br_nf_dev_queue_xmit() */
 #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct brnf_frag_data {
        char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
        u8 encap_size;
@@ -121,7 +120,6 @@ struct brnf_frag_data {
 };
 
 static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
-#endif
 
 static void nf_bridge_info_free(struct sk_buff *skb)
 {
@@ -666,7 +664,6 @@ static unsigned int br_nf_forward_arp(void *priv,
        return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct brnf_frag_data *data;
@@ -691,9 +688,7 @@ static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff
        nf_bridge_info_free(skb);
        return br_dev_queue_push_xmit(net, sk, skb);
 }
-#endif
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int
 br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                  int (*output)(struct net *, struct sock *, struct sk_buff *))
@@ -711,7 +706,6 @@ br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
        return ip_do_fragment(net, sk, skb, output);
 }
-#endif
 
 static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 {
@@ -734,11 +728,11 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
 
        nf_bridge = nf_bridge_info_get(skb);
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
        /* This is wrong! We should preserve the original fragment
         * boundaries by preserving frag_list rather than refragmenting.
         */
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) &&
+           skb->protocol == htons(ETH_P_IP)) {
                struct brnf_frag_data *data;
 
                if (br_validate_ipv4(net, skb))
@@ -760,9 +754,8 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
 
                return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
        }
-#endif
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
-       if (skb->protocol == htons(ETH_P_IPV6)) {
+       if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
+           skb->protocol == htons(ETH_P_IPV6)) {
                const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
                struct brnf_frag_data *data;
 
@@ -786,7 +779,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
                kfree_skb(skb);
                return -EMSGSIZE;
        }
-#endif
        nf_bridge_info_free(skb);
        return br_dev_queue_push_xmit(net, sk, skb);
  drop:
@@ -904,49 +896,42 @@ EXPORT_SYMBOL_GPL(br_netfilter_enable);
 static struct nf_hook_ops br_nf_ops[] __read_mostly = {
        {
                .hook = br_nf_pre_routing,
-               .owner = THIS_MODULE,
                .pf = NFPROTO_BRIDGE,
                .hooknum = NF_BR_PRE_ROUTING,
                .priority = NF_BR_PRI_BRNF,
        },
        {
                .hook = br_nf_local_in,
-               .owner = THIS_MODULE,
                .pf = NFPROTO_BRIDGE,
                .hooknum = NF_BR_LOCAL_IN,
                .priority = NF_BR_PRI_BRNF,
        },
        {
                .hook = br_nf_forward_ip,
-               .owner = THIS_MODULE,
                .pf = NFPROTO_BRIDGE,
                .hooknum = NF_BR_FORWARD,
                .priority = NF_BR_PRI_BRNF - 1,
        },
        {
                .hook = br_nf_forward_arp,
-               .owner = THIS_MODULE,
                .pf = NFPROTO_BRIDGE,
                .hooknum = NF_BR_FORWARD,
                .priority = NF_BR_PRI_BRNF,
        },
        {
                .hook = br_nf_post_routing,
-               .owner = THIS_MODULE,
                .pf = NFPROTO_BRIDGE,
                .hooknum = NF_BR_POST_ROUTING,
                .priority = NF_BR_PRI_LAST,
        },
        {
                .hook = ip_sabotage_in,
-               .owner = THIS_MODULE,
                .pf = NFPROTO_IPV4,
                .hooknum = NF_INET_PRE_ROUTING,
                .priority = NF_IP_PRI_FIRST,
        },
        {
                .hook = ip_sabotage_in,
-               .owner = THIS_MODULE,
                .pf = NFPROTO_IPV6,
                .hooknum = NF_INET_PRE_ROUTING,
                .priority = NF_IP6_PRI_FIRST,
index d792d1a848ad09fde1c883867f058af0efb9d48c..40197ff8918abab433c563dd92186479652ed84d 100644 (file)
@@ -102,10 +102,10 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
        rcu_read_lock();
        if (br_port_exists(dev)) {
                p = br_port_get_rcu(dev);
-               vg = nbp_vlan_group(p);
+               vg = nbp_vlan_group_rcu(p);
        } else if (dev->priv_flags & IFF_EBRIDGE) {
                br = netdev_priv(dev);
-               vg = br_vlan_group(br);
+               vg = br_vlan_group_rcu(br);
        }
        num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
        rcu_read_unlock();
@@ -253,7 +253,7 @@ static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
         * if vlaninfo represents a range
         */
        pvid = br_get_pvid(vg);
-       list_for_each_entry(v, &vg->vlan_list, vlist) {
+       list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
                flags = 0;
                if (!br_vlan_should_use(v))
                        continue;
@@ -303,7 +303,7 @@ static int br_fill_ifvlaninfo(struct sk_buff *skb,
        u16 pvid;
 
        pvid = br_get_pvid(vg);
-       list_for_each_entry(v, &vg->vlan_list, vlist) {
+       list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
                if (!br_vlan_should_use(v))
                        continue;
 
@@ -386,22 +386,27 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                struct nlattr *af;
                int err;
 
+               /* RCU needed because of the VLAN locking rules (rcu || rtnl) */
+               rcu_read_lock();
                if (port)
-                       vg = nbp_vlan_group(port);
+                       vg = nbp_vlan_group_rcu(port);
                else
-                       vg = br_vlan_group(br);
+                       vg = br_vlan_group_rcu(br);
 
-               if (!vg || !vg->num_vlans)
+               if (!vg || !vg->num_vlans) {
+                       rcu_read_unlock();
                        goto done;
-
+               }
                af = nla_nest_start(skb, IFLA_AF_SPEC);
-               if (!af)
+               if (!af) {
+                       rcu_read_unlock();
                        goto nla_put_failure;
-
+               }
                if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
                        err = br_fill_ifvlaninfo_compressed(skb, vg);
                else
                        err = br_fill_ifvlaninfo(skb, vg);
+               rcu_read_unlock();
                if (err)
                        goto nla_put_failure;
                nla_nest_end(skb, af);
@@ -1209,29 +1214,10 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
        return 0;
 }
 
-static size_t br_get_link_af_size(const struct net_device *dev)
-{
-       struct net_bridge_port *p;
-       struct net_bridge *br;
-       int num_vlans = 0;
-
-       if (br_port_exists(dev)) {
-               p = br_port_get_rtnl(dev);
-               num_vlans = br_get_num_vlan_infos(nbp_vlan_group(p),
-                                                 RTEXT_FILTER_BRVLAN);
-       } else if (dev->priv_flags & IFF_EBRIDGE) {
-               br = netdev_priv(dev);
-               num_vlans = br_get_num_vlan_infos(br_vlan_group(br),
-                                                 RTEXT_FILTER_BRVLAN);
-       }
-
-       /* Each VLAN is returned in bridge_vlan_info along with flags */
-       return num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
-}
 
 static struct rtnl_af_ops br_af_ops __read_mostly = {
        .family                 = AF_BRIDGE,
-       .get_link_af_size       = br_get_link_af_size,
+       .get_link_af_size       = br_get_link_af_size_filtered,
 };
 
 struct rtnl_link_ops br_link_ops __read_mostly = {
index ba0c67b2159a59a277d8142d7871f3af91578c83..216018c760187db31e45206225ce8c7594a849d5 100644 (file)
@@ -229,7 +229,7 @@ struct net_bridge_port
        struct netpoll                  *np;
 #endif
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-       struct net_bridge_vlan_group    *vlgrp;
+       struct net_bridge_vlan_group    __rcu *vlgrp;
 #endif
 };
 
@@ -337,7 +337,7 @@ struct net_bridge
        struct kobject                  *ifobj;
        u32                             auto_cnt;
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-       struct net_bridge_vlan_group    *vlgrp;
+       struct net_bridge_vlan_group    __rcu *vlgrp;
        u8                              vlan_enabled;
        __be16                          vlan_proto;
        u16                             default_pvid;
@@ -700,13 +700,25 @@ int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
 static inline struct net_bridge_vlan_group *br_vlan_group(
                                        const struct net_bridge *br)
 {
-       return br->vlgrp;
+       return rtnl_dereference(br->vlgrp);
 }
 
 static inline struct net_bridge_vlan_group *nbp_vlan_group(
                                        const struct net_bridge_port *p)
 {
-       return p->vlgrp;
+       return rtnl_dereference(p->vlgrp);
+}
+
+static inline struct net_bridge_vlan_group *br_vlan_group_rcu(
+                                       const struct net_bridge *br)
+{
+       return rcu_dereference(br->vlgrp);
+}
+
+static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
+                                       const struct net_bridge_port *p)
+{
+       return rcu_dereference(p->vlgrp);
 }
 
 /* Since bridge now depends on 8021Q module, but the time bridge sees the
@@ -853,6 +865,19 @@ static inline struct net_bridge_vlan_group *nbp_vlan_group(
 {
        return NULL;
 }
+
+static inline struct net_bridge_vlan_group *br_vlan_group_rcu(
+                                       const struct net_bridge *br)
+{
+       return NULL;
+}
+
+static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
+                                       const struct net_bridge_port *p)
+{
+       return NULL;
+}
+
 #endif
 
 struct nf_br_ops {
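
The __rcu annotation plus the accessor split above is the usual RCU dual-path pattern: control-path code holding RTNL uses rtnl_dereference(), while the forwarding path reads under rcu_read_lock() with rcu_dereference(), and each variant lockdep-checks its own context. A minimal sketch of the same pattern with illustrative names (struct foo and foo_group are not from this patch):

    struct foo_group { int num; };

    struct foo {
            struct foo_group __rcu *grp;
    };

    /* control path: caller holds RTNL and may write through the result */
    static struct foo_group *foo_group(const struct foo *f)
    {
            return rtnl_dereference(f->grp);
    }

    /* fast path: caller sits inside rcu_read_lock()/rcu_read_unlock() */
    static struct foo_group *foo_group_rcu(const struct foo *f)
    {
            return rcu_dereference(f->grp);
    }
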
index db6d243defb2dd0154887c273e4eb718ffab14b0..80c34d70218c0f9d2066016e3f5ba5fc56656490 100644
@@ -41,13 +41,14 @@ void br_set_state(struct net_bridge_port *p, unsigned int state)
 {
        struct switchdev_attr attr = {
                .id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+               .flags = SWITCHDEV_F_DEFER,
                .u.stp_state = state,
        };
        int err;
 
        p->state = state;
        err = switchdev_port_attr_set(p->dev, &attr);
-       if (err && err != -EOPNOTSUPP)
+       if (err)
                br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
                                (unsigned int) p->port_no, p->dev->name);
 }
index 4ca449a161320f7ef1c6f4864940e8557a7d18e3..fa53d7a89f485ac9039b6168db3063512c4d9d42 100644
@@ -15,6 +15,7 @@
 #include <linux/kmod.h>
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
+#include <net/switchdev.h>
 
 #include "br_private.h"
 #include "br_private_stp.h"
@@ -35,11 +36,22 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
 /* called under bridge lock */
 void br_init_port(struct net_bridge_port *p)
 {
+       struct switchdev_attr attr = {
+               .id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
+               .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
+               .u.ageing_time = p->br->ageing_time,
+       };
+       int err;
+
        p->port_id = br_make_port_id(p->priority, p->port_no);
        br_become_designated_port(p);
        br_set_state(p, BR_STATE_BLOCKING);
        p->topology_change_ack = 0;
        p->config_pending = 0;
+
+       err = switchdev_port_attr_set(p->dev, &attr);
+       if (err)
+               netdev_err(p->dev, "failed to set HW ageing time\n");
 }
 
 /* called under bridge lock */
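
Two switchdev flags do the work here: SWITCHDEV_F_DEFER pushes the driver call out to process context (br_init_port and br_set_state can run under the bridge spinlock, where drivers must not sleep), and SWITCHDEV_F_SKIP_EOPNOTSUPP has switchdev itself swallow -EOPNOTSUPP from ports with no offload, which is also why the br_set_state hunk above drops its own -EOPNOTSUPP check. A hedged sketch of a deferred attribute set (dev and ageing_time stand in for caller state):

    struct switchdev_attr attr = {
            .id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
            .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
            .u.ageing_time = ageing_time,
    };
    int err = switchdev_port_attr_set(dev, &attr);
    /* with SWITCHDEV_F_DEFER this returns after queueing; the actual
     * driver call runs later from process context */
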
index 04ef1926ee7eea204756c258e9b0689316ef5f15..8365bd53c42179dec15314fa99bf2346f729899f 100644
@@ -102,7 +102,15 @@ static ssize_t ageing_time_show(struct device *d,
 
 static int set_ageing_time(struct net_bridge *br, unsigned long val)
 {
-       return br_set_ageing_time(br, val);
+       int ret;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       ret = br_set_ageing_time(br, val);
+       rtnl_unlock();
+
+       return ret;
 }
 
 static ssize_t ageing_time_store(struct device *d,
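
rtnl_trylock() plus restart_syscall() is the stock pattern for sysfs handlers that need RTNL: blocking in rtnl_lock() here can deadlock against an RTNL holder waiting for this very sysfs file to go away, so the handler backs out and lets the syscall restart transparently (restart_syscall() returns -ERESTARTNOINTR). The store-side shape, as a sketch:

    static ssize_t foo_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t len)
    {
            if (!rtnl_trylock())
                    return restart_syscall();       /* retried, not failed */

            /* ... mutate RTNL-protected state, e.g. br_set_ageing_time() ... */

            rtnl_unlock();
            return len;
    }
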
index ad7e4f6b6d6b5bf01c95e59aeab2657113c386c8..5f0d0cc4744f2219f2fd77a1a3cd11b266f03748 100644
@@ -54,9 +54,9 @@ static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
        struct net_bridge_vlan_group *vg;
 
        if (br_vlan_is_master(v))
-               vg = v->br->vlgrp;
+               vg = br_vlan_group(v->br);
        else
-               vg = v->port->vlgrp;
+               vg = nbp_vlan_group(v->port);
 
        if (flags & BRIDGE_VLAN_INFO_PVID)
                __vlan_add_pvid(vg, v->vid);
@@ -91,11 +91,16 @@ static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
 
 static void __vlan_add_list(struct net_bridge_vlan *v)
 {
+       struct net_bridge_vlan_group *vg;
        struct list_head *headp, *hpos;
        struct net_bridge_vlan *vent;
 
-       headp = br_vlan_is_master(v) ? &v->br->vlgrp->vlan_list :
-                                      &v->port->vlgrp->vlan_list;
+       if (br_vlan_is_master(v))
+               vg = br_vlan_group(v->br);
+       else
+               vg = nbp_vlan_group(v->port);
+
+       headp = &vg->vlan_list;
        list_for_each_prev(hpos, headp) {
                vent = list_entry(hpos, struct net_bridge_vlan, vlist);
                if (v->vid < vent->vid)
@@ -137,14 +142,16 @@ static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
  */
 static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
 {
+       struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *masterv;
 
-       masterv = br_vlan_find(br->vlgrp, vid);
+       vg = br_vlan_group(br);
+       masterv = br_vlan_find(vg, vid);
        if (!masterv) {
                /* missing global ctx, create it now */
                if (br_vlan_add(br, vid, 0))
                        return NULL;
-               masterv = br_vlan_find(br->vlgrp, vid);
+               masterv = br_vlan_find(vg, vid);
                if (WARN_ON(!masterv))
                        return NULL;
        }
@@ -155,11 +162,14 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
 
 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 {
+       struct net_bridge_vlan_group *vg;
+
        if (!br_vlan_is_master(masterv))
                return;
 
+       vg = br_vlan_group(masterv->br);
        if (atomic_dec_and_test(&masterv->refcnt)) {
-               rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
+               rhashtable_remove_fast(&vg->vlan_hash,
                                       &masterv->vnode, br_vlan_rht_params);
                __vlan_del_list(masterv);
                kfree_rcu(masterv, rcu);
@@ -189,12 +199,12 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
        if (br_vlan_is_master(v)) {
                br = v->br;
                dev = br->dev;
-               vg = br->vlgrp;
+               vg = br_vlan_group(br);
        } else {
                p = v->port;
                br = p->br;
                dev = p->dev;
-               vg = p->vlgrp;
+               vg = nbp_vlan_group(p);
        }
 
        if (p) {
@@ -266,10 +276,10 @@ static int __vlan_del(struct net_bridge_vlan *v)
        int err = 0;
 
        if (br_vlan_is_master(v)) {
-               vg = v->br->vlgrp;
+               vg = br_vlan_group(v->br);
        } else {
                p = v->port;
-               vg = v->port->vlgrp;
+               vg = nbp_vlan_group(v->port);
                masterv = v->brvlan;
        }
 
@@ -297,15 +307,20 @@ out:
        return err;
 }
 
-static void __vlan_flush(struct net_bridge_vlan_group *vlgrp)
+static void __vlan_group_free(struct net_bridge_vlan_group *vg)
+{
+       WARN_ON(!list_empty(&vg->vlan_list));
+       rhashtable_destroy(&vg->vlan_hash);
+       kfree(vg);
+}
+
+static void __vlan_flush(struct net_bridge_vlan_group *vg)
 {
        struct net_bridge_vlan *vlan, *tmp;
 
-       __vlan_delete_pvid(vlgrp, vlgrp->pvid);
-       list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist)
+       __vlan_delete_pvid(vg, vg->pvid);
+       list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
                __vlan_del(vlan);
-       rhashtable_destroy(&vlgrp->vlan_hash);
-       kfree(vlgrp);
 }
 
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
@@ -467,7 +482,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
        if (!br->vlan_enabled)
                return true;
 
-       vg = p->vlgrp;
+       vg = nbp_vlan_group(p);
        if (!vg || !vg->num_vlans)
                return false;
 
@@ -493,12 +508,14 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
  */
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
 {
+       struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *vlan;
        int ret;
 
        ASSERT_RTNL();
 
-       vlan = br_vlan_find(br->vlgrp, vid);
+       vg = br_vlan_group(br);
+       vlan = br_vlan_find(vg, vid);
        if (vlan) {
                if (!br_vlan_is_brentry(vlan)) {
                        /* Trying to change flags of non-existent bridge vlan */
@@ -513,7 +530,7 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
                        }
                        atomic_inc(&vlan->refcnt);
                        vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
-                       br->vlgrp->num_vlans++;
+                       vg->num_vlans++;
                }
                __vlan_add_flags(vlan, flags);
                return 0;
@@ -541,11 +558,13 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
  */
 int br_vlan_delete(struct net_bridge *br, u16 vid)
 {
+       struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *v;
 
        ASSERT_RTNL();
 
-       v = br_vlan_find(br->vlgrp, vid);
+       vg = br_vlan_group(br);
+       v = br_vlan_find(vg, vid);
        if (!v || !br_vlan_is_brentry(v))
                return -ENOENT;
 
@@ -557,9 +576,15 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
 
 void br_vlan_flush(struct net_bridge *br)
 {
+       struct net_bridge_vlan_group *vg;
+
        ASSERT_RTNL();
 
-       __vlan_flush(br_vlan_group(br));
+       vg = br_vlan_group(br);
+       __vlan_flush(vg);
+       RCU_INIT_POINTER(br->vlgrp, NULL);
+       synchronize_rcu();
+       __vlan_group_free(vg);
 }
 
 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
@@ -626,6 +651,7 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
        int err = 0;
        struct net_bridge_port *p;
        struct net_bridge_vlan *vlan;
+       struct net_bridge_vlan_group *vg;
        __be16 oldproto;
 
        if (br->vlan_proto == proto)
@@ -633,7 +659,8 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 
        /* Add VLANs for the new proto to the device filter. */
        list_for_each_entry(p, &br->port_list, list) {
-               list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) {
+               vg = nbp_vlan_group(p);
+               list_for_each_entry(vlan, &vg->vlan_list, vlist) {
                        err = vlan_vid_add(p->dev, proto, vlan->vid);
                        if (err)
                                goto err_filt;
@@ -647,19 +674,23 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
        br_recalculate_fwd_mask(br);
 
        /* Delete VLANs for the old proto from the device filter. */
-       list_for_each_entry(p, &br->port_list, list)
-               list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
+       list_for_each_entry(p, &br->port_list, list) {
+               vg = nbp_vlan_group(p);
+               list_for_each_entry(vlan, &vg->vlan_list, vlist)
                        vlan_vid_del(p->dev, oldproto, vlan->vid);
+       }
 
        return 0;
 
 err_filt:
-       list_for_each_entry_continue_reverse(vlan, &p->vlgrp->vlan_list, vlist)
+       list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
                vlan_vid_del(p->dev, proto, vlan->vid);
 
-       list_for_each_entry_continue_reverse(p, &br->port_list, list)
-               list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
+       list_for_each_entry_continue_reverse(p, &br->port_list, list) {
+               vg = nbp_vlan_group(p);
+               list_for_each_entry(vlan, &vg->vlan_list, vlist)
                        vlan_vid_del(p->dev, proto, vlan->vid);
+       }
 
        return err;
 }
@@ -703,11 +734,11 @@ static void br_vlan_disable_default_pvid(struct net_bridge *br)
        /* Disable default_pvid on all ports where it is still
         * configured.
         */
-       if (vlan_default_pvid(br->vlgrp, pvid))
+       if (vlan_default_pvid(br_vlan_group(br), pvid))
                br_vlan_delete(br, pvid);
 
        list_for_each_entry(p, &br->port_list, list) {
-               if (vlan_default_pvid(p->vlgrp, pvid))
+               if (vlan_default_pvid(nbp_vlan_group(p), pvid))
                        nbp_vlan_delete(p, pvid);
        }
 
@@ -717,6 +748,7 @@ static void br_vlan_disable_default_pvid(struct net_bridge *br)
 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
 {
        const struct net_bridge_vlan *pvent;
+       struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p;
        u16 old_pvid;
        int err = 0;
@@ -737,8 +769,9 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
        /* Update default_pvid config only if we do not conflict with
         * user configuration.
         */
-       pvent = br_vlan_find(br->vlgrp, pvid);
-       if ((!old_pvid || vlan_default_pvid(br->vlgrp, old_pvid)) &&
+       vg = br_vlan_group(br);
+       pvent = br_vlan_find(vg, pvid);
+       if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
            (!pvent || !br_vlan_should_use(pvent))) {
                err = br_vlan_add(br, pvid,
                                  BRIDGE_VLAN_INFO_PVID |
@@ -754,9 +787,10 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
                /* Update default_pvid config only if we do not conflict with
                 * user configuration.
                 */
+               vg = nbp_vlan_group(p);
                if ((old_pvid &&
-                    !vlan_default_pvid(p->vlgrp, old_pvid)) ||
-                   br_vlan_find(p->vlgrp, pvid))
+                    !vlan_default_pvid(vg, old_pvid)) ||
+                   br_vlan_find(vg, pvid))
                        continue;
 
                err = nbp_vlan_add(p, pvid,
@@ -825,17 +859,19 @@ unlock:
 
 int br_vlan_init(struct net_bridge *br)
 {
+       struct net_bridge_vlan_group *vg;
        int ret = -ENOMEM;
 
-       br->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
-       if (!br->vlgrp)
+       vg = kzalloc(sizeof(*vg), GFP_KERNEL);
+       if (!vg)
                goto out;
-       ret = rhashtable_init(&br->vlgrp->vlan_hash, &br_vlan_rht_params);
+       ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
        if (ret)
                goto err_rhtbl;
-       INIT_LIST_HEAD(&br->vlgrp->vlan_list);
+       INIT_LIST_HEAD(&vg->vlan_list);
        br->vlan_proto = htons(ETH_P_8021Q);
        br->default_pvid = 1;
+       rcu_assign_pointer(br->vlgrp, vg);
        ret = br_vlan_add(br, 1,
                          BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
                          BRIDGE_VLAN_INFO_BRENTRY);
@@ -846,9 +882,9 @@ out:
        return ret;
 
 err_vlan_add:
-       rhashtable_destroy(&br->vlgrp->vlan_hash);
+       rhashtable_destroy(&vg->vlan_hash);
 err_rhtbl:
-       kfree(br->vlgrp);
+       kfree(vg);
 
        goto out;
 }
@@ -866,9 +902,7 @@ int nbp_vlan_init(struct net_bridge_port *p)
        if (ret)
                goto err_rhtbl;
        INIT_LIST_HEAD(&vg->vlan_list);
-       /* Make sure everything's committed before publishing vg */
-       smp_wmb();
-       p->vlgrp = vg;
+       rcu_assign_pointer(p->vlgrp, vg);
        if (p->br->default_pvid) {
                ret = nbp_vlan_add(p, p->br->default_pvid,
                                   BRIDGE_VLAN_INFO_PVID |
@@ -897,7 +931,7 @@ int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 
        ASSERT_RTNL();
 
-       vlan = br_vlan_find(port->vlgrp, vid);
+       vlan = br_vlan_find(nbp_vlan_group(port), vid);
        if (vlan) {
                __vlan_add_flags(vlan, flags);
                return 0;
@@ -925,7 +959,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
 
        ASSERT_RTNL();
 
-       v = br_vlan_find(port->vlgrp, vid);
+       v = br_vlan_find(nbp_vlan_group(port), vid);
        if (!v)
                return -ENOENT;
        br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
@@ -936,12 +970,13 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
 
 void nbp_vlan_flush(struct net_bridge_port *port)
 {
-       struct net_bridge_vlan *vlan;
+       struct net_bridge_vlan_group *vg;
 
        ASSERT_RTNL();
 
-       list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist)
-               vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid);
-
-       __vlan_flush(nbp_vlan_group(port));
+       vg = nbp_vlan_group(port);
+       __vlan_flush(vg);
+       RCU_INIT_POINTER(port->vlgrp, NULL);
+       synchronize_rcu();
+       __vlan_group_free(vg);
 }
index f9242dffa65e0cff5e61557656b2bb8040c1a08c..32eccd101f2681971e14cacb758a847d421a02ff 100644
@@ -73,21 +73,18 @@ ebt_out_hook(void *priv, struct sk_buff *skb,
 static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
        {
                .hook           = ebt_in_hook,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_BRIDGE,
                .hooknum        = NF_BR_LOCAL_IN,
                .priority       = NF_BR_PRI_FILTER_BRIDGED,
        },
        {
                .hook           = ebt_in_hook,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_BRIDGE,
                .hooknum        = NF_BR_FORWARD,
                .priority       = NF_BR_PRI_FILTER_BRIDGED,
        },
        {
                .hook           = ebt_out_hook,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_BRIDGE,
                .hooknum        = NF_BR_LOCAL_OUT,
                .priority       = NF_BR_PRI_FILTER_OTHER,
index 4bbefe03ab588f12218b77195b3891ede33532af..ec55358f00c8a9672b0465ed6dee9d5f03836b5f 100644
@@ -73,21 +73,18 @@ ebt_nat_out(void *priv, struct sk_buff *skb,
 static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
        {
                .hook           = ebt_nat_out,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_BRIDGE,
                .hooknum        = NF_BR_LOCAL_OUT,
                .priority       = NF_BR_PRI_NAT_DST_OTHER,
        },
        {
                .hook           = ebt_nat_out,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_BRIDGE,
                .hooknum        = NF_BR_POST_ROUTING,
                .priority       = NF_BR_PRI_NAT_SRC,
        },
        {
                .hook           = ebt_nat_in,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_BRIDGE,
                .hooknum        = NF_BR_PRE_ROUTING,
                .priority       = NF_BR_PRI_NAT_DST_BRIDGED,
index a1ba6875c2a2073d55b6f797e16d88baed3af3d1..6863310d6973ba616323f33a5d57a04a583a5430 100644
@@ -96,7 +96,7 @@ struct bcm_op {
        canid_t can_id;
        u32 flags;
        unsigned long frames_abs, frames_filtered;
-       struct timeval ival1, ival2;
+       struct bcm_timeval ival1, ival2;
        struct hrtimer timer, thrtimer;
        struct tasklet_struct tsklet, thrtsklet;
        ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
@@ -131,6 +131,11 @@ static inline struct bcm_sock *bcm_sk(const struct sock *sk)
        return (struct bcm_sock *)sk;
 }
 
+static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
+{
+       return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
+}
+
 #define CFSIZ sizeof(struct can_frame)
 #define OPSIZ sizeof(struct bcm_op)
 #define MHSIZ sizeof(struct bcm_msg_head)
@@ -953,8 +958,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                op->count = msg_head->count;
                op->ival1 = msg_head->ival1;
                op->ival2 = msg_head->ival2;
-               op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
-               op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
+               op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
+               op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
 
                /* disable an active timer due to zero values? */
                if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
@@ -1134,8 +1139,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                        /* set timer value */
                        op->ival1 = msg_head->ival1;
                        op->ival2 = msg_head->ival2;
-                       op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
-                       op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
+                       op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
+                       op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
 
                        /* disable an active timer due to zero value? */
                        if (!op->kt_ival1.tv64)
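
struct bcm_timeval mirrors the old struct timeval layout but belongs to the bcm ABI itself, so the in-kernel timeval type can change (for the y2038 work) without breaking CAN broadcast-manager users; the open-coded bcm_timeval_to_ktime() replaces timeval_to_ktime() for the same reason. As a worked example (values illustrative):

    struct bcm_timeval tv = { .tv_sec = 1, .tv_usec = 500000 };
    ktime_t kt = bcm_timeval_to_ktime(tv);
    /* ktime_set(1, 500000 * NSEC_PER_USEC) == 1500000000 ns == 1.5 s */
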
index 80b94e37c94aae115155454b9f4386a1b91021de..f79ccac6699fb7b171b680261db9000f7fe4c70c 100644
@@ -285,6 +285,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
+       case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
@@ -485,13 +486,14 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
        size_t payload_len = 0;
 
        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
-              opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);
+              opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
+              opcode != CEPH_OSD_OP_TRUNCATE);
 
        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
-       if (opcode == CEPH_OSD_OP_WRITE)
+       if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;
 
        op->payload_len = payload_len;
@@ -670,9 +672,11 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
+       case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
-               if (src->op == CEPH_OSD_OP_WRITE)
+               if (src->op == CEPH_OSD_OP_WRITE ||
+                   src->op == CEPH_OSD_OP_WRITEFULL)
                        request_data_len = src->extent.length;
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
@@ -681,7 +685,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                osd_data = &src->extent.osd_data;
-               if (src->op == CEPH_OSD_OP_WRITE)
+               if (src->op == CEPH_OSD_OP_WRITE ||
+                   src->op == CEPH_OSD_OP_WRITEFULL)
                        ceph_osdc_msg_data_add(req->r_request, osd_data);
                else
                        ceph_osdc_msg_data_add(req->r_reply, osd_data);
index a229bf0d649dc9bf58ece00e95d58f8a133addca..13f49f81ae13a3e3f07b3304b8f04bcb791d84b3 100644
@@ -99,6 +99,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/stat.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
 #include <net/xfrm.h>
@@ -681,6 +682,32 @@ int dev_get_iflink(const struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_get_iflink);
 
+/**
+ *     dev_fill_metadata_dst - Retrieve tunnel egress information.
+ *     @dev: targeted interface
+ *     @skb: The packet.
+ *
+ *     For better visibility of tunnel traffic, OVS needs to retrieve
+ *     egress tunnel information for a packet. The following API allows
+ *     the user to get this info.
+ */
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+       struct ip_tunnel_info *info;
+
+       if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
+               return -EINVAL;
+
+       info = skb_tunnel_info_unclone(skb);
+       if (!info)
+               return -ENOMEM;
+       if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
+               return -EINVAL;
+
+       return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
+}
+EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
+
 /**
  *     __dev_get_by_name       - find a device by its name
  *     @net: the applicable net namespace
@@ -5346,6 +5373,12 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        changeupper_info.master = master;
        changeupper_info.linking = true;
 
+       ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
+                                           &changeupper_info.info);
+       ret = notifier_to_errno(ret);
+       if (ret)
+               return ret;
+
        ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
                                                   master);
        if (ret)
@@ -5488,6 +5521,9 @@ void netdev_upper_dev_unlink(struct net_device *dev,
        changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
        changeupper_info.linking = false;
 
+       call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
+                                     &changeupper_info.info);
+
        __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
        /* Here is the tricky part. We must remove all dev's lower
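
NETDEV_PRECHANGEUPPER gives drivers a veto before dev is linked under upper_dev; __netdev_upper_dev_link() aborts on a notifier error before any adjacency state exists, so there is nothing to unwind, and the unlink path sends the same event purely as advance notice. A sketch of a driver-side handler, with a hypothetical capability check:

    static int foo_netdevice_event(struct notifier_block *nb,
                                   unsigned long event, void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            switch (event) {
            case NETDEV_PRECHANGEUPPER:
                    if (!foo_can_be_enslaved(dev))  /* hypothetical check */
                            return notifier_from_errno(-EOPNOTSUPP);
                    break;
            }
            return NOTIFY_DONE;
    }
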
index b495ab1797fae303d12a3251f09b141052c1ff55..29edf74846fc9cfef49f3fc35b4ba41de6c254af 100644
@@ -1284,7 +1284,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
 
        gstrings.len = ret;
 
-       data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+       data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
        if (!data)
                return -ENOMEM;
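
kcalloc() returns NULL instead of wrapping when the multiplication overflows; with a driver-supplied gstrings.len, kmalloc(len * ETH_GSTRING_LEN, ...) could overflow on 32-bit and hand back a too-small buffer. What the kcalloc guard amounts to, roughly (a sketch, not the mm implementation):

    if (n && size > SIZE_MAX / n)
            return NULL;            /* n * size would wrap */
    return kmalloc(n * size, flags);
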
 
index 0b00094932ab46f20b9c1ae6948322c5dd130afc..672eefbfbe99fff2ade1bd2a095fb2366a2d2c0b 100644
@@ -1414,6 +1414,7 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
                return dev_forward_skb(dev, skb2);
 
        skb2->dev = dev;
+       skb_sender_cpu_clear(skb2);
        return dev_queue_xmit(skb2);
 }
 
@@ -1941,9 +1942,13 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
                goto out;
 
        /* We're copying the filter that has been originally attached,
-        * so no conversion/decode needed anymore.
+        * so no conversion/decode needed anymore. eBPF programs that
+        * have no original program cannot be dumped through this.
         */
+       ret = -EACCES;
        fprog = filter->prog->orig_prog;
+       if (!fprog)
+               goto out;
 
        ret = fprog->len;
        if (!len)
index 24775953fa68e03445ba4eaaf3640808782ea744..504bd17b7456c3a16a0832ed28ede040c41ff316 100644
@@ -497,7 +497,8 @@ void rtnl_af_unregister(struct rtnl_af_ops *ops)
 }
 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
 
-static size_t rtnl_link_get_af_size(const struct net_device *dev)
+static size_t rtnl_link_get_af_size(const struct net_device *dev,
+                                   u32 ext_filter_mask)
 {
        struct rtnl_af_ops *af_ops;
        size_t size;
@@ -509,7 +510,7 @@ static size_t rtnl_link_get_af_size(const struct net_device *dev)
                if (af_ops->get_link_af_size) {
                        /* AF_* + nested data */
                        size += nla_total_size(sizeof(struct nlattr)) +
-                               af_ops->get_link_af_size(dev);
+                               af_ops->get_link_af_size(dev, ext_filter_mask);
                }
        }
 
@@ -837,7 +838,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
                         /* IFLA_VF_STATS_BROADCAST */
                         nla_total_size(sizeof(__u64)) +
                         /* IFLA_VF_STATS_MULTICAST */
-                        nla_total_size(sizeof(__u64)));
+                        nla_total_size(sizeof(__u64)) +
+                        nla_total_size(sizeof(struct ifla_vf_trust)));
                return size;
        } else
                return 0;
@@ -900,7 +902,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
               + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
               + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
-              + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+              + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
               + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
               + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
               + nla_total_size(1); /* IFLA_PROTO_DOWN */
@@ -1160,6 +1162,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        struct ifla_vf_link_state vf_linkstate;
                        struct ifla_vf_rss_query_en vf_rss_query_en;
                        struct ifla_vf_stats vf_stats;
+                       struct ifla_vf_trust vf_trust;
 
                        /*
                         * Not all SR-IOV capable drivers support the
@@ -1169,6 +1172,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                         */
                        ivi.spoofchk = -1;
                        ivi.rss_query_en = -1;
+                       ivi.trusted = -1;
                        memset(ivi.mac, 0, sizeof(ivi.mac));
                        /* The default value for VF link state is "auto"
                         * IFLA_VF_LINK_STATE_AUTO which equals zero
@@ -1182,7 +1186,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                                vf_tx_rate.vf =
                                vf_spoofchk.vf =
                                vf_linkstate.vf =
-                               vf_rss_query_en.vf = ivi.vf;
+                               vf_rss_query_en.vf =
+                               vf_trust.vf = ivi.vf;
 
                        memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
                        vf_vlan.vlan = ivi.vlan;
@@ -1193,6 +1198,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        vf_spoofchk.setting = ivi.spoofchk;
                        vf_linkstate.link_state = ivi.linkstate;
                        vf_rss_query_en.setting = ivi.rss_query_en;
+                       vf_trust.setting = ivi.trusted;
                        vf = nla_nest_start(skb, IFLA_VF_INFO);
                        if (!vf) {
                                nla_nest_cancel(skb, vfinfo);
@@ -1210,7 +1216,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                                    &vf_linkstate) ||
                            nla_put(skb, IFLA_VF_RSS_QUERY_EN,
                                    sizeof(vf_rss_query_en),
-                                   &vf_rss_query_en))
+                                   &vf_rss_query_en) ||
+                           nla_put(skb, IFLA_VF_TRUST,
+                                   sizeof(vf_trust), &vf_trust))
                                goto nla_put_failure;
                        memset(&vf_stats, 0, sizeof(vf_stats));
                        if (dev->netdev_ops->ndo_get_vf_stats)
@@ -1347,6 +1355,7 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
        [IFLA_VF_LINK_STATE]    = { .len = sizeof(struct ifla_vf_link_state) },
        [IFLA_VF_RSS_QUERY_EN]  = { .len = sizeof(struct ifla_vf_rss_query_en) },
        [IFLA_VF_STATS]         = { .type = NLA_NESTED },
+       [IFLA_VF_TRUST]         = { .len = sizeof(struct ifla_vf_trust) },
 };
 
 static const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = {
@@ -1586,6 +1595,16 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
                        return err;
        }
 
+       if (tb[IFLA_VF_TRUST]) {
+               struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
+
+               err = -EOPNOTSUPP;
+               if (ops->ndo_set_vf_trust)
+                       err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
+               if (err < 0)
+                       return err;
+       }
+
        return err;
 }
 
@@ -3443,4 +3462,3 @@ void __init rtnetlink_init(void)
        rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
        rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
 }
-
index dcc7d62654d567ec30346feb405619fa54d39775..0ef30aa90132c7a1a04971c773d4de8ed4ac146b 100644
@@ -422,13 +422,25 @@ static void sock_warn_obsolete_bsdism(const char *name)
        }
 }
 
+static bool sock_needs_netstamp(const struct sock *sk)
+{
+       switch (sk->sk_family) {
+       case AF_UNSPEC:
+       case AF_UNIX:
+               return false;
+       default:
+               return true;
+       }
+}
+
 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
 
 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 {
        if (sk->sk_flags & flags) {
                sk->sk_flags &= ~flags;
-               if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
+               if (sock_needs_netstamp(sk) &&
+                   !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
                        net_disable_timestamp();
        }
 }
@@ -1582,7 +1594,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                if (newsk->sk_prot->sockets_allocated)
                        sk_sockets_allocated_inc(newsk);
 
-               if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
+               if (sock_needs_netstamp(sk) &&
+                   newsk->sk_flags & SK_FLAGS_TIMESTAMP)
                        net_enable_timestamp();
        }
 out:
@@ -2510,7 +2523,8 @@ void sock_enable_timestamp(struct sock *sk, int flag)
                 * time stamping, but time stamping might have been on
                 * already because of the other one
                 */
-               if (!(previous_flags & SK_FLAGS_TIMESTAMP))
+               if (sock_needs_netstamp(sk) &&
+                   !(previous_flags & SK_FLAGS_TIMESTAMP))
                        net_enable_timestamp();
        }
 }
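
sock_needs_netstamp() keeps AF_UNIX (and still-unspecified AF_UNSPEC) sockets from touching the netstamp machinery: the net_enable_timestamp()/net_disable_timestamp() static key only affects packets entering the network receive path, which unix sockets never traverse, so bumping it for them is pure overhead. Enable and disable sites are gated on the same predicate so the global count stays balanced, e.g.:

    if (sock_needs_netstamp(sk) && !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
            net_enable_timestamp();
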
index 630b30b4fb5368332428289e449b3247be0cd97b..5dca7ce8ee9f58619ba1ac6c0bc85c73f208fe9d 100644
@@ -1,4 +1,5 @@
 #include <linux/export.h>
+#include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/tso.h>
 #include <asm/unaligned.h>
@@ -14,18 +15,24 @@ EXPORT_SYMBOL(tso_count_descs);
 void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
                   int size, bool is_last)
 {
-       struct iphdr *iph;
        struct tcphdr *tcph;
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int mac_hdr_len = skb_network_offset(skb);
 
        memcpy(hdr, skb->data, hdr_len);
-       iph = (struct iphdr *)(hdr + mac_hdr_len);
-       iph->id = htons(tso->ip_id);
-       iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+       if (!tso->ipv6) {
+               struct iphdr *iph = (void *)(hdr + mac_hdr_len);
+
+               iph->id = htons(tso->ip_id);
+               iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+               tso->ip_id++;
+       } else {
+               struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);
+
+               iph->payload_len = htons(size + tcp_hdrlen(skb));
+       }
        tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
        put_unaligned_be32(tso->tcp_seq, &tcph->seq);
-       tso->ip_id++;
 
        if (!is_last) {
                /* Clear all special flags for not last packet */
@@ -61,6 +68,7 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso)
        tso->ip_id = ntohs(ip_hdr(skb)->id);
        tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
        tso->next_frag_idx = 0;
+       tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);
 
        /* Build first data */
        tso->size = skb_headlen(skb) - hdr_len;
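
The IPv4 and IPv6 length fields count different spans, which is what the branch above encodes: tot_len covers everything from the IP header onward, while payload_len excludes the fixed 40-byte IPv6 header. For a standard Ethernet frame (mac_hdr_len = 14, 20-byte TCP header) filled to a 1500-byte MTU:

    IPv4 (hdr_len = 54): tot_len     = 1460 + 54 - 14 = 1500
    IPv6 (hdr_len = 74): payload_len = 1440 + 20      = 1460

Only IPv4 has a per-segment IP ID, so the increment moves inside the IPv4 branch; vlan_get_protocol() is used so that VLAN-tagged IPv6 frames are classified correctly at tso_start() time.
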
index 923f5a180134ee0b180ca86a389d3f4a59d56b8d..b0e28d24e1a749ce1cfa7204a7cbdd201da8368f 100644
@@ -278,7 +278,9 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 
 struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                       struct request_sock *req,
-                                      struct dst_entry *dst);
+                                      struct dst_entry *dst,
+                                      struct request_sock *req_unhash,
+                                      bool *own_req);
 struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
                            struct request_sock *req);
 
index 8e99681c8189d82e3e5f6a3c6089b103c0df2ca3..5684e14932bd47e97b9d547307bfc50230e7be7d 100644
@@ -208,7 +208,6 @@ void dccp_req_err(struct sock *sk, u64 seq)
 
        if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-               reqsk_put(req);
        } else {
                /*
                 * Still in RESPOND, just remove it silently.
@@ -218,6 +217,7 @@ void dccp_req_err(struct sock *sk, u64 seq)
                 */
                inet_csk_reqsk_queue_drop(req->rsk_listener, req);
        }
+       reqsk_put(req);
 }
 EXPORT_SYMBOL(dccp_req_err);
 
@@ -393,7 +393,9 @@ static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
 struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
                                       struct sk_buff *skb,
                                       struct request_sock *req,
-                                      struct dst_entry *dst)
+                                      struct dst_entry *dst,
+                                      struct request_sock *req_unhash,
+                                      bool *own_req)
 {
        struct inet_request_sock *ireq;
        struct inet_sock *newinet;
@@ -426,7 +428,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
-       __inet_hash_nolisten(newsk, NULL);
+       *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 
        return newsk;
 
@@ -799,15 +801,10 @@ static int dccp_v4_rcv(struct sk_buff *skb)
                                  DCCP_SKB_CB(skb)->dccpd_ack_seq);
        }
 
-       /* Step 2:
-        *      Look up flow ID in table and get corresponding socket */
+lookup:
        sk = __inet_lookup_skb(&dccp_hashinfo, skb,
                               dh->dccph_sport, dh->dccph_dport);
-       /*
-        * Step 2:
-        *      If no socket ...
-        */
-       if (sk == NULL) {
+       if (!sk) {
                dccp_pr_debug("failed to look up flow ID in table and "
                              "get corresponding socket\n");
                goto no_dccp_socket;
@@ -830,8 +827,12 @@ static int dccp_v4_rcv(struct sk_buff *skb)
                struct sock *nsk = NULL;
 
                sk = req->rsk_listener;
-               if (sk->sk_state == DCCP_LISTEN)
+               if (likely(sk->sk_state == DCCP_LISTEN)) {
                        nsk = dccp_check_req(sk, skb, req);
+               } else {
+                       inet_csk_reqsk_queue_drop_and_put(sk, req);
+                       goto lookup;
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_it;
index aed314f8c7c60b00191aabc42c733f02304b18b0..ef4e48ce9143073872adc2c2816f3b0eb3665a4b 100644
@@ -380,7 +380,9 @@ drop:
 static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req,
-                                             struct dst_entry *dst)
+                                             struct dst_entry *dst,
+                                             struct request_sock *req_unhash,
+                                             bool *own_req)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *newnp;
@@ -393,7 +395,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
                /*
                 *      v6 mapped
                 */
-               newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
+               newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
+                                                 req_unhash, own_req);
                if (newsk == NULL)
                        return NULL;
 
@@ -511,7 +514,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
                dccp_done(newsk);
                goto out;
        }
-       __inet_hash(newsk, NULL);
+       *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 
        return newsk;
 
@@ -656,16 +659,11 @@ static int dccp_v6_rcv(struct sk_buff *skb)
        else
                DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
 
-       /* Step 2:
-        *      Look up flow ID in table and get corresponding socket */
+lookup:
        sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
                                dh->dccph_sport, dh->dccph_dport,
                                inet6_iif(skb));
-       /*
-        * Step 2:
-        *      If no socket ...
-        */
-       if (sk == NULL) {
+       if (!sk) {
                dccp_pr_debug("failed to look up flow ID in table and "
                              "get corresponding socket\n");
                goto no_dccp_socket;
@@ -688,8 +686,12 @@ static int dccp_v6_rcv(struct sk_buff *skb)
                struct sock *nsk = NULL;
 
                sk = req->rsk_listener;
-               if (sk->sk_state == DCCP_LISTEN)
+               if (likely(sk->sk_state == DCCP_LISTEN)) {
                        nsk = dccp_check_req(sk, skb, req);
+               } else {
+                       inet_csk_reqsk_queue_drop_and_put(sk, req);
+                       goto lookup;
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_it;
index d10aace43672a962080c68b20c2fa9ba36d8ac83..1994f8af646b15fe668c01b207567f9016865c4f 100644
@@ -143,6 +143,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
 {
        struct sock *child = NULL;
        struct dccp_request_sock *dreq = dccp_rsk(req);
+       bool own_req;
 
        /* Check for retransmitted REQUEST */
        if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
@@ -182,14 +183,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
        if (dccp_parse_options(sk, dreq, skb))
                 goto drop;
 
-       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-       if (child == NULL)
+       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+                                                        req, &own_req);
+       if (!child)
                goto listen_overflow;
 
-       inet_csk_reqsk_queue_drop(sk, req);
-       inet_csk_reqsk_queue_add(sk, req, child);
-out:
-       return child;
+       return inet_csk_complete_hashdance(sk, child, req, own_req);
+
 listen_overflow:
        dccp_pr_debug("listen_overflow!\n");
        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
@@ -198,7 +198,7 @@ drop:
                req->rsk_ops->send_reset(sk, skb);
 
        inet_csk_reqsk_queue_drop(sk, req);
-       goto out;
+       return NULL;
 }
 
 EXPORT_SYMBOL_GPL(dccp_check_req);
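
The extra syn_recv_sock() arguments come from the lockless-listener rework: the callee now hashes the child itself with inet_ehash_nolisten(), atomically swapping out req_unhash in the ehash table, and reports through *own_req whether it won any race to install the child; inet_csk_complete_hashdance() then either puts the child on the accept queue or disposes of it. The calling convention, as used above:

    bool own_req;
    struct sock *child;

    child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                     req, &own_req);
    if (child)
            return inet_csk_complete_hashdance(sk, child, req, own_req);
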
index 27fce283117babac70b4be2ca77c82eef5badf0c..607a14f20d88011e6de8540b21a69e6527d49df0 100644
@@ -789,9 +789,7 @@ static int dn_forward(struct sk_buff *skb)
        struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
        struct dn_route *rt;
        int header_len;
-#ifdef CONFIG_NETFILTER
        struct net_device *dev = skb->dev;
-#endif
 
        if (skb->pkt_type != PACKET_HOST)
                goto drop;
index aa398bcef9e30f39774afc2f4bdb060071150489..1eba07feb34adb451734e18e1c73031c9b7b2e35 100644
@@ -22,6 +22,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
 #include <linux/sysfs.h>
+#include <linux/phy_fixed.h>
 #include "dsa_priv.h"
 
 char dsa_driver_version[] = "0.1";
@@ -305,7 +306,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
        if (ret < 0)
                goto out;
 
-       ds->slave_mii_bus = mdiobus_alloc();
+       ds->slave_mii_bus = devm_mdiobus_alloc(parent);
        if (ds->slave_mii_bus == NULL) {
                ret = -ENOMEM;
                goto out;
@@ -314,7 +315,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 
        ret = mdiobus_register(ds->slave_mii_bus);
        if (ret < 0)
-               goto out_free;
+               goto out;
 
 
        /*
@@ -367,10 +368,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 
        return ret;
 
-out_free:
-       mdiobus_free(ds->slave_mii_bus);
 out:
-       kfree(ds);
        return ret;
 }
 
@@ -400,7 +398,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
        /*
         * Allocate and initialise switch state.
         */
-       ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+       ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL);
        if (ds == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -420,10 +418,47 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 
 static void dsa_switch_destroy(struct dsa_switch *ds)
 {
+       struct device_node *port_dn;
+       struct phy_device *phydev;
+       struct dsa_chip_data *cd = ds->pd;
+       int port;
+
 #ifdef CONFIG_NET_DSA_HWMON
        if (ds->hwmon_dev)
                hwmon_device_unregister(ds->hwmon_dev);
 #endif
+
+       /* Disable configuration of the CPU and DSA ports */
+       for (port = 0; port < DSA_MAX_PORTS; port++) {
+               if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
+                       continue;
+
+               port_dn = cd->port_dn[port];
+               if (of_phy_is_fixed_link(port_dn)) {
+                       phydev = of_phy_find_device(port_dn);
+                       if (phydev) {
+                               int addr = phydev->addr;
+
+                               phy_device_free(phydev);
+                               of_node_put(port_dn);
+                               fixed_phy_del(addr);
+                       }
+               }
+       }
+
+       /* Destroy network devices for physical switch ports. */
+       for (port = 0; port < DSA_MAX_PORTS; port++) {
+               if (!(ds->phys_port_mask & (1 << port)))
+                       continue;
+
+               if (!ds->ports[port])
+                       continue;
+
+               unregister_netdev(ds->ports[port]);
+               free_netdev(ds->ports[port]);
+       }
+
+       mdiobus_unregister(ds->slave_mii_bus);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -802,10 +837,11 @@ static inline void dsa_of_remove(struct device *dev)
 }
 #endif
 
-static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
-                         struct device *parent, struct dsa_platform_data *pd)
+static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
+                        struct device *parent, struct dsa_platform_data *pd)
 {
        int i;
+       unsigned configured = 0;
 
        dst->pd = pd;
        dst->master_netdev = dev;
@@ -825,8 +861,16 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
                dst->ds[i] = ds;
                if (ds->drv->poll_link != NULL)
                        dst->link_poll_needed = 1;
+
+               ++configured;
        }
 
+       /*
+        * If no switch was found, exit cleanly
+        */
+       if (!configured)
+               return -EPROBE_DEFER;
+
        /*
         * If we use a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point on get
@@ -843,6 +887,8 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
                dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
                add_timer(&dst->link_poll_timer);
        }
+
+       return 0;
 }
 
 static int dsa_probe(struct platform_device *pdev)
@@ -883,7 +929,7 @@ static int dsa_probe(struct platform_device *pdev)
                goto out;
        }
 
-       dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+       dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
        if (dst == NULL) {
                dev_put(dev);
                ret = -ENOMEM;
@@ -892,7 +938,9 @@ static int dsa_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dst);
 
-       dsa_setup_dst(dst, dev, &pdev->dev, pd);
+       ret = dsa_setup_dst(dst, dev, &pdev->dev, pd);
+       if (ret)
+               goto out;
 
        return 0;
 
@@ -914,7 +962,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
        for (i = 0; i < dst->pd->nr_chips; i++) {
                struct dsa_switch *ds = dst->ds[i];
 
-               if (ds != NULL)
+               if (ds)
                        dsa_switch_destroy(ds);
        }
 }
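
The devm_ conversions tie each allocation's lifetime to the device, which is why the error paths above shrink to a bare goto out with no manual frees (the kfree(ds) in the helper's error path goes away too, since the memory is now device-managed). The probe-side idiom, sketched with illustrative names:

    static int foo_probe(struct platform_device *pdev)
    {
            struct foo_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;         /* nothing to unwind */

            priv->mii_bus = devm_mdiobus_alloc(&pdev->dev);
            if (!priv->mii_bus)
                    return -ENOMEM;         /* priv is freed automatically */

            return 0;
    }

Note that devm only covers the memory: registered objects still need explicit teardown, which is what the new dsa_switch_destroy body supplies (unregister_netdev, mdiobus_unregister, fixed_phy_del).
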
index bb2bd3b56b1611b94b75bb8d05a1387e4ab37904..481754ee062a58bc0f644492c714e4718f9fcb6e 100644
@@ -378,31 +378,11 @@ static int dsa_slave_port_fdb_dump(struct net_device *dev,
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = p->parent;
-       unsigned char addr[ETH_ALEN] = { 0 };
-       u16 vid = 0;
-       int ret;
-
-       if (!ds->drv->port_fdb_getnext)
-               return -EOPNOTSUPP;
-
-       for (;;) {
-               bool is_static;
-
-               ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
-                                               &is_static);
-               if (ret < 0)
-                       break;
-
-               fdb->addr = addr;
-               fdb->vid = vid;
-               fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
 
-               ret = cb(&fdb->obj);
-               if (ret < 0)
-                       break;
-       }
+       if (ds->drv->port_fdb_dump)
+               return ds->drv->port_fdb_dump(ds, p->port, fdb, cb);
 
-       return ret == -ENOENT ? 0 : ret;
+       return -EOPNOTSUPP;
 }
 
 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -453,7 +433,7 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
 }
 
 static int dsa_slave_port_attr_set(struct net_device *dev,
-                                  struct switchdev_attr *attr,
+                                  const struct switchdev_attr *attr,
                                   struct switchdev_trans *trans)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
@@ -1276,7 +1256,7 @@ int dsa_slave_netdevice_event(struct notifier_block *unused,
                        goto out;
 
                err = dsa_slave_master_changed(dev);
-               if (err)
+               if (err && err != -EOPNOTSUPP)
                        netdev_warn(dev, "failed to reflect master change\n");
 
                break;
index 65d55e05516c02310dfcf277d5024f2f3f0727d1..ef185dd4110d74e44139e96dd6fdeec26aafb5fd 100644
@@ -90,36 +90,12 @@ static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
 
 int lowpan_iphc_decompress(struct sk_buff *skb)
 {
-       struct ieee802154_addr_sa sa, da;
        struct ieee802154_hdr hdr;
-       u8 iphc0, iphc1;
-       void *sap, *dap;
 
        if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
                return -EINVAL;
 
-       raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
-
-       if (lowpan_fetch_skb_u8(skb, &iphc0) ||
-           lowpan_fetch_skb_u8(skb, &iphc1))
-               return -EINVAL;
-
-       ieee802154_addr_to_sa(&sa, &hdr.source);
-       ieee802154_addr_to_sa(&da, &hdr.dest);
-
-       if (sa.addr_type == IEEE802154_ADDR_SHORT)
-               sap = &sa.short_addr;
-       else
-               sap = &sa.hwaddr;
-
-       if (da.addr_type == IEEE802154_ADDR_SHORT)
-               dap = &da.short_addr;
-       else
-               dap = &da.hwaddr;
-
-       return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
-                                       IEEE802154_ADDR_LEN, dap, da.addr_type,
-                                       IEEE802154_ADDR_LEN, iphc0, iphc1);
+       return lowpan_header_decompress(skb, skb->dev, &hdr.dest, &hdr.source);
 }
 
 static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
@@ -308,16 +284,16 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
        if (wdev->type != ARPHRD_IEEE802154 ||
            skb->pkt_type == PACKET_OTHERHOST ||
            !lowpan_rx_h_check(skb))
-               return NET_RX_DROP;
+               goto drop;
 
        ldev = wdev->ieee802154_ptr->lowpan_dev;
        if (!ldev || !netif_running(ldev))
-               return NET_RX_DROP;
+               goto drop;
 
        /* Replacing skb->dev and followed rx handlers will manipulate skb. */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
-               return NET_RX_DROP;
+               goto out;
        skb->dev = ldev;
 
        /* When receive frag1 it's likely that we manipulate the buffer.
@@ -328,10 +304,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
            lowpan_is_iphc(*skb_network_header(skb))) {
                skb = skb_unshare(skb, GFP_ATOMIC);
                if (!skb)
-                       return NET_RX_DROP;
+                       goto out;
        }
 
        return lowpan_invoke_rx_handlers(skb);
+
+drop:
+       kfree_skb(skb);
+out:
+       return NET_RX_DROP;
 }
 
 static struct packet_type lowpan_packet_type = {
index 62a21f6f021e3ac7b8a40c6869d6397e99867dba..d4353faced35cf64f98a68fe6a749120cff411d2 100644
@@ -14,6 +14,9 @@
 
 #include "6lowpan_i.h"
 
+#define LOWPAN_FRAG1_HEAD_SIZE 0x4
+#define LOWPAN_FRAGN_HEAD_SIZE 0x5
+
 /* don't save pan id, it's intra pan */
 struct lowpan_addr {
        u8 mode;
@@ -218,7 +221,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
        saddr = &info.saddr.u.extended_addr;
 
        *dgram_size = skb->len;
-       lowpan_header_compress(skb, ldev, ETH_P_IPV6, daddr, saddr, skb->len);
+       lowpan_header_compress(skb, ldev, daddr, saddr);
        /* dgram_offset = (saved bytes after compression) + lowpan header len */
        *dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
 
@@ -235,7 +238,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
        /* if the destination address is the broadcast address, use the
         * corresponding short address
         */
-       if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
+       if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
                da.mode = IEEE802154_ADDR_SHORT;
                da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
                cb->ackreq = false;
index 89aacb630a53362bb9de51c7381ae9b6a799bdc3..c29809f765dc5d4d95edd5d6ac3cc321fcb97c88 100644
@@ -8,6 +8,7 @@ obj-y     := route.o inetpeer.o protocol.o \
             inet_timewait_sock.o inet_connection_sock.o \
             tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
             tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
+            tcp_recovery.o \
             tcp_offload.o datagram.o raw.o udp.o udplite.o \
             udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
             fib_frontend.o fib_semantics.o fib_trie.o \
index 01308e6e612735aee02b71b460f9a02e93f8673f..59b3e0e8fd5110031eff0303b5f75622bdd7d22a 100644
@@ -312,7 +312,7 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip,
        if (!skb)
                return;
 
-       skb_dst_set(skb, dst);
+       skb_dst_set(skb, dst_clone(dst));
        arp_xmit(skb);
 }
 
@@ -384,7 +384,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
        }
 
        if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
-               dst = dst_clone(skb_dst(skb));
+               dst = skb_dst(skb);
        arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
                     dst_hw, dev->dev_addr, NULL, dst);
 }
@@ -816,7 +816,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
                                } else {
                                        pneigh_enqueue(&arp_tbl,
                                                       in_dev->arp_parms, skb);
-                                       return 0;
+                                       goto out_free_dst;
                                }
                                goto out;
                        }
@@ -870,6 +870,8 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 out:
        consume_skb(skb);
+out_free_dst:
+       dst_release(reply_dst);
        return 0;
 }
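
Two hunks cooperate in arp.c: arp_send_dst() now attaches its own clone of the dst to the skb (dropped automatically when the skb is freed), and arp_process() gains an out_free_dst label so the reference held in reply_dst is released on every exit, including the early return through pneigh_enqueue() that previously leaked it. The resulting ownership rule, condensed (the real arp_send_dst() builds the skb itself):

    skb_dst_set(skb, dst_clone(dst));       /* the skb owns the clone only */
    arp_xmit(skb);

    /* ... while in arp_process() the caller's reference is put exactly once: */
    out:
            consume_skb(skb);
    out_free_dst:
            dst_release(reply_dst);
            return 0;
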
 
index 7350084728444f3ba07ca4db68bdc965fa75abf7..cebd9d31e65a4a7539cab0bef71887736bc188f7 100644 (file)
@@ -1644,7 +1644,8 @@ errout:
                rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
 }
 
-static size_t inet_get_link_af_size(const struct net_device *dev)
+static size_t inet_get_link_af_size(const struct net_device *dev,
+                                   u32 ext_filter_mask)
 {
        struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
 
@@ -2398,4 +2399,3 @@ void __init devinet_init(void)
        rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
                      inet_netconf_dump_devconf, NULL);
 }
-
index d7c2bb0c4f6536c1b1d3ee57dc97e1047793f3ba..e786873c89f207adeccaffe3ad7046590da6cc38 100644 (file)
@@ -867,9 +867,10 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
 
        if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
            (prefix != addr || ifa->ifa_prefixlen < 32)) {
-               fib_magic(RTM_NEWROUTE,
-                         dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
-                         prefix, ifa->ifa_prefixlen, prim);
+               if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
+                       fib_magic(RTM_NEWROUTE,
+                                 dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+                                 prefix, ifa->ifa_prefixlen, prim);
 
                /* Add network specific broadcasts, when it takes a sense */
                if (ifa->ifa_prefixlen < 31) {
@@ -914,9 +915,10 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
                }
        } else if (!ipv4_is_zeronet(any) &&
                   (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
-               fib_magic(RTM_DELROUTE,
-                         dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
-                         any, ifa->ifa_prefixlen, prim);
+               if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
+                       fib_magic(RTM_DELROUTE,
+                                 dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+                                 any, ifa->ifa_prefixlen, prim);
                subnet = 1;
        }
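
Both fib_frontend.c hunks gate the automatic prefix route on IFA_F_NOPREFIXROUTE: an address added with that flag neither installs a covering RTN_LOCAL/RTN_UNICAST route nor tears one down on deletion, while the broadcast entries handled just below remain managed. The shared guard:

    if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
            fib_magic(RTM_NEWROUTE,         /* RTM_DELROUTE on the del side */
                      dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
                      prefix, ifa->ifa_prefixlen, prim);

From userspace this presumably corresponds to something like "ip addr add 192.0.2.1/24 dev eth0 noprefixroute" (illustrative), a flag IPv6 addresses have honoured for some time.
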
 
index af77298c8b4f01594fa1c98173ef26068ef874a5..42778d9d71e532cf658d8aabd37d178d06ebb081 100644 (file)
@@ -545,7 +545,7 @@ static void fib_rebalance(struct fib_info *fi)
                if (nh->nh_flags & RTNH_F_DEAD)
                        continue;
 
-               in_dev = __in_dev_get_rcu(nh->nh_dev);
+               in_dev = __in_dev_get_rtnl(nh->nh_dev);
 
                if (in_dev &&
                    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
@@ -559,7 +559,7 @@ static void fib_rebalance(struct fib_info *fi)
        change_nexthops(fi) {
                int upper_bound;
 
-               in_dev = __in_dev_get_rcu(nexthop_nh->nh_dev);
+               in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);
 
                if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
                        upper_bound = -1;
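
The accessor swap above documents the locking context rather than changing behaviour: fib_rebalance() runs with RTNL held, not necessarily inside an RCU read-side section, so __in_dev_get_rtnl() is the variant whose lockdep expectations actually match the callers. In shape:

    in_dev = __in_dev_get_rcu(dev);    /* caller must hold rcu_read_lock() */
    in_dev = __in_dev_get_rtnl(dev);   /* caller must hold RTNL instead */
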
index f3c356b7c1f0b25674572a798b16d221bf883879..36e26977c9088c1dbd09cd13e9a5e2c43369fe31 100644 (file)
@@ -659,9 +659,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
         */
 
        saddr = iph->daddr;
-       if (!((type == ICMP_REDIRECT) &&
-             net->ipv4.sysctl_icmp_redirects_use_orig_daddr) &&
-           !(rt->rt_flags & RTCF_LOCAL)) {
+       if (!(rt->rt_flags & RTCF_LOCAL)) {
                struct net_device *dev = NULL;
 
                rcu_read_lock();
@@ -1224,11 +1222,6 @@ static int __net_init icmp_sk_init(struct net *net)
        net->ipv4.sysctl_icmp_ratemask = 0x1818;
        net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
 
-       /* Control paramerer - use the daddr of originating packets as saddr
-        * in redirect messages?
-        */
-       net->ipv4.sysctl_icmp_redirects_use_orig_daddr = 0;
-
        return 0;
 
 fail:
index 514b9e910bd4ea245995346fe8188edd6f24b46f..1feb15f23de8c4f673fd0fe713df2dd9195995cf 100644 (file)
@@ -523,15 +523,15 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
                               struct request_sock *req)
 {
        struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
-       spinlock_t *lock;
-       bool found;
+       bool found = false;
 
-       lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
-
-       spin_lock(lock);
-       found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
-       spin_unlock(lock);
+       if (sk_hashed(req_to_sk(req))) {
+               spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
 
+               spin_lock(lock);
+               found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
+               spin_unlock(lock);
+       }
        if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
                reqsk_put(req);
        return found;
@@ -546,6 +546,13 @@ void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
 }
 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 
+void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
+{
+       inet_csk_reqsk_queue_drop(sk, req);
+       reqsk_put(req);
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
+
 static void reqsk_timer_handler(unsigned long data)
 {
        struct request_sock *req = (struct request_sock *)data;
@@ -608,8 +615,7 @@ static void reqsk_timer_handler(unsigned long data)
                return;
        }
 drop:
-       inet_csk_reqsk_queue_drop(sk_listener, req);
-       reqsk_put(req);
+       inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
 }
 
 static void reqsk_queue_hash_req(struct request_sock *req,
@@ -727,14 +733,14 @@ void inet_csk_prepare_forced_close(struct sock *sk)
 }
 EXPORT_SYMBOL(inet_csk_prepare_forced_close);
 
-int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
+int inet_csk_listen_start(struct sock *sk, int backlog)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
 
        reqsk_queue_alloc(&icsk->icsk_accept_queue);
 
-       sk->sk_max_ack_backlog = 0;
+       sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        inet_csk_delack_init(sk);
 
@@ -758,6 +764,72 @@ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
 
+static void inet_child_forget(struct sock *sk, struct request_sock *req,
+                             struct sock *child)
+{
+       sk->sk_prot->disconnect(child, O_NONBLOCK);
+
+       sock_orphan(child);
+
+       percpu_counter_inc(sk->sk_prot->orphan_count);
+
+       if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
+               BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+               BUG_ON(sk != req->rsk_listener);
+
+               /* Paranoid, to prevent race condition if
+                * an inbound pkt destined for child is
+                * blocked by sock lock in tcp_v4_rcv().
+                * Also to satisfy an assertion in
+                * tcp_v4_destroy_sock().
+                */
+               tcp_sk(child)->fastopen_rsk = NULL;
+       }
+       inet_csk_destroy_sock(child);
+       reqsk_put(req);
+}
+
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+                             struct sock *child)
+{
+       struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+       spin_lock(&queue->rskq_lock);
+       if (unlikely(sk->sk_state != TCP_LISTEN)) {
+               inet_child_forget(sk, req, child);
+       } else {
+               req->sk = child;
+               req->dl_next = NULL;
+               if (queue->rskq_accept_head == NULL)
+                       queue->rskq_accept_head = req;
+               else
+                       queue->rskq_accept_tail->dl_next = req;
+               queue->rskq_accept_tail = req;
+               sk_acceptq_added(sk);
+       }
+       spin_unlock(&queue->rskq_lock);
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+
+struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+                                        struct request_sock *req, bool own_req)
+{
+       if (own_req) {
+               inet_csk_reqsk_queue_drop(sk, req);
+               reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+               inet_csk_reqsk_queue_add(sk, req, child);
+               /* Warning: caller must not call reqsk_put(req);
+                * child stole last reference on it.
+                */
+               return child;
+       }
+       /* Too bad, another child took ownership of the request, undo. */
+       bh_unlock_sock(child);
+       sock_put(child);
+       return NULL;
+}
+EXPORT_SYMBOL(inet_csk_complete_hashdance);
+
 /*
  *     This routine closes sockets which have been at least partially
  *     opened, but not yet accepted.
@@ -784,31 +856,11 @@ void inet_csk_listen_stop(struct sock *sk)
                WARN_ON(sock_owned_by_user(child));
                sock_hold(child);
 
-               sk->sk_prot->disconnect(child, O_NONBLOCK);
-
-               sock_orphan(child);
-
-               percpu_counter_inc(sk->sk_prot->orphan_count);
-
-               if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
-                       BUG_ON(tcp_sk(child)->fastopen_rsk != req);
-                       BUG_ON(sk != req->rsk_listener);
-
-                       /* Paranoid, to prevent race condition if
-                        * an inbound pkt destined for child is
-                        * blocked by sock lock in tcp_v4_rcv().
-                        * Also to satisfy an assertion in
-                        * tcp_v4_destroy_sock().
-                        */
-                       tcp_sk(child)->fastopen_rsk = NULL;
-               }
-               inet_csk_destroy_sock(child);
-
+               inet_child_forget(sk, req, child);
                bh_unlock_sock(child);
                local_bh_enable();
                sock_put(child);
 
-               reqsk_put(req);
                cond_resched();
        }
        if (queue->fastopenq.rskq_rst_head) {
@@ -823,7 +875,7 @@ void inet_csk_listen_stop(struct sock *sk)
                        req = next;
                }
        }
-       WARN_ON(sk->sk_ack_backlog);
+       WARN_ON_ONCE(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
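
Taken together, the inet_connection_sock.c hunks centralize child teardown in inet_child_forget() (shared now by inet_csk_listen_stop() and the new add-to-accept-queue path) and formalize the end of a passive open as a "hashdance": syn_recv_sock() reports through own_req whether this caller was the one that actually replaced the request socket in the ehash table, and only the owner may queue the child for accept(). The caller-side contract, condensed from how tcp_check_req() uses it further down:

    child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                     req, &own_req);
    if (!child)
            goto listen_overflow;
    return inet_csk_complete_hashdance(sk, child, req, own_req);

    /* own_req true:  req was unhashed here and the child queued; the caller
     *                must not reqsk_put(req), the child stole the reference.
     * own_req false: another CPU won the race; the child is unlocked and
     *                released inside the helper, and NULL comes back.
     */
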
 
index 08643a3616af70fd3f3ba4ce0d41e258d9f259d9..ccc5980797fcdb9ed3a1003db47e4e8cb7180279 100644 (file)
@@ -137,6 +137,10 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
 
        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
+       if (unlikely(!tb)) {
+               spin_unlock(&head->lock);
+               return -ENOENT;
+       }
        if (tb->port != port) {
                /* NOTE: using tproxy and redirecting skbs to a proxy
                 * on a different listener port breaks the assumption
@@ -403,13 +407,13 @@ static u32 inet_sk_port_offset(const struct sock *sk)
 /* insert a socket into ehash, and eventually remove another one
  * (The another one can be a SYN_RECV or TIMEWAIT
  */
-int inet_ehash_insert(struct sock *sk, struct sock *osk)
+bool inet_ehash_insert(struct sock *sk, struct sock *osk)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct hlist_nulls_head *list;
        struct inet_ehash_bucket *head;
        spinlock_t *lock;
-       int ret = 0;
+       bool ret = true;
 
        WARN_ON_ONCE(!sk_unhashed(sk));
 
@@ -419,30 +423,41 @@ int inet_ehash_insert(struct sock *sk, struct sock *osk)
        lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 
        spin_lock(lock);
-       __sk_nulls_add_node_rcu(sk, list);
        if (osk) {
-               WARN_ON(sk->sk_hash != osk->sk_hash);
-               sk_nulls_del_node_init_rcu(osk);
+               WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
+               ret = sk_nulls_del_node_init_rcu(osk);
        }
+       if (ret)
+               __sk_nulls_add_node_rcu(sk, list);
        spin_unlock(lock);
        return ret;
 }
 
-void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
 {
-       inet_ehash_insert(sk, osk);
-       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+       bool ok = inet_ehash_insert(sk, osk);
+
+       if (ok) {
+               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+       } else {
+               percpu_counter_inc(sk->sk_prot->orphan_count);
+               sk->sk_state = TCP_CLOSE;
+               sock_set_flag(sk, SOCK_DEAD);
+               inet_csk_destroy_sock(sk);
+       }
+       return ok;
 }
-EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
+EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
 
 void __inet_hash(struct sock *sk, struct sock *osk)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_listen_hashbucket *ilb;
 
-       if (sk->sk_state != TCP_LISTEN)
-               return __inet_hash_nolisten(sk, osk);
-
+       if (sk->sk_state != TCP_LISTEN) {
+               inet_ehash_nolisten(sk, osk);
+               return;
+       }
        WARN_ON(!sk_unhashed(sk));
        ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
 
@@ -563,7 +578,7 @@ ok:
                inet_bind_hash(sk, tb, port);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->inet_sport = htons(port);
-                       __inet_hash_nolisten(sk, (struct sock *)tw);
+                       inet_ehash_nolisten(sk, (struct sock *)tw);
                }
                if (tw)
                        inet_twsk_bind_unhash(tw, hinfo);
@@ -580,7 +595,7 @@ ok:
        tb  = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-               __inet_hash_nolisten(sk, NULL);
+               inet_ehash_nolisten(sk, NULL);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
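
The inet_ehash_insert() change above is what makes own_req meaningful: the new socket is linked into the ehash bucket only if the old request socket was still hashed and this CPU removed it, since sk_nulls_del_node_init_rcu() returns false for the loser of a concurrent hashdance. Under the bucket lock:

    spin_lock(lock);
    if (osk)                                        /* replacing a reqsk? */
            ret = sk_nulls_del_node_init_rcu(osk);  /* only one CPU wins */
    if (ret)
            __sk_nulls_add_node_rcu(sk, list);      /* losers never insert */
    spin_unlock(lock);

inet_ehash_nolisten() then takes care of the losing side, marking the freshly built socket dead and destroying it rather than leaking a half-initialized child.
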
index bd0679d90519b170dc98369e9b438e4c31b152b9..614521437e30159c5234e9b24fcc41bad7ea6c6b 100644 (file)
@@ -498,10 +498,26 @@ static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
                                        csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
+static struct rtable *gre_get_rt(struct sk_buff *skb,
+                                struct net_device *dev,
+                                struct flowi4 *fl,
+                                const struct ip_tunnel_key *key)
+{
+       struct net *net = dev_net(dev);
+
+       memset(fl, 0, sizeof(*fl));
+       fl->daddr = key->u.ipv4.dst;
+       fl->saddr = key->u.ipv4.src;
+       fl->flowi4_tos = RT_TOS(key->tos);
+       fl->flowi4_mark = skb->mark;
+       fl->flowi4_proto = IPPROTO_GRE;
+
+       return ip_route_output_key(net, fl);
+}
+
 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_tunnel_info *tun_info;
-       struct net *net = dev_net(dev);
        const struct ip_tunnel_key *key;
        struct flowi4 fl;
        struct rtable *rt;
@@ -516,14 +532,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
                goto err_free_skb;
 
        key = &tun_info->key;
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = key->u.ipv4.dst;
-       fl.saddr = key->u.ipv4.src;
-       fl.flowi4_tos = RT_TOS(key->tos);
-       fl.flowi4_mark = skb->mark;
-       fl.flowi4_proto = IPPROTO_GRE;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = gre_get_rt(skb, dev, &fl, key);
        if (IS_ERR(rt))
                goto err_free_skb;
 
@@ -566,6 +575,24 @@ err_free_skb:
        dev->stats.tx_dropped++;
 }
 
+static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       struct rtable *rt;
+       struct flowi4 fl4;
+
+       if (ip_tunnel_info_af(info) != AF_INET)
+               return -EINVAL;
+
+       rt = gre_get_rt(skb, dev, &fl4, &info->key);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+
+       ip_rt_put(rt);
+       info->key.u.ipv4.src = fl4.saddr;
+       return 0;
+}
+
 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
                              struct net_device *dev)
 {
@@ -1023,6 +1050,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_get_iflink         = ip_tunnel_get_iflink,
+       .ndo_fill_metadata_dst  = gre_fill_metadata_dst,
 };
 
 static void ipgre_tap_setup(struct net_device *dev)
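
Factoring the route lookup into gre_get_rt() lets the new .ndo_fill_metadata_dst hook reuse it: a flow-based user of the tunnel can ask which local source address a given tunnel key would be routed from without transmitting anything, since gre_fill_metadata_dst() just performs the lookup, releases the route, and writes fl4.saddr back into the key. A hypothetical caller-side sketch:

    const struct net_device_ops *ops = dev->netdev_ops;

    if (ops->ndo_fill_metadata_dst &&
        ops->ndo_fill_metadata_dst(dev, skb) == 0) {
            /* skb_tunnel_info(skb)->key.u.ipv4.src now holds the
             * source address the route lookup selected
             */
    }
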
index 67404e1fe7d40fe5121f7405d738bea40fe70942..50e29737b584624b6d338630622e87634961612e 100644 (file)
@@ -1596,7 +1596,6 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                          arg->csumoffset) = csum_fold(csum_add(nskb->csum,
                                                                arg->csum));
                nskb->ip_summed = CHECKSUM_NONE;
-               skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
                ip_push_pending_frames(sk, &fl4);
        }
 out:
index ed4ef09c2136ec34871fc240a8197bf694e2d9ef..0bc7412d9e14a3395ab9c877ee6683f33748a3b1 100644 (file)
@@ -146,6 +146,10 @@ u8 root_server_path[256] = { 0, }; /* Path to mount as root */
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
 
+#if defined(CONFIG_IP_PNP_DHCP)
+static char dhcp_client_identifier[253] __initdata;
+#endif
+
 /* Persistent data: */
 
 static int ic_proto_used;                      /* Protocol used, if any */
@@ -728,6 +732,16 @@ ic_dhcp_init_options(u8 *options)
                        memcpy(e, vendor_class_identifier, len);
                        e += len;
                }
+               len = strlen(dhcp_client_identifier + 1);
+               /* the minimum length of the identifier is 2, including the
+                * 1-byte type, and it cannot be larger than the remaining
+                * options space
+                */
+               if (len >= 1 && len < 312 - (e - options) - 1) {
+                       *e++ = 61;
+                       *e++ = len + 1;
+                       memcpy(e, dhcp_client_identifier, len + 1);
+                       e += len + 1;
+               }
        }
 
        *e++ = 255;     /* End of the list */
@@ -1557,8 +1571,24 @@ static int __init ic_proto_name(char *name)
                return 0;
        }
 #ifdef CONFIG_IP_PNP_DHCP
-       else if (!strcmp(name, "dhcp")) {
+       else if (!strncmp(name, "dhcp", 4)) {
+               char *client_id;
+
                ic_proto_enabled &= ~IC_RARP;
+               client_id = strstr(name, "dhcp,");
+               if (client_id) {
+                       char *v;
+
+                       client_id = client_id + 5;
+                       v = strchr(client_id, ',');
+                       if (!v)
+                               return 1;
+                       *v = 0;
+                       if (kstrtou8(client_id, 0, dhcp_client_identifier))
+                               DBG("DHCP: Invalid client identifier type\n");
+                       strncpy(dhcp_client_identifier + 1, v + 1, 251);
+                       *v = ',';
+               }
                return 1;
        }
 #endif
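
Two pieces cooperate above: ic_proto_name() now accepts "dhcp,<type>,<id>" in the ip= boot parameter, parsing the byte after "dhcp," with kstrtou8() as the RFC 2132 identifier type and keeping the remainder as the identifier body, and ic_dhcp_init_options() emits the stored buffer as DHCP option 61. A boot line of roughly the shape ip=:::::eth0:dhcp,0,machine-01 (values illustrative) is what gets recognized. The emission side, condensed with comments:

    /* dhcp_client_identifier[0] holds the type byte, [1..] the identifier */
    len = strlen(dhcp_client_identifier + 1);
    if (len >= 1 && len < 312 - (e - options) - 1) {
            *e++ = 61;                  /* DHCP option: client identifier */
            *e++ = len + 1;             /* length includes the type byte */
            memcpy(e, dhcp_client_identifier, len + 1);
            e += len + 1;
    }
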
index 690d27d3f2f90d99612de8ed4a32dec0596a680a..a3558417653567ffe3a83d06515fb1b68ec36dcf 100644 (file)
@@ -75,6 +75,7 @@ endif # NF_TABLES
 
 config NF_DUP_IPV4
        tristate "Netfilter IPv4 packet duplication to alternate destination"
+       depends on !NF_CONNTRACK || NF_CONNTRACK
        help
          This option enables the nf_dup_ipv4 core, which duplicates an IPv4
          packet to be rerouted to another destination.
index 2dad3e1c5f11d850f2ef9d7ae19b4ebe731840d4..11dccba474b7964fe7d9ee472c48d5eca435ae41 100644 (file)
@@ -186,7 +186,7 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
        if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
                dprintf("VIA in mismatch (%s vs %s).%s\n",
                        indev, arpinfo->iniface,
-                       arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":"");
+                       arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : "");
                return 0;
        }
 
@@ -195,7 +195,7 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
        if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
                dprintf("VIA out mismatch (%s vs %s).%s\n",
                        outdev, arpinfo->outiface,
-                       arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":"");
+                       arpinfo->invflags & ARPT_INV_VIA_OUT ? " (INV)" : "");
                return 0;
        }
 
@@ -468,7 +468,7 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                pos = newpos;
                        }
                }
-               next:
+next:
                duprintf("Finished chain %u\n", hook);
        }
        return 1;
@@ -632,7 +632,7 @@ static inline void cleanup_entry(struct arpt_entry *e)
  * newinfo).
  */
 static int translate_table(struct xt_table_info *newinfo, void *entry0,
-                           const struct arpt_replace *repl)
+                          const struct arpt_replace *repl)
 {
        struct arpt_entry *iter;
        unsigned int i;
@@ -892,7 +892,7 @@ static int compat_table_info(const struct xt_table_info *info,
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+                   const int *len, int compat)
 {
        char name[XT_TABLE_MAXNAMELEN];
        struct xt_table *t;
@@ -1069,7 +1069,7 @@ static int __do_replace(struct net *net, const char *name,
 }
 
 static int do_replace(struct net *net, const void __user *user,
-                      unsigned int len)
+                     unsigned int len)
 {
        int ret;
        struct arpt_replace tmp;
index 42d0946956db6b545305ac5f1b9a601ffad4c87c..b99affad6ba1f4939e10f676a06f92cd27c32add 100644 (file)
@@ -102,7 +102,7 @@ ip_packet_match(const struct iphdr *ip,
        if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
                dprintf("VIA in mismatch (%s vs %s).%s\n",
                        indev, ipinfo->iniface,
-                       ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
+                       ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : "");
                return false;
        }
 
@@ -111,7 +111,7 @@ ip_packet_match(const struct iphdr *ip,
        if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
                dprintf("VIA out mismatch (%s vs %s).%s\n",
                        outdev, ipinfo->outiface,
-                       ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
+                       ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : "");
                return false;
        }
 
@@ -120,7 +120,7 @@ ip_packet_match(const struct iphdr *ip,
            FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
                dprintf("Packet protocol %hi does not match %hi.%s\n",
                        ip->protocol, ipinfo->proto,
-                       ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
+                       ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : "");
                return false;
        }
 
@@ -431,8 +431,8 @@ ipt_do_table(struct sk_buff *skb,
        } while (!acpar.hotdrop);
        pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
 
-       xt_write_recseq_end(addend);
-       local_bh_enable();
+       xt_write_recseq_end(addend);
+       local_bh_enable();
 
 #ifdef DEBUG_ALLOW_ALL
        return NF_ACCEPT;
@@ -484,7 +484,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                unsigned int oldpos, size;
 
                                if ((strcmp(t->target.u.user.name,
-                                           XT_STANDARD_TARGET) == 0) &&
+                                           XT_STANDARD_TARGET) == 0) &&
                                    t->verdict < -NF_MAX_VERDICT - 1) {
                                        duprintf("mark_source_chains: bad "
                                                "negative verdict (%i)\n",
@@ -549,7 +549,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                pos = newpos;
                        }
                }
-               next:
+next:
                duprintf("Finished chain %u\n", hook);
        }
        return 1;
@@ -804,7 +804,7 @@ cleanup_entry(struct ipt_entry *e, struct net *net)
    newinfo) */
 static int
 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
-                const struct ipt_replace *repl)
+               const struct ipt_replace *repl)
 {
        struct ipt_entry *iter;
        unsigned int i;
@@ -1078,7 +1078,7 @@ static int compat_table_info(const struct xt_table_info *info,
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+                   const int *len, int compat)
 {
        char name[XT_TABLE_MAXNAMELEN];
        struct xt_table *t;
@@ -1304,7 +1304,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 
 static int
 do_add_counters(struct net *net, const void __user *user,
-                unsigned int len, int compat)
+               unsigned int len, int compat)
 {
        unsigned int i;
        struct xt_counters_info tmp;
index 3f32c03e8b2e956f416c7df69066f826d91495ef..4a9e6db9df8d719a14b6aa129b78ba614587767f 100644 (file)
@@ -492,14 +492,14 @@ static void arp_print(struct arp_payload *payload)
 {
 #define HBUFFERLEN 30
        char hbuffer[HBUFFERLEN];
-       int j,k;
+       int j, k;
 
-       for (k=0, j=0; k < HBUFFERLEN-3 && j < ETH_ALEN; j++) {
+       for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < ETH_ALEN; j++) {
                hbuffer[k++] = hex_asc_hi(payload->src_hw[j]);
                hbuffer[k++] = hex_asc_lo(payload->src_hw[j]);
-               hbuffer[k++]=':';
+               hbuffer[k++] = ':';
        }
-       hbuffer[--k]='\0';
+       hbuffer[--k] = '\0';
 
        pr_debug("src %pI4@%s, dst %pI4\n",
                 &payload->src_ip, hbuffer, &payload->dst_ip);
index f1a8df8ecc1f344d58aa834fb7230b12e0fa1bc2..5fdc556514bac3335f0c4f78b2c01c54f1c8b68f 100644 (file)
@@ -231,7 +231,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
        synproxy_build_options(nth, opts);
 
        synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-                         niph, nth, tcp_hdr_size);
+                         niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -437,14 +437,12 @@ static struct xt_target synproxy_tg4_reg __read_mostly = {
 static struct nf_hook_ops ipv4_synproxy_ops[] __read_mostly = {
        {
                .hook           = ipv4_synproxy_hook,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
        },
        {
                .hook           = ipv4_synproxy_hook,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_POST_ROUTING,
                .priority       = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
index 14a2aa8b8a142502095e4da8a034eae6e78a8313..a787d07f6cb757b741fcd600f9972c3399c95c85 100644 (file)
@@ -25,7 +25,7 @@ spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
        bool r;
        pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
                 invert ? '!' : ' ', min, spi, max);
-       r=(spi >= min && spi <= max) ^ invert;
+       r = (spi >= min && spi <= max) ^ invert;
        pr_debug(" result %s\n", r ? "PASS" : "FAILED");
        return r;
 }
index 74dd6671b66da53a8919944d1ff563ce6cc063c4..78cc64eddfc1855849652c563b3c931cd7cc72e6 100644 (file)
@@ -60,9 +60,7 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
        if (FIB_RES_DEV(res) == dev)
                dev_match = true;
 #endif
-       if (dev_match || flags & XT_RPFILTER_LOOSE)
-               return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
-       return dev_match;
+       return dev_match || flags & XT_RPFILTER_LOOSE;
 }
 
 static bool rpfilter_is_local(const struct sk_buff *skb)
index 3a2e4d830a0b2ae7a75d6e962fa17f35e4b07bc4..ae2cd275204643ebff64d7537dd617bd3a0eec62 100644 (file)
@@ -68,7 +68,6 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
        /* Before packet filtering, change destination */
        {
                .hook           = iptable_nat_ipv4_in,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_PRE_ROUTING,
                .priority       = NF_IP_PRI_NAT_DST,
@@ -76,7 +75,6 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
        /* After packet filtering, change source */
        {
                .hook           = iptable_nat_ipv4_out,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_POST_ROUTING,
                .priority       = NF_IP_PRI_NAT_SRC,
@@ -84,7 +82,6 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
        /* Before packet filtering, change destination */
        {
                .hook           = iptable_nat_ipv4_local_fn,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP_PRI_NAT_DST,
@@ -92,7 +89,6 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
        /* After packet filtering, change source */
        {
                .hook           = iptable_nat_ipv4_fn,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP_PRI_NAT_SRC,
index f534e2f05bad3b32a00356b7d5e5a6046cc59e49..c2e23d5e9cd4a8412d38f20eb8ddedf2f32f8aa6 100644 (file)
@@ -79,7 +79,7 @@ static int __init iptable_security_init(void)
        int ret;
 
        ret = register_pernet_subsys(&iptable_security_net_ops);
-        if (ret < 0)
+       if (ret < 0)
                return ret;
 
        sectbl_ops = xt_hook_link(&security_table, iptable_security_hook);
index 752fb40adcf8a3ea43f892d7cb33e1b236a8d9d3..461ca926fd39408613f62a3de8bfc5ff4cbce7f6 100644 (file)
@@ -166,42 +166,36 @@ static unsigned int ipv4_conntrack_local(void *priv,
 static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
        {
                .hook           = ipv4_conntrack_in,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_PRE_ROUTING,
                .priority       = NF_IP_PRI_CONNTRACK,
        },
        {
                .hook           = ipv4_conntrack_local,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP_PRI_CONNTRACK,
        },
        {
                .hook           = ipv4_helper,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_POST_ROUTING,
                .priority       = NF_IP_PRI_CONNTRACK_HELPER,
        },
        {
                .hook           = ipv4_confirm,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_POST_ROUTING,
                .priority       = NF_IP_PRI_CONNTRACK_CONFIRM,
        },
        {
                .hook           = ipv4_helper,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP_PRI_CONNTRACK_HELPER,
        },
        {
                .hook           = ipv4_confirm,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP_PRI_CONNTRACK_CONFIRM,
index bf25f45b23d24305bfa8a22f4bb5946153052221..0e5591c2ee9f6d66acb47ce2cbbf31403dc286f7 100644 (file)
@@ -95,14 +95,12 @@ static unsigned int ipv4_conntrack_defrag(void *priv,
 static struct nf_hook_ops ipv4_defrag_ops[] = {
        {
                .hook           = ipv4_conntrack_defrag,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_PRE_ROUTING,
                .priority       = NF_IP_PRI_CONNTRACK_DEFRAG,
        },
        {
                .hook           = ipv4_conntrack_defrag,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP_PRI_CONNTRACK_DEFRAG,
index 7c676671329d9432eb2392f6b4fc649ebcdae97f..ddb894ac1458ca40bf30d213e8b3b2c8a18880af 100644 (file)
@@ -1156,7 +1156,7 @@ static int snmp_parse_mangle(unsigned char *msg,
                }
 
                if (obj->type == SNMP_IPADDR)
-                       mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
+                       mangle_address(ctx.begin, ctx.pointer - 4, map, check);
 
                kfree(obj->id);
                kfree(obj);
index 4c0892badb8b1eb47881b8c24976a872f3c61c6c..4cbe9f0a428179d8c35fa5f0a05dd2b445498c11 100644 (file)
@@ -221,8 +221,10 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sock *child;
+       bool own_req;
 
-       child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
+       child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
+                                                NULL, &own_req);
        if (child) {
                atomic_set(&req->rsk_refcnt, 1);
                sock_rps_save_rxhash(child, skb);
index 30a531ccbf77cbda67bc8cec8781b06cb4dbd474..25300c5e283bc3879fa4400628d4a29141d52e3e 100644 (file)
@@ -495,6 +495,13 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "tcp_recovery",
+               .data           = &sysctl_tcp_recovery,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        {
                .procname       = "tcp_reordering",
                .data           = &sysctl_tcp_reordering,
@@ -576,6 +583,13 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "tcp_min_rtt_wlen",
+               .data           = &sysctl_tcp_min_rtt_wlen,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        {
                .procname       = "tcp_low_latency",
                .data           = &sysctl_tcp_low_latency,
@@ -817,13 +831,6 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "icmp_redirects_use_orig_daddr",
-               .data           = &init_net.ipv4.sysctl_icmp_redirects_use_orig_daddr,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "icmp_ratelimit",
                .data           = &init_net.ipv4.sysctl_icmp_ratelimit,
index ac1bdbb50352efde9686e5c05b2c042afadb49c2..0cfa7c0c1e80dae18b3aa51c3d71d021dd8b4f84 100644 (file)
@@ -388,6 +388,7 @@ void tcp_init_sock(struct sock *sk)
 
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+       tp->rtt_min[0].rtt = ~0U;
 
        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
index 7092a61c4dc8465fcf17ff71b289cf25bbb8b559..7e538f71f5fbae087c3e3e4367d60e08cd609ac5 100644 (file)
@@ -209,7 +209,7 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 
                /* alpha = (1 - g) * alpha + g * F */
 
-               alpha -= alpha >> dctcp_shift_g;
+               alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
                if (bytes_ecn) {
                        /* If dctcp_shift_g == 1, a 32bit value would overflow
                         * after 8 Mbytes.
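
The min_not_zero() change above fixes an integer rounding stall in DCTCP's alpha EWMA: once alpha falls below 1 << dctcp_shift_g, the term alpha >> dctcp_shift_g is zero, so the old code could never decay alpha to zero during long stretches without ECN marks. Subtracting min_not_zero(alpha, alpha >> dctcp_shift_g) removes the whole remainder in that regime. A standalone sketch of the difference (the shift value is illustrative, not necessarily the module default):

    #include <stdio.h>
    #include <stdint.h>

    /* mirrors the kernel's min_not_zero(): min of x and y, zeroes ignored */
    static uint32_t min_not_zero(uint32_t x, uint32_t y)
    {
            if (x == 0)
                    return y;
            if (y == 0)
                    return x;
            return x < y ? x : y;
    }

    int main(void)
    {
            const unsigned int shift = 4;      /* illustrative g = 1/16 */
            uint32_t broken = 10, fixed = 10;  /* alpha below 1 << shift */
            int i;

            for (i = 0; i < 4; i++) {
                    broken -= broken >> shift;  /* 10 >> 4 == 0: stuck at 10 */
                    fixed -= min_not_zero(fixed, fixed >> shift);
                    printf("round %d: broken=%u fixed=%u\n", i, broken, fixed);
            }
            return 0;   /* broken never decays; fixed reaches 0 at once */
    }
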
index 93396bf7b475f3972d247d282faa406d962696dd..55be6ac70cff3679cd7a80aa9aaac48ac156a203 100644 (file)
@@ -133,12 +133,14 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
        u32 end_seq;
+       bool own_req;
 
        req->num_retrans = 0;
        req->num_timeout = 0;
        req->sk = NULL;
 
-       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+                                                        NULL, &own_req);
        if (!child)
                return NULL;
 
index 3b35c3f4d268a5d8a8a2e6a9232d2e682b360c25..fdd88c3803a673881053039cdc8ff44bc1b8aa4a 100644 (file)
@@ -95,6 +95,7 @@ int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly = 2;
+int sysctl_tcp_min_rtt_wlen __read_mostly = 300;
 
 int sysctl_tcp_thin_dupack __read_mostly;
 
@@ -880,6 +881,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 
        if (metric > 0)
                tcp_disable_early_retrans(tp);
+       tp->rack.reord = 1;
 }
 
 /* This must be called before lost_out is incremented */
@@ -905,8 +907,7 @@ static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
        }
 }
 
-static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
-                                           struct sk_buff *skb)
+void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
 {
        tcp_verify_retransmit_hint(tp, skb);
 
@@ -1047,70 +1048,6 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
        return !before(start_seq, end_seq - tp->max_window);
 }
 
-/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "B". Later note: FACK people cheated me again 8), we have to account
- * for reordering! Ugly, but should help.
- *
- * Search retransmitted skbs from write_queue that were sent when snd_nxt was
- * less than what is now known to be received by the other end (derived from
- * highest SACK block). Also calculate the lowest snd_nxt among the remaining
- * retransmitted skbs to avoid some costly processing per ACKs.
- */
-static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
-{
-       const struct inet_connection_sock *icsk = inet_csk(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb;
-       int cnt = 0;
-       u32 new_low_seq = tp->snd_nxt;
-       u32 received_upto = tcp_highest_sack_seq(tp);
-
-       if (!tcp_is_fack(tp) || !tp->retrans_out ||
-           !after(received_upto, tp->lost_retrans_low) ||
-           icsk->icsk_ca_state != TCP_CA_Recovery)
-               return;
-
-       tcp_for_write_queue(skb, sk) {
-               u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
-
-               if (skb == tcp_send_head(sk))
-                       break;
-               if (cnt == tp->retrans_out)
-                       break;
-               if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
-                       continue;
-
-               if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
-                       continue;
-
-               /* TODO: We would like to get rid of tcp_is_fack(tp) only
-                * constraint here (see above) but figuring out that at
-                * least tp->reordering SACK blocks reside between ack_seq
-                * and received_upto is not easy task to do cheaply with
-                * the available datastructures.
-                *
-                * Whether FACK should check here for tp->reordering segs
-                * in-between one could argue for either way (it would be
-                * rather simple to implement as we could count fack_count
-                * during the walk and do tp->fackets_out - fack_count).
-                */
-               if (after(received_upto, ack_seq)) {
-                       TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
-                       tp->retrans_out -= tcp_skb_pcount(skb);
-                       *flag |= FLAG_LOST_RETRANS;
-                       tcp_skb_mark_lost_uncond_verify(tp, skb);
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
-               } else {
-                       if (before(ack_seq, new_low_seq))
-                               new_low_seq = ack_seq;
-                       cnt += tcp_skb_pcount(skb);
-               }
-       }
-
-       if (tp->retrans_out)
-               tp->lost_retrans_low = new_low_seq;
-}
-
 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
                            struct tcp_sack_block_wire *sp, int num_sacks,
                            u32 prior_snd_una)
@@ -1236,6 +1173,8 @@ static u8 tcp_sacktag_one(struct sock *sk,
                return sacked;
 
        if (!(sacked & TCPCB_SACKED_ACKED)) {
+               tcp_rack_advance(tp, xmit_time, sacked);
+
                if (sacked & TCPCB_SACKED_RETRANS) {
                        /* If the segment is not tagged as lost,
                         * we do not clear RETRANS, believing
@@ -1837,7 +1776,6 @@ advance_sp:
            ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
                tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
-       tcp_mark_lost_retrans(sk, &state->flag);
        tcp_verify_left_out(tp);
 out:
 
@@ -2314,14 +2252,29 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
+{
+       return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+              before(tp->rx_opt.rcv_tsecr, when);
+}
+
+/* skb is a spurious retransmission if the returned timestamp echo
+ * reply is prior to the skb transmission time
+ */
+static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
+                                    const struct sk_buff *skb)
+{
+       return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
+              tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
+}
+
 /* Nothing was retransmitted or returned timestamp is less
  * than timestamp of the first retransmission.
  */
 static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 {
        return !tp->retrans_stamp ||
-               (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
-                before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
+              tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
 }
 
 /* Undo procedures. */
@@ -2853,6 +2806,11 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                }
        }
 
+       /* Use RACK to detect loss */
+       if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
+           tcp_rack_mark_lost(sk))
+               flag |= FLAG_LOST_RETRANS;
+
        /* E. Process state. */
        switch (icsk->icsk_ca_state) {
        case TCP_CA_Recovery:
@@ -2915,8 +2873,69 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
        tcp_xmit_retransmit_queue(sk);
 }
 
+/* Kathleen Nichols' algorithm for tracking the minimum value of
+ * a data stream over some fixed time interval. (E.g., the minimum
+ * RTT over the past five minutes.) It uses constant space and constant
+ * time per update yet almost always delivers the same minimum as an
+ * implementation that has to keep all the data in the window.
+ *
+ * The algorithm keeps track of the best, 2nd best & 3rd best min
+ * values, maintaining an invariant that the measurement time of the
+ * n'th best >= n-1'th best. It also makes sure that the three values
+ * are widely separated in the time window since that bounds the worst-
+ * case error when the data is monotonically increasing over the window.
+ *
+ * Upon getting a new min, we can forget everything earlier because it
+ * has no value - the new min is <= everything else in the window by
+ * definition and it's the most recent. So we restart fresh on every new min
+ * and overwrite the 2nd & 3rd choices. The same property holds for the
+ * 2nd & 3rd best.
+ */
+static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
+{
+       const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
+       struct rtt_meas *m = tcp_sk(sk)->rtt_min;
+       struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now };
+       u32 elapsed;
+
+       /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
+       if (unlikely(rttm.rtt <= m[0].rtt))
+               m[0] = m[1] = m[2] = rttm;
+       else if (rttm.rtt <= m[1].rtt)
+               m[1] = m[2] = rttm;
+       else if (rttm.rtt <= m[2].rtt)
+               m[2] = rttm;
+
+       elapsed = now - m[0].ts;
+       if (unlikely(elapsed > wlen)) {
+               /* Passed entire window without a new min so make 2nd choice
+                * the new min & 3rd choice the new 2nd. So forth and so on.
+                */
+               m[0] = m[1];
+               m[1] = m[2];
+               m[2] = rttm;
+               if (now - m[0].ts > wlen) {
+                       m[0] = m[1];
+                       m[1] = rttm;
+                       if (now - m[0].ts > wlen)
+                               m[0] = rttm;
+               }
+       } else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
+               /* Passed a quarter of the window without a new min so
+                * take 2nd choice from the 2nd quarter of the window.
+                */
+               m[2] = m[1] = rttm;
+       } else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
+               /* Passed half the window without a new min so take the 3rd
+                * choice from the last half of the window.
+                */
+               m[2] = rttm;
+       }
+}
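
The function above is self-contained enough to lift out: a windowed min-filter keeping three candidate minima whose timestamps are forced to stay spread across the window, O(1) in space and time per sample. A standalone port, sketched against an abstract tick clock in place of tcp_time_stamp (the kernel window is sysctl_tcp_min_rtt_wlen * HZ, 300 seconds by default per the sysctl hunk above):

    #include <stdio.h>
    #include <stdint.h>

    struct rtt_meas { uint32_t rtt, ts; };

    static void rtt_min_update(struct rtt_meas m[3], uint32_t rtt,
                               uint32_t now, uint32_t wlen)
    {
            struct rtt_meas rttm = { rtt ? rtt : 1, now };
            uint32_t elapsed;

            if (rttm.rtt <= m[0].rtt)           /* new min: forget the rest */
                    m[0] = m[1] = m[2] = rttm;
            else if (rttm.rtt <= m[1].rtt)
                    m[1] = m[2] = rttm;
            else if (rttm.rtt <= m[2].rtt)
                    m[2] = rttm;

            elapsed = now - m[0].ts;
            if (elapsed > wlen) {               /* min aged out: promote */
                    m[0] = m[1];
                    m[1] = m[2];
                    m[2] = rttm;
                    if (now - m[0].ts > wlen) {
                            m[0] = m[1];
                            m[1] = rttm;
                            if (now - m[0].ts > wlen)
                                    m[0] = rttm;
                    }
            } else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
                    m[2] = m[1] = rttm;         /* refresh 2nd each quarter */
            } else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
                    m[2] = rttm;                /* refresh 3rd each half */
            }
    }

    int main(void)
    {
            struct rtt_meas m[3] = { { ~0u, 0 }, { ~0u, 0 }, { ~0u, 0 } };
            uint32_t samples[] = { 50, 40, 45, 60, 70, 80 };

            for (uint32_t t = 0; t < 6; t++) {
                    rtt_min_update(m, samples[t], t, 4 /* window: 4 ticks */);
                    printf("t=%u min=%u\n", t, m[0].rtt);
            }
            return 0;
    }
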
+
 static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
-                                     long seq_rtt_us, long sack_rtt_us)
+                                     long seq_rtt_us, long sack_rtt_us,
+                                     long ca_rtt_us)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2925,9 +2944,6 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
         * Karn's algorithm forbids taking RTT if some retransmitted data
         * is acked (RFC6298).
         */
-       if (flag & FLAG_RETRANS_DATA_ACKED)
-               seq_rtt_us = -1L;
-
        if (seq_rtt_us < 0)
                seq_rtt_us = sack_rtt_us;
 
@@ -2939,11 +2955,16 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
         */
        if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
            flag & FLAG_ACKED)
-               seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr);
-
+               seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
+                                                         tp->rx_opt.rcv_tsecr);
        if (seq_rtt_us < 0)
                return false;
 
+       /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
+        * always taken together with ACK, SACK, or TS-opts. Any negative
+        * values will be skipped with the seq_rtt_us < 0 check above.
+        */
+       tcp_update_rtt_min(sk, ca_rtt_us);
        tcp_rtt_estimator(sk, seq_rtt_us);
        tcp_set_rto(sk);
 
@@ -2964,7 +2985,7 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
                rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
        }
 
-       tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L);
+       tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us);
 }
 
 
@@ -3131,6 +3152,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
                if (sacked & TCPCB_SACKED_ACKED)
                        tp->sacked_out -= acked_pcount;
+               else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb))
+                       tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
                if (sacked & TCPCB_LOST)
                        tp->lost_out -= acked_pcount;
 
@@ -3169,7 +3192,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                flag |= FLAG_SACK_RENEGING;
 
        skb_mstamp_get(&now);
-       if (likely(first_ackt.v64)) {
+       if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
                seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
                ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
        }
@@ -3178,7 +3201,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
        }
 
-       rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
+       rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
+                                       ca_rtt_us);
 
        if (flag & FLAG_ACKED) {
                tcp_rearm_rto(sk);
@@ -6236,7 +6260,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        }
        if (fastopen_sk) {
                af_ops->send_synack(fastopen_sk, dst, &fl, req,
-                                   skb_get_queue_mapping(skb), &foc, false);
+                                   &foc, false);
                /* Add the child socket directly into the accept queue */
                inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
                sk->sk_data_ready(sk);
@@ -6247,7 +6271,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                if (!want_cookie)
                        inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
                af_ops->send_synack(sk, dst, &fl, req,
-                                   skb_get_queue_mapping(skb), &foc, !want_cookie);
+                                   &foc, !want_cookie);
                if (want_cookie)
                        goto drop_and_free;
        }
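
Several of the tcp_input.c hunks above wire in RACK, the time-domain loss detector replacing the removed tcp_mark_lost_retrans(): tcp_rack_advance() records the transmit time of the most recently delivered packet, both on SACKs (tcp_sacktag_one) and on cumulative ACKs (tcp_clean_rtx_queue), while tcp_tsopt_ecr_before() keeps timestamp echoes that prove a retransmission spurious from advancing it. tcp_rack_mark_lost() later declares anything sent sufficiently before that reference lost. A minimal standalone sketch of the rule (the reordering window is a fixed value here purely for illustration; the kernel derives it from the measured minimum RTT):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct seg {
            uint64_t xmit_us;   /* (re)transmit timestamp */
            bool delivered;     /* cumulatively or selectively acked */
            bool lost;
    };

    /* anything sent more than reo_wnd before the most recently delivered
     * segment's own send time is presumed lost; reo_wnd absorbs reordering
     */
    static void rack_mark_lost(struct seg *s, int n, uint64_t rack_xmit_us,
                               uint64_t reo_wnd_us)
    {
            for (int i = 0; i < n; i++)
                    if (!s[i].delivered &&
                        s[i].xmit_us + reo_wnd_us < rack_xmit_us)
                            s[i].lost = true;
    }

    int main(void)
    {
            struct seg s[3] = {
                    { 1000, false, false },  /* old and undelivered: lost */
                    { 1900, false, false },  /* inside reo_wnd: spared */
                    { 2000, true,  false },  /* the delivered reference */
            };

            rack_mark_lost(s, 3, 2000, 200);
            printf("%d %d %d\n", s[0].lost, s[1].lost, s[2].lost); /* 1 0 0 */
            return 0;
    }
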
index ddb198392c7fd9be24d62a5d87da051b86cc8c55..1c2648bbac4b22b55739dde4d92dd2ca0533f77a 100644 (file)
@@ -324,7 +324,6 @@ void tcp_req_err(struct sock *sk, u32 seq)
 
        if (seq != tcp_rsk(req)->snt_isn) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-               reqsk_put(req);
        } else {
                /*
                 * Still in SYN_RECV, just remove it silently.
@@ -332,9 +331,10 @@ void tcp_req_err(struct sock *sk, u32 seq)
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
-               NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
                inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+               NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
        }
+       reqsk_put(req);
 }
 EXPORT_SYMBOL(tcp_req_err);
 
@@ -821,7 +821,6 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
-                             u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc,
                                  bool attach_req)
 {
@@ -839,7 +838,6 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
-               skb_set_queue_mapping(skb, queue_mapping);
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            ireq->opt);
@@ -1249,7 +1247,9 @@ EXPORT_SYMBOL(tcp_v4_conn_request);
  */
 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
-                                 struct dst_entry *dst)
+                                 struct dst_entry *dst,
+                                 struct request_sock *req_unhash,
+                                 bool *own_req)
 {
        struct inet_request_sock *ireq;
        struct inet_sock *newinet;
@@ -1325,7 +1325,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
-       __inet_hash_nolisten(newsk, NULL);
+       *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 
        return newsk;
 
@@ -1572,6 +1572,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
        TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
        TCP_SKB_CB(skb)->sacked  = 0;
 
+lookup:
        sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
        if (!sk)
                goto no_tcp_socket;
@@ -1587,8 +1588,12 @@ process:
                sk = req->rsk_listener;
                if (tcp_v4_inbound_md5_hash(sk, skb))
                        goto discard_and_relse;
-               if (sk->sk_state == TCP_LISTEN)
+               if (likely(sk->sk_state == TCP_LISTEN)) {
                        nsk = tcp_check_req(sk, skb, req, false);
+               } else {
+                       inet_csk_reqsk_queue_drop_and_put(sk, req);
+                       goto lookup;
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_it;
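
The goto lookup added above closes a race with a vanishing listener: if the parent of a request socket is found in any state other than TCP_LISTEN, the request can no longer be serviced, so it is unhashed and released in one step via the inet_csk_reqsk_queue_drop_and_put() helper introduced earlier, and the demux restarts; the retried lookup no longer finds the request and the packet falls through to the ordinary paths. Condensed:

    lookup:
            sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
            /* ... */
            if (likely(sk->sk_state == TCP_LISTEN)) {
                    nsk = tcp_check_req(sk, skb, req, false);
            } else {
                    inet_csk_reqsk_queue_drop_and_put(sk, req);
                    goto lookup;    /* listener gone: rerun the demux */
            }
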
index 41828bdc5d32ad13526dcc0c390f09604e67d9d9..3575dd1e5b6775ad8a35bb3ce0e951bc01e37e7c 100644 (file)
@@ -470,6 +470,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 
                newtp->srtt_us = 0;
                newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+               newtp->rtt_min[0].rtt = ~0U;
                newicsk->icsk_rto = TCP_TIMEOUT_INIT;
 
                newtp->packets_out = 0;
@@ -547,6 +548,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
                tcp_ecn_openreq_child(newtp, req);
                newtp->fastopen_rsk = NULL;
                newtp->syn_data_acked = 0;
+               newtp->rack.mstamp.v64 = 0;
+               newtp->rack.advanced = 0;
 
                newtp->saved_syn = req->saved_syn;
                req->saved_syn = NULL;
@@ -577,6 +580,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;
+       bool own_req;
 
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
@@ -764,18 +768,14 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         * ESTABLISHED STATE. If it will be dropped after
         * socket is created, wait for troubles.
         */
-       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+                                                        req, &own_req);
        if (!child)
                goto listen_overflow;
 
        sock_rps_save_rxhash(child, skb);
        tcp_synack_rtt_meas(child, req);
-       inet_csk_reqsk_queue_drop(sk, req);
-       inet_csk_reqsk_queue_add(sk, req, child);
-       /* Warning: caller must not call reqsk_put(req);
-        * child stole last reference on it.
-        */
-       return child;
+       return inet_csk_complete_hashdance(sk, child, req, own_req);
 
 listen_overflow:
        if (!sysctl_tcp_abort_on_overflow) {
index 6e79fcb0addb9443384614c6c1c9cff5bc571ad1..f4f9793eb0255e62f623303b87e44c1c777e2251 100644 (file)
@@ -2655,8 +2655,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                        net_dbg_ratelimited("retrans_out leaked\n");
                }
 #endif
-               if (!tp->retrans_out)
-                       tp->lost_retrans_low = tp->snd_nxt;
                TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
                tp->retrans_out += tcp_skb_pcount(skb);
 
@@ -2664,10 +2662,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = tcp_skb_timestamp(skb);
 
-               /* snd_nxt is stored to detect loss of retransmitted segment,
-                * see tcp_input.c tcp_sacktag_write_queue().
-                */
-               TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
        } else if (err != -EBUSY) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
        }
@@ -3416,7 +3410,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
         */
        tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
        skb_mstamp_get(&skb->skb_mstamp);
-       NET_INC_STATS_BH(sock_net(sk), mib);
+       NET_INC_STATS(sock_net(sk), mib);
        return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
@@ -3518,7 +3512,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
        int res;
 
        tcp_rsk(req)->txhash = net_tx_rndhash();
-       res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL, true);
+       res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true);
        if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
new file mode 100644 (file)
index 0000000..5353085
--- /dev/null
@@ -0,0 +1,109 @@
+#include <linux/tcp.h>
+#include <net/tcp.h>
+
+int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;
+
+/* Marks a packet lost if some packet sent later has been (s)acked.
+ * The underlying idea is similar to the traditional dupthresh and FACK
+ * but they look at different metrics:
+ *
+ * dupthresh: 3 OOO packets delivered (packet count)
+ * FACK: sequence delta to highest sacked sequence (sequence space)
+ * RACK: sent time delta to the latest delivered packet (time domain)
+ *
+ * The advantage of RACK is that it applies to both original and
+ * retransmitted packets and is therefore robust against tail losses.
+ * Another advantage is that it is more resilient to reordering, simply
+ * allowing some "settling delay" instead of tweaking the dupthresh.
+ *
+ * The current version is only used after recovery starts but can be
+ * easily extended to detect the first loss.
+ */
+int tcp_rack_mark_lost(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+       u32 reo_wnd, prior_retrans = tp->retrans_out;
+
+       if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
+               return 0;
+
+       /* Reset the advanced flag to avoid unnecessary queue scanning */
+       tp->rack.advanced = 0;
+
+       /* To be more reordering resilient, allow min_rtt/4 settling delay
+        * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
+        * RTT because reordering is often a path property and less related
+        * to queuing or delayed ACKs.
+        *
+        * TODO: measure and adapt to the observed reordering delay, and
+        * use a timer to retransmit like the delayed early retransmit.
+        */
+       reo_wnd = 1000;
+       if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
+               reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
+
+       tcp_for_write_queue(skb, sk) {
+               struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+
+               if (skb == tcp_send_head(sk))
+                       break;
+
+               /* Skip ones already (s)acked */
+               if (!after(scb->end_seq, tp->snd_una) ||
+                   scb->sacked & TCPCB_SACKED_ACKED)
+                       continue;
+
+               if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
+
+                       if (skb_mstamp_us_delta(&tp->rack.mstamp,
+                                               &skb->skb_mstamp) <= reo_wnd)
+                               continue;
+
+                       /* skb is lost if a packet sent later is sacked */
+                       tcp_skb_mark_lost_uncond_verify(tp, skb);
+                       if (scb->sacked & TCPCB_SACKED_RETRANS) {
+                               scb->sacked &= ~TCPCB_SACKED_RETRANS;
+                               tp->retrans_out -= tcp_skb_pcount(skb);
+                               NET_INC_STATS_BH(sock_net(sk),
+                                                LINUX_MIB_TCPLOSTRETRANSMIT);
+                       }
+               } else if (!(scb->sacked & TCPCB_RETRANS)) {
+                       /* Original data is sent sequentially, so stop early
+                        * because the rest were all sent after tp->rack.mstamp
+                        */
+                       break;
+               }
+       }
+       return prior_retrans - tp->retrans_out;
+}
+
+/* Record the most recently (re)sent time among the (s)acked packets */
+void tcp_rack_advance(struct tcp_sock *tp,
+                     const struct skb_mstamp *xmit_time, u8 sacked)
+{
+       if (tp->rack.mstamp.v64 &&
+           !skb_mstamp_after(xmit_time, &tp->rack.mstamp))
+               return;
+
+       if (sacked & TCPCB_RETRANS) {
+               struct skb_mstamp now;
+
+               /* If the sacked packet was retransmitted, it's ambiguous
+                * whether the retransmission or the original (or the prior
+                * retransmission) was sacked.
+                *
+                * If the original is lost, there is no ambiguity. Otherwise
+                * we assume the original can be delayed up to aRTT + min_rtt.
+                * The aRTT term is bounded by the fast recovery or timeout,
+                * so it's at least one RTT (i.e., retransmission is at least
+                * an RTT later).
+                */
+               skb_mstamp_get(&now);
+               if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp))
+                       return;
+       }
+
+       tp->rack.mstamp = *xmit_time;
+       tp->rack.advanced = 1;
+}
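
Stripped of the skb and timestamp plumbing, the rule the two functions above implement is: remember the latest transmit time among (s)acked packets (tcp_rack_advance), and declare an earlier-sent, still-unacked packet lost once it trails that time by more than the reordering window (tcp_rack_mark_lost). A minimal sketch of that rule with plain microsecond counters, taking the min_rtt/4 branch unconditionally for brevity; the names here are illustrative, not kernel API:

    /* Illustrative RACK loss rule with plain microsecond counters; not
     * kernel code. reo_wnd mirrors the comment above: min_rtt/4, floored
     * at 1000us (applied unconditionally here for brevity).
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t reo_wnd_us(uint64_t min_rtt_us)
    {
            uint64_t w = min_rtt_us / 4;

            return w > 1000 ? w : 1000;
    }

    /* Lost if something sent more than reo_wnd later was already (s)acked. */
    static int rack_lost(uint64_t sent_us, uint64_t rack_mstamp_us,
                         uint64_t min_rtt_us)
    {
            return rack_mstamp_us > sent_us &&
                   rack_mstamp_us - sent_us > reo_wnd_us(min_rtt_us);
    }

    int main(void)
    {
            /* latest (s)acked packet was sent at t=50000us, min_rtt = 20ms */
            printf("%d\n", rack_lost(40000, 50000, 20000)); /* 1: lost */
            printf("%d\n", rack_lost(46000, 50000, 20000)); /* 0: inside the window */
            return 0;
    }
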
index 9f298d0dc9a1ccc3ac53dd205be8b90e56cc866b..7ee6518afa86ff785cacda0f115297ff6e5d0fa5 100644 (file)
@@ -30,6 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 
        mtu = dst_mtu(skb_dst(skb));
        if (skb->len > mtu) {
+               skb->protocol = htons(ETH_P_IP);
+
                if (skb->sk)
                        xfrm_local_error(skb, mtu);
                else
index c8380f1876f193fa86c05375e055e31be165167a..d0c685cdc3456aa21359365e873b30e650209d23 100644 (file)
@@ -81,6 +81,7 @@
 #include <net/ip.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/l3mdev.h>
 #include <linux/if_tunnel.h>
 #include <linux/rtnetlink.h>
 #include <linux/netconf.h>
@@ -2146,7 +2147,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
                      unsigned long expires, u32 flags)
 {
        struct fib6_config cfg = {
-               .fc_table = RT6_TABLE_PREFIX,
+               .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
                .fc_metric = IP6_RT_PRIO_ADDRCONF,
                .fc_ifindex = dev->ifindex,
                .fc_expires = expires,
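
The x ? : y form used in this and the following addrconf hunks is GNU C's conditional with an omitted middle operand: it evaluates x once and yields x itself when x is non-zero, otherwise y. So an enslaved device gets its VRF's FIB table and everything else keeps the default. A quick standalone demonstration (GNU C, which the kernel is built as):

    /* GNU C: "a ?: b" is "a ? a : b" with a evaluated only once. */
    #include <stdio.h>

    static unsigned int table_for(unsigned int l3mdev_table)
    {
            /* 254 stands in for the default (main) table id */
            return l3mdev_table ?: 254;
    }

    int main(void)
    {
            printf("%u\n", table_for(0));   /* 254: device has no L3 master */
            printf("%u\n", table_for(10));  /* 10: the VRF's own table */
            return 0;
    }
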
@@ -2179,8 +2180,9 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
        struct fib6_node *fn;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
+       u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
 
-       table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+       table = fib6_get_table(dev_net(dev), tb_id);
        if (!table)
                return NULL;
 
@@ -2211,7 +2213,7 @@ out:
 static void addrconf_add_mroute(struct net_device *dev)
 {
        struct fib6_config cfg = {
-               .fc_table = RT6_TABLE_LOCAL,
+               .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
                .fc_metric = IP6_RT_PRIO_ADDRCONF,
                .fc_ifindex = dev->ifindex,
                .fc_dst_len = 8,
@@ -3029,6 +3031,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
 {
        struct in6_addr addr;
 
+       /* no link-local addresses on L3 master devices */
+       if (netif_is_l3_master(idev->dev))
+               return;
+
        ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
 
        if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY) {
@@ -3119,6 +3125,8 @@ static void addrconf_gre_config(struct net_device *dev)
        }
 
        addrconf_addr_gen(idev, true);
+       if (dev->flags & IFF_POINTOPOINT)
+               addrconf_add_mroute(dev);
 }
 #endif
 
@@ -4780,7 +4788,8 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static size_t inet6_get_link_af_size(const struct net_device *dev)
+static size_t inet6_get_link_af_size(const struct net_device *dev,
+                                    u32 ext_filter_mask)
 {
        if (!__in6_dev_get(dev))
                return 0;
index 9f777ec59a59d24566d87643889a8c591dd52637..ed33abf57abd7d7ec71685a7180cf88ec132626c 100644 (file)
@@ -32,6 +32,7 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags, pol_lookup_t lookup)
 {
+       struct rt6_info *rt;
        struct fib_lookup_arg arg = {
                .lookup_ptr = lookup,
                .flags = FIB_LOOKUP_NOREF,
@@ -40,11 +41,21 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
        fib_rules_lookup(net->ipv6.fib6_rules_ops,
                         flowi6_to_flowi(fl6), flags, &arg);
 
-       if (arg.result)
-               return arg.result;
+       rt = arg.result;
 
-       dst_hold(&net->ipv6.ip6_null_entry->dst);
-       return &net->ipv6.ip6_null_entry->dst;
+       if (!rt) {
+               dst_hold(&net->ipv6.ip6_null_entry->dst);
+               return &net->ipv6.ip6_null_entry->dst;
+       }
+
+       if (rt->rt6i_flags & RTF_REJECT &&
+           rt->dst.error == -EAGAIN) {
+               ip6_rt_put(rt);
+               rt = net->ipv6.ip6_null_entry;
+               dst_hold(&rt->dst);
+       }
+
+       return &rt->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
index 6c2b2132c8d328e4d947c3b0b8216ea40f582f90..36c5a98b04727b220e9ea77e00d96410d4bb3f74 100644 (file)
@@ -68,6 +68,7 @@
 #include <net/xfrm.h>
 #include <net/inet_common.h>
 #include <net/dsfield.h>
+#include <net/l3mdev.h>
 
 #include <asm/uaccess.h>
 
@@ -452,7 +453,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
         *      and anycast addresses will be checked later.
         */
        if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
-               net_dbg_ratelimited("icmp6_send: addr_any/mcast source\n");
+               net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
+                                   &hdr->saddr, &hdr->daddr);
                return;
        }
 
@@ -460,7 +462,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
         *      Never answer to a ICMP packet.
         */
        if (is_ineligible(skb)) {
-               net_dbg_ratelimited("icmp6_send: no reply to icmp error\n");
+               net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
+                                   &hdr->saddr, &hdr->daddr);
                return;
        }
 
@@ -496,6 +499,9 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;
 
+       if (!fl6.flowi6_oif)
+               fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev);
+
        dst = icmpv6_route_lookup(net, skb, sk, &fl6);
        if (IS_ERR(dst))
                goto out;
@@ -509,7 +515,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        len = skb->len - msg.offset;
        len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
        if (len < 0) {
-               net_dbg_ratelimited("icmp: len problem\n");
+               net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
+                                   &hdr->saddr, &hdr->daddr);
                goto out_dst_release;
        }
 
@@ -575,7 +582,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        fl6.daddr = ipv6_hdr(skb)->saddr;
        if (saddr)
                fl6.saddr = *saddr;
-       fl6.flowi6_oif = skb->dev->ifindex;
+       fl6.flowi6_oif = l3mdev_fib_oif(skb->dev);
        fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
        fl6.flowi6_mark = mark;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -781,7 +788,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
                if (type & ICMPV6_INFOMSG_MASK)
                        break;
 
-               net_dbg_ratelimited("icmpv6: msg of unknown type\n");
+               net_dbg_ratelimited("icmpv6: msg of unknown type [%pI6c > %pI6c]\n",
+                                   saddr, daddr);
 
                /*
                 * error of unknown type.
index 7d2e0023c72dbe2e466b35ffb1c6f0c0446af6da..0c7e276c230e4ab2cd7c7ab0688e84920a41f69b 100644 (file)
@@ -264,6 +264,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
 
        return NULL;
 }
+EXPORT_SYMBOL_GPL(fib6_get_table);
 
 static void __net_init fib6_tables_init(struct net *net)
 {
@@ -285,7 +286,17 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags, pol_lookup_t lookup)
 {
-       return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+       struct rt6_info *rt;
+
+       rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+       if (rt->rt6i_flags & RTF_REJECT &&
+           rt->dst.error == -EAGAIN) {
+               ip6_rt_put(rt);
+               rt = net->ipv6.ip6_null_entry;
+               dst_hold(&rt->dst);
+       }
+
+       return &rt->dst;
 }
 
 static void __net_init fib6_tables_init(struct net *net)
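
Both copies of fib6_rule_lookup() (the policy-routing variant in fib6_rules.c earlier and this plain one) now map a reject route whose error is -EAGAIN, the sentinel for "continue with the next rule or table", onto the shared null entry, releasing the reference on the rejected route and taking one on the fallback. The refcount choreography in miniature, with stand-in types:

    /* Stand-in types; the point is the reference hand-off, not the types. */
    #include <stdio.h>

    #define EAGAIN_STUB 11

    struct dst_stub { int refcnt; int error; };

    static struct dst_stub null_entry = { .refcnt = 1 };

    static void hold(struct dst_stub *d) { d->refcnt++; }
    static void put(struct dst_stub *d)  { d->refcnt--; }

    /* Swap a "try the next table" result for the shared fallback entry,
     * keeping both reference counts balanced.
     */
    static struct dst_stub *normalize(struct dst_stub *rt)
    {
            if (rt && rt->error == -EAGAIN_STUB) {
                    put(rt);                /* release the rejected route */
                    rt = &null_entry;
                    hold(rt);               /* pin the fallback we hand out */
            }
            return rt;
    }

    int main(void)
    {
            struct dst_stub rejected = { .refcnt = 1, .error = -EAGAIN_STUB };
            struct dst_stub *res = normalize(&rejected);

            printf("rejected=%d fallback=%d\n", rejected.refcnt, res->refcnt);
            /* rejected=0 fallback=2 */
            return 0;
    }
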
index 08b62047c67f311ca808533cb7a83b5caab0cfc8..eeca943f12dc083e195dde804c764c8732d11b9e 100644 (file)
@@ -264,6 +264,9 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
        struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
        int err = -ENOSYS;
 
+       if (skb->encapsulation)
+               skb_set_inner_network_header(skb, nhoff);
+
        iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
 
        rcu_read_lock();
@@ -280,6 +283,13 @@ out_unlock:
        return err;
 }
 
+static int sit_gro_complete(struct sk_buff *skb, int nhoff)
+{
+       skb->encapsulation = 1;
+       skb_shinfo(skb)->gso_type |= SKB_GSO_SIT;
+       return ipv6_gro_complete(skb, nhoff);
+}
+
 static struct packet_offload ipv6_packet_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_IPV6),
        .callbacks = {
@@ -292,6 +302,8 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
 static const struct net_offload sit_offload = {
        .callbacks = {
                .gso_segment    = ipv6_gso_segment,
+               .gro_receive    = ipv6_gro_receive,
+               .gro_complete   = sit_gro_complete,
        },
 };
 
index 32583b507c2ee7613cf9d27030d476d602d866bc..c2650688aca757708cb9f8877ed82101036ac5b5 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/overflow-arith.h>
 #include <linux/string.h>
 #include <linux/socket.h>
 #include <linux/net.h>
@@ -55,6 +56,7 @@
 #include <net/xfrm.h>
 #include <net/checksum.h>
 #include <linux/mroute6.h>
+#include <net/l3mdev.h>
 
 static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
@@ -388,6 +390,9 @@ int ip6_forward(struct sk_buff *skb)
        if (skb->pkt_type != PACKET_HOST)
                goto drop;
 
+       if (unlikely(skb->sk))
+               goto drop;
+
        if (skb_warn_if_lro(skb))
                goto drop;
 
@@ -592,7 +597,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                if (np->frag_size)
                        mtu = np->frag_size;
        }
-       mtu -= hlen + sizeof(struct frag_hdr);
+
+       if (overflow_usub(mtu, hlen + sizeof(struct frag_hdr), &mtu) ||
+           mtu <= 7)
+               goto fail_toobig;
 
        frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
                                    &ipv6_hdr(skb)->saddr);
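
overflow_usub(), pulled in via the new <linux/overflow-arith.h> include, is a checked unsigned subtraction: it reports whether mtu minus the fragment-header overhead wrapped around, catching absurdly small link MTUs instead of silently producing a huge bogus value. GCC and Clang expose the same family of primitives to userspace as __builtin_sub_overflow(), which overflow_usub() maps to when the compiler provides it; a minimal sketch of the pattern:

    /* Checked "mtu -= overhead" using the compiler builtin; not kernel code. */
    #include <stdio.h>
    #include <stdbool.h>

    static bool frag_mtu(unsigned int mtu, unsigned int overhead,
                         unsigned int *out)
    {
            /* __builtin_sub_overflow() returns true if the result wrapped */
            if (__builtin_sub_overflow(mtu, overhead, out) || *out <= 7)
                    return false;   /* fragments carry data in 8-byte units */
            return true;
    }

    int main(void)
    {
            unsigned int m;

            printf("%d\n", frag_mtu(1500, 48, &m)); /* 1, m == 1452 */
            printf("%d\n", frag_mtu(40, 48, &m));   /* 0: would underflow */
            return 0;
    }
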
@@ -885,7 +893,8 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 #ifdef CONFIG_IPV6_SUBTREES
            ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 #endif
-           (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
+          (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
+             (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
                dst_release(dst);
                dst = NULL;
        }
@@ -1037,7 +1046,7 @@ struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
        if (final_dst)
                fl6->daddr = *final_dst;
        if (!fl6->flowi6_oif)
-               fl6->flowi6_oif = dst->dev->ifindex;
+               fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);
 
        return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
index b18012f9f9fcba0b7d1f59b2a156f0d35b28b63a..3e0f855e1bead049064a284494eda378f85ae47e 100644 (file)
@@ -67,6 +67,7 @@
 #include <net/flow.h>
 #include <net/ip6_checksum.h>
 #include <net/inet_common.h>
+#include <net/l3mdev.h>
 #include <linux/proc_fs.h>
 
 #include <linux/netfilter.h>
@@ -147,6 +148,7 @@ struct neigh_table nd_tbl = {
        .gc_thresh2 =    512,
        .gc_thresh3 =   1024,
 };
+EXPORT_SYMBOL_GPL(nd_tbl);
 
 static void ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data)
 {
@@ -441,8 +443,11 @@ static void ndisc_send_skb(struct sk_buff *skb,
 
        if (!dst) {
                struct flowi6 fl6;
+               int oif = l3mdev_fib_oif(skb->dev);
 
-               icmpv6_flow_init(sk, &fl6, type, saddr, daddr, skb->dev->ifindex);
+               icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
+               if (oif != skb->dev->ifindex)
+                       fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
                dst = icmp6_dst_alloc(skb->dev, &fl6);
                if (IS_ERR(dst)) {
                        kfree_skb(skb);
@@ -766,7 +771,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 
        ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1);
        if (ifp) {
-
+have_ifp:
                if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) {
                        if (dad) {
                                /*
@@ -792,6 +797,18 @@ static void ndisc_recv_ns(struct sk_buff *skb)
        } else {
                struct net *net = dev_net(dev);
 
+               /* perhaps an address on the master device */
+               if (netif_is_l3_slave(dev)) {
+                       struct net_device *mdev;
+
+                       mdev = netdev_master_upper_dev_get_rcu(dev);
+                       if (mdev) {
+                               ifp = ipv6_get_ifaddr(net, &msg->target, mdev, 1);
+                               if (ifp)
+                                       goto have_ifp;
+                       }
+               }
+
                idev = in6_dev_get(dev);
                if (!idev) {
                        /* XXX: count this drop? */
@@ -1483,6 +1500,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
        struct flowi6 fl6;
        int rd_len;
        u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
+       int oif = l3mdev_fib_oif(dev);
        bool ret;
 
        if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
@@ -1499,7 +1517,10 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
        }
 
        icmpv6_flow_init(sk, &fl6, NDISC_REDIRECT,
-                        &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
+                        &saddr_buf, &ipv6_hdr(skb)->saddr, oif);
+
+       if (oif != skb->dev->ifindex)
+               fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
 
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error) {
index 96833e4b31939a191eaf7de297ac438d4aa41fa4..f6a024e141e595541009cb24c172e0c52a8a879f 100644 (file)
@@ -58,6 +58,7 @@ endif # NF_TABLES
 
 config NF_DUP_IPV6
        tristate "Netfilter IPv6 packet duplication to alternate destination"
+       depends on !NF_CONNTRACK || NF_CONNTRACK
        help
          This option enables the nf_dup_ipv6 core, which duplicates an IPv6
          packet to be rerouted to another destination.
index 80e3bd72b715fc628290298ef92ecf23b5ea818f..99425cf2819b83ceb33d49af65284cb16fd076ec 100644 (file)
@@ -117,7 +117,7 @@ ip6_packet_match(const struct sk_buff *skb,
        if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
                dprintf("VIA in mismatch (%s vs %s).%s\n",
                        indev, ip6info->iniface,
-                       ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
+                       ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");
                return false;
        }
 
@@ -126,14 +126,14 @@ ip6_packet_match(const struct sk_buff *skb,
        if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
                dprintf("VIA out mismatch (%s vs %s).%s\n",
                        outdev, ip6info->outiface,
-                       ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
+                       ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
                return false;
        }
 
 /* ... might want to do something with class and flowlabel here ... */
 
        /* look for the desired protocol header */
-       if((ip6info->flags & IP6T_F_PROTO)) {
+       if (ip6info->flags & IP6T_F_PROTO) {
                int protohdr;
                unsigned short _frag_off;
 
@@ -151,9 +151,9 @@ ip6_packet_match(const struct sk_buff *skb,
                                ip6info->proto);
 
                if (ip6info->proto == protohdr) {
-                       if(ip6info->invflags & IP6T_INV_PROTO) {
+                       if (ip6info->invflags & IP6T_INV_PROTO)
                                return false;
-                       }
+
                        return true;
                }
 
@@ -443,8 +443,8 @@ ip6t_do_table(struct sk_buff *skb,
                        break;
        } while (!acpar.hotdrop);
 
-       xt_write_recseq_end(addend);
-       local_bh_enable();
+       xt_write_recseq_end(addend);
+       local_bh_enable();
 
 #ifdef DEBUG_ALLOW_ALL
        return NF_ACCEPT;
@@ -561,7 +561,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                pos = newpos;
                        }
                }
-               next:
+next:
                duprintf("Finished chain %u\n", hook);
        }
        return 1;
@@ -816,7 +816,7 @@ static void cleanup_entry(struct ip6t_entry *e, struct net *net)
    newinfo) */
 static int
 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
-                const struct ip6t_replace *repl)
+               const struct ip6t_replace *repl)
 {
        struct ip6t_entry *iter;
        unsigned int i;
@@ -1090,7 +1090,7 @@ static int compat_table_info(const struct xt_table_info *info,
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+                   const int *len, int compat)
 {
        char name[XT_TABLE_MAXNAMELEN];
        struct xt_table *t;
@@ -1152,7 +1152,7 @@ static int get_info(struct net *net, void __user *user,
 
 static int
 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
-            const int *len)
+           const int *len)
 {
        int ret;
        struct ip6t_get_entries get;
index a10a2a9e9f94129e2d4a92b0c58012eb346485b3..3deed5860a42510078a2377260819aa269d0bc47 100644 (file)
@@ -244,7 +244,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
        synproxy_build_options(nth, opts);
 
        synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-                         niph, nth, tcp_hdr_size);
+                         niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -458,14 +458,12 @@ static struct xt_target synproxy_tg6_reg __read_mostly = {
 static struct nf_hook_ops ipv6_synproxy_ops[] __read_mostly = {
        {
                .hook           = ipv6_synproxy_hook,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
        },
        {
                .hook           = ipv6_synproxy_hook,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_POST_ROUTING,
                .priority       = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
index abea175d5853212421c4d8919f0de6794476534a..de2a10a565f549bc4ae183d1c5af31d29fb106e4 100644 (file)
@@ -70,7 +70,6 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
        /* Before packet filtering, change destination */
        {
                .hook           = ip6table_nat_in,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_PRE_ROUTING,
                .priority       = NF_IP6_PRI_NAT_DST,
@@ -78,7 +77,6 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
        /* After packet filtering, change source */
        {
                .hook           = ip6table_nat_out,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_POST_ROUTING,
                .priority       = NF_IP6_PRI_NAT_SRC,
@@ -86,7 +84,6 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
        /* Before packet filtering, change destination */
        {
                .hook           = ip6table_nat_local_fn,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP6_PRI_NAT_DST,
@@ -94,7 +91,6 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
        /* After packet filtering, change source */
        {
                .hook           = ip6table_nat_fn,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP6_PRI_NAT_SRC,
index dd83ad42f8f65f18b0081a5e18da55d6c4289ed6..1aa5848764a78a1691e8bbb9af4240eb1733c73e 100644 (file)
@@ -187,42 +187,36 @@ static unsigned int ipv6_conntrack_local(void *priv,
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
        {
                .hook           = ipv6_conntrack_in,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_PRE_ROUTING,
                .priority       = NF_IP6_PRI_CONNTRACK,
        },
        {
                .hook           = ipv6_conntrack_local,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP6_PRI_CONNTRACK,
        },
        {
                .hook           = ipv6_helper,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_POST_ROUTING,
                .priority       = NF_IP6_PRI_CONNTRACK_HELPER,
        },
        {
                .hook           = ipv6_confirm,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_POST_ROUTING,
                .priority       = NF_IP6_PRI_LAST,
        },
        {
                .hook           = ipv6_helper,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP6_PRI_CONNTRACK_HELPER,
        },
        {
                .hook           = ipv6_confirm,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP6_PRI_LAST-1,
index d3b797446cea5fa2a87d50c45366e702c640f730..660bc10c7a9c178ae50c12e0ba6245cb7bb6202d 100644 (file)
@@ -57,12 +57,12 @@ static const u_int8_t invmap[] = {
        [ICMPV6_ECHO_REQUEST - 128]     = ICMPV6_ECHO_REPLY + 1,
        [ICMPV6_ECHO_REPLY - 128]       = ICMPV6_ECHO_REQUEST + 1,
        [ICMPV6_NI_QUERY - 128]         = ICMPV6_NI_REPLY + 1,
-       [ICMPV6_NI_REPLY - 128]         = ICMPV6_NI_QUERY +1
+       [ICMPV6_NI_REPLY - 128]         = ICMPV6_NI_QUERY + 1
 };
 
 static const u_int8_t noct_valid_new[] = {
        [ICMPV6_MGM_QUERY - 130] = 1,
-       [ICMPV6_MGM_REPORT -130] = 1,
+       [ICMPV6_MGM_REPORT - 130] = 1,
        [ICMPV6_MGM_REDUCTION - 130] = 1,
        [NDISC_ROUTER_SOLICITATION - 130] = 1,
        [NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
index 2fb86a99bf5f1325cb97e1bd75c5870b38f64116..056f5d4a852aa1d8014c439378e2b2281b3622e4 100644 (file)
@@ -59,7 +59,7 @@ struct nf_ct_frag6_skb_cb
        struct sk_buff          *orig;
 };
 
-#define NFCT_FRAG6_CB(skb)     ((struct nf_ct_frag6_skb_cb*)((skb)->cb))
+#define NFCT_FRAG6_CB(skb)     ((struct nf_ct_frag6_skb_cb *)((skb)->cb))
 
 static struct inet_frags nf_frags;
 
@@ -445,7 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
        skb_reset_transport_header(head);
        skb_push(head, head->data - skb_network_header(head));
 
-       for (fp=head->next; fp; fp = fp->next) {
+       for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
index 5173a89a238ef37e2862b7e91abafb84700fc191..4fdbed5ebfb6bbe92136fc545533914a40be7a2a 100644 (file)
@@ -85,14 +85,12 @@ static unsigned int ipv6_defrag(void *priv,
 static struct nf_hook_ops ipv6_defrag_ops[] = {
        {
                .hook           = ipv6_defrag,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_PRE_ROUTING,
                .priority       = NF_IP6_PRI_CONNTRACK_DEFRAG,
        },
        {
                .hook           = ipv6_defrag,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP6_PRI_CONNTRACK_DEFRAG,
index 7309e475f68b405d040e53069b123663914ffd7b..e0f922b777e3d9333ca4723e422182d083c6afba 100644 (file)
@@ -26,7 +26,7 @@ const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
        int tcphoff;
 
        proto = oip6h->nexthdr;
-       tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data),
+       tcphoff = ipv6_skip_exthdr(oldskb, ((u8 *)(oip6h + 1) - oldskb->data),
                                   &proto, &frag_off);
 
        if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
@@ -224,7 +224,7 @@ static bool reject6_csum_ok(struct sk_buff *skb, int hook)
                return true;
 
        proto = ip6h->nexthdr;
-       thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+       thoff = ipv6_skip_exthdr(skb, ((u8 *)(ip6h + 1) - skb->data), &proto, &fo);
 
        if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
                return false;
index 9df75bd7c94a2225635b5bfeb84f523ed25e129f..71d995ff3108fe001fc2318aa0e7c76c3cd9b1f8 100644 (file)
@@ -61,11 +61,11 @@ static const struct nf_chain_type nft_chain_route_ipv6 = {
        .name           = "route",
        .type           = NFT_CHAIN_T_ROUTE,
        .family         = NFPROTO_IPV6,
-        .owner         = THIS_MODULE,
+       .owner          = THIS_MODULE,
        .hook_mask      = (1 << NF_INET_LOCAL_OUT),
        .hooks          = {
-                [NF_INET_LOCAL_OUT]    = nf_route_table_hook,
-        },
+               [NF_INET_LOCAL_OUT]     = nf_route_table_hook,
+       },
 };
 
 static int __init nft_chain_route_init(void)
index db5b54ad59125f0fd2fddf1d7a06078633d294fb..2701cb3d88e9372cd226d0f5f58fbdc3d9f7582d 100644 (file)
@@ -61,6 +61,7 @@
 #include <net/nexthop.h>
 #include <net/lwtunnel.h>
 #include <net/ip_tunnels.h>
+#include <net/l3mdev.h>
 
 #include <asm/uaccess.h>
 
@@ -142,6 +143,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
        struct net_device *loopback_dev = net->loopback_dev;
        int cpu;
 
+       if (dev == loopback_dev)
+               return;
+
        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
                struct rt6_info *rt;
@@ -151,14 +155,12 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
                        struct inet6_dev *rt_idev = rt->rt6i_idev;
                        struct net_device *rt_dev = rt->dst.dev;
 
-                       if (rt_idev && (rt_idev->dev == dev || !dev) &&
-                           rt_idev->dev != loopback_dev) {
+                       if (rt_idev->dev == dev) {
                                rt->rt6i_idev = in6_dev_get(loopback_dev);
                                in6_dev_put(rt_idev);
                        }
 
-                       if (rt_dev && (rt_dev == dev || !dev) &&
-                           rt_dev != loopback_dev) {
+                       if (rt_dev == dev) {
                                rt->dst.dev = loopback_dev;
                                dev_hold(rt->dst.dev);
                                dev_put(rt_dev);
@@ -247,12 +249,6 @@ static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
 {
 }
 
-static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
-                                        unsigned long old)
-{
-       return NULL;
-}
-
 static struct dst_ops ip6_dst_blackhole_ops = {
        .family                 =       AF_INET6,
        .destroy                =       ip6_dst_destroy,
@@ -261,7 +257,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
        .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
        .redirect               =       ip6_rt_blackhole_redirect,
-       .cow_metrics            =       ip6_rt_blackhole_cow_metrics,
+       .cow_metrics            =       dst_cow_metrics_generic,
        .neigh_lookup           =       ip6_neigh_lookup,
 };
 
@@ -318,6 +314,15 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
 
 #endif
 
+static void rt6_info_init(struct rt6_info *rt)
+{
+       struct dst_entry *dst = &rt->dst;
+
+       memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+       INIT_LIST_HEAD(&rt->rt6i_siblings);
+       INIT_LIST_HEAD(&rt->rt6i_uncached);
+}
+
 /* allocate dst with ip6_dst_ops */
 static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
@@ -326,13 +331,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        0, DST_OBSOLETE_FORCE_CHK, flags);
 
-       if (rt) {
-               struct dst_entry *dst = &rt->dst;
+       if (rt)
+               rt6_info_init(rt);
 
-               memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
-               INIT_LIST_HEAD(&rt->rt6i_siblings);
-               INIT_LIST_HEAD(&rt->rt6i_uncached);
-       }
        return rt;
 }
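
The rt6_info_init() helper factored out above preserves a long-standing idiom: dst is the first member of struct rt6_info, so memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)) zeroes every byte after the embedded dst_entry in one call. The same trick outside the kernel:

    /* The memset(dst + 1, ...) idiom: zero everything after the embedded
     * first member in one call.
     */
    #include <stdio.h>
    #include <string.h>

    struct base { long refcnt; };

    struct derived {
            struct base base;       /* must be the first member */
            int a, b;
            void *p;
    };

    static void derived_init(struct derived *d)
    {
            struct base *bp = &d->base;

            /* clears a, b, p and any padding, but leaves base intact */
            memset(bp + 1, 0, sizeof(*d) - sizeof(*bp));
    }

    int main(void)
    {
            struct derived d = { { 1 }, 42, 43, &d };

            derived_init(&d);
            printf("%ld %d %d %p\n", d.base.refcnt, d.a, d.b, d.p);
            /* "1 0 0 (nil)" with glibc's printf */
            return 0;
    }
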
 
@@ -1044,6 +1045,9 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;
 
+       if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+               oif = 0;
+
 redo_rt6_select:
        rt = rt6_select(fn, oif, strict);
        if (rt->rt6i_nsiblings)
@@ -1141,7 +1145,7 @@ void ip6_route_input(struct sk_buff *skb)
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct ip_tunnel_info *tun_info;
        struct flowi6 fl6 = {
-               .flowi6_iif = skb->dev->ifindex,
+               .flowi6_iif = l3mdev_fib_oif(skb->dev),
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowlabel = ip6_flowinfo(iph),
@@ -1165,15 +1169,22 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
 struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
                                    struct flowi6 *fl6)
 {
+       struct dst_entry *dst;
        int flags = 0;
+       bool any_src;
+
+       dst = l3mdev_rt6_dst_by_oif(net, fl6);
+       if (dst)
+               return dst;
 
        fl6->flowi6_iif = LOOPBACK_IFINDEX;
 
+       any_src = ipv6_addr_any(&fl6->saddr);
        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
-           fl6->flowi6_oif)
+           (fl6->flowi6_oif && any_src))
                flags |= RT6_LOOKUP_F_IFACE;
 
-       if (!ipv6_addr_any(&fl6->saddr))
+       if (!any_src)
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        else if (sk)
                flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
@@ -1189,24 +1200,20 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
 
        rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
        if (rt) {
-               new = &rt->dst;
-
-               memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
+               rt6_info_init(rt);
 
+               new = &rt->dst;
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard_out;
 
-               if (dst_metrics_read_only(&ort->dst))
-                       new->_metrics = ort->dst._metrics;
-               else
-                       dst_copy_metrics(new, &ort->dst);
+               dst_copy_metrics(new, &ort->dst);
                rt->rt6i_idev = ort->rt6i_idev;
                if (rt->rt6i_idev)
                        in6_dev_hold(rt->rt6i_idev);
 
                rt->rt6i_gateway = ort->rt6i_gateway;
-               rt->rt6i_flags = ort->rt6i_flags;
+               rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
                rt->rt6i_metric = 0;
 
                memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -2263,7 +2270,6 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
                                           unsigned int pref)
 {
        struct fib6_config cfg = {
-               .fc_table       = RT6_TABLE_INFO,
                .fc_metric      = IP6_RT_PRIO_USER,
                .fc_ifindex     = ifindex,
                .fc_dst_len     = prefixlen,
@@ -2274,6 +2280,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
                .fc_nlinfo.nl_net = net,
        };
 
+       cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
        cfg.fc_dst = *prefix;
        cfg.fc_gateway = *gwaddr;
 
@@ -2314,7 +2321,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
                                     unsigned int pref)
 {
        struct fib6_config cfg = {
-               .fc_table       = RT6_TABLE_DFLT,
+               .fc_table       = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
                .fc_metric      = IP6_RT_PRIO_USER,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -2361,7 +2368,8 @@ static void rtmsg_to_fib6_config(struct net *net,
 {
        memset(cfg, 0, sizeof(*cfg));
 
-       cfg->fc_table = RT6_TABLE_MAIN;
+       cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
+                        : RT6_TABLE_MAIN;
        cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
        cfg->fc_metric = rtmsg->rtmsg_metric;
        cfg->fc_expires = rtmsg->rtmsg_info;
@@ -2470,6 +2478,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                    const struct in6_addr *addr,
                                    bool anycast)
 {
+       u32 tb_id;
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
                                            DST_NOCOUNT);
@@ -2492,7 +2501,8 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        rt->rt6i_gateway  = *addr;
        rt->rt6i_dst.addr = *addr;
        rt->rt6i_dst.plen = 128;
-       rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
+       tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
+       rt->rt6i_table = fib6_get_table(net, tb_id);
        rt->dst.flags |= DST_NOCACHE;
 
        atomic_set(&rt->dst.__refcnt, 1);
@@ -2597,7 +2607,8 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
 
        fib6_clean_all(net, fib6_ifdown, &adn);
        icmp6_clean_all(fib6_ifdown, &adn);
-       rt6_uncached_list_flush_dev(net, dev);
+       if (dev)
+               rt6_uncached_list_flush_dev(net, dev);
 }
 
 struct rt6_mtu_change_arg {
@@ -3254,6 +3265,11 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        } else {
                fl6.flowi6_oif = oif;
 
+               if (netif_index_is_l3_master(net, oif)) {
+                       fl6.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC |
+                                          FLOWI_FLAG_SKIP_NH_OIF;
+               }
+
                rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
        }
 
index 2887c8474b650468a76919fe9d25aff43361e1ba..714bc5ad096e9beda5226c4caf8ddd4efae038bf 100644 (file)
@@ -437,7 +437,6 @@ out:
 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
-                             u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc,
                              bool attach_req)
 {
@@ -462,7 +461,6 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-               skb_set_queue_mapping(skb, queue_mapping);
                err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
                err = net_xmit_eval(err);
        }
@@ -967,7 +965,9 @@ drop:
 
 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
-                                        struct dst_entry *dst)
+                                        struct dst_entry *dst,
+                                        struct request_sock *req_unhash,
+                                        bool *own_req)
 {
        struct inet_request_sock *ireq;
        struct ipv6_pinfo *newnp;
@@ -986,7 +986,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                 *      v6 mapped
                 */
 
-               newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
+               newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
+                                            req_unhash, own_req);
 
                if (!newsk)
                        return NULL;
@@ -1147,7 +1148,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                tcp_done(newsk);
                goto out;
        }
-       __inet_hash(newsk, NULL);
+       *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 
        return newsk;
 
@@ -1363,6 +1364,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
        th = tcp_hdr(skb);
        hdr = ipv6_hdr(skb);
 
+lookup:
        sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
                                inet6_iif(skb));
        if (!sk)
@@ -1382,8 +1384,12 @@ process:
                        reqsk_put(req);
                        goto discard_it;
                }
-               if (sk->sk_state == TCP_LISTEN)
+               if (likely(sk->sk_state == TCP_LISTEN)) {
                        nsk = tcp_check_req(sk, skb, req, false);
+               } else {
+                       inet_csk_reqsk_queue_drop_and_put(sk, req);
+                       goto lookup;
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_it;
index 9db067a11b525c4bb026fa1d352c05a591e60124..4d09ce6fa90e666bfdeda09cbdc8c7b0cb8b5824 100644 (file)
@@ -79,6 +79,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 
        if (!skb->ignore_df && skb->len > mtu) {
                skb->dev = dst->dev;
+               skb->protocol = htons(ETH_P_IPV6);
 
                if (xfrm6_local_dontfrag(skb))
                        xfrm6_local_rxpmtu(skb, mtu);
@@ -143,6 +144,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;
        int mtu;
+       bool toobig;
 
 #ifdef CONFIG_NETFILTER
        if (!x) {
@@ -151,25 +153,29 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
        }
 #endif
 
+       if (x->props.mode != XFRM_MODE_TUNNEL)
+               goto skip_frag;
+
        if (skb->protocol == htons(ETH_P_IPV6))
                mtu = ip6_skb_dst_mtu(skb);
        else
                mtu = dst_mtu(skb_dst(skb));
 
-       if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
+       toobig = skb->len > mtu && !skb_is_gso(skb);
+
+       if (toobig && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
                return -EMSGSIZE;
-       } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
+       } else if (!skb->ignore_df && toobig && skb->sk) {
                xfrm_local_error(skb, mtu);
                return -EMSGSIZE;
        }
 
-       if (x->props.mode == XFRM_MODE_TUNNEL &&
-           ((skb->len > mtu && !skb_is_gso(skb)) ||
-               dst_allfrag(skb_dst(skb)))) {
+       if (toobig || dst_allfrag(skb_dst(skb)))
                return ip6_fragment(net, sk, skb,
                                    __xfrm6_output_finish);
-       }
+
+skip_frag:
        return x->outer_mode->afinfo->output_finish(sk, skb);
 }
 
index 08c9c93f352737ba21eebad04f6e8b3f268dbdde..2cc5840f943d566ad8d2012d95a8ccac03dd4927 100644 (file)
@@ -177,7 +177,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                        return;
 
                case IPPROTO_ICMPV6:
-                       if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
+                       if (!onlyproto && (nh + offset + 2 < skb->data ||
+                           pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
                                u8 *icmp;
 
                                nh = skb_network_header(skb);
@@ -191,7 +192,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
                case IPPROTO_MH:
                        offset += ipv6_optlen(exthdr);
-                       if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
+                       if (!onlyproto && (nh + offset + 3 < skb->data ||
+                           pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
                                struct ip6_mh *mh;
 
                                nh = skb_network_header(skb);
index a26c401ef4a4431b2957d5b46c05c0c2a35c90bd..43964594aa12d9864c00a3b778d77060084e6a14 100644 (file)
@@ -1839,7 +1839,7 @@ static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
        for (element = hashbin_get_first(iter->hashbin);
             element != NULL;
             element = hashbin_get_next(iter->hashbin)) {
-               if (!off || *off-- == 0) {
+               if (!off || (*off)-- == 0) {
                        /* NB: hashbin left locked */
                        return element;
                }
index 83a70688784b8449603e13f32ec26ff6fce06639..f9c9ecb0cdd3b3eea618538fda2e884583f9bc09 100644 (file)
@@ -261,7 +261,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
 
                err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
 
-               /* Error is cleare after succecful sending to at least one
+               /* Error is cleared after successful sending to at least one
                 * registered KM */
                if ((broadcast_flags & BROADCAST_REGISTERED) && err)
                        err = err2;
index 3636b45440ab40ecc3c618a916b1305bacfe1bc7..4d2aaebd4f97d8692758263e3d9759d4300abdb6 100644 (file)
@@ -151,7 +151,7 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 
        for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) {
                if (test_bit(i, local->hw.flags))
-                       pos += scnprintf(pos, end - pos, "%s",
+                       pos += scnprintf(pos, end - pos, "%s\n",
                                         hw_flag_names[i]);
        }
 
index 7d14bbf8682bad30a19a4a6dd6edb6b7dcb7ca0d..5bad05e9af90fc76201f4bd9a54b931ffd5c61bc 100644 (file)
@@ -101,6 +101,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
         * when it wakes up for the next time.
         */
        set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
+       ieee80211_clear_fast_xmit(sta);
 
        /*
         * This code races in the following way:
index 09e38a860a59ec59a445d404dfb1ae0b55c6cce0..bdc224d5053ae3478a01565754ec6942c2d197a6 100644 (file)
@@ -1218,8 +1218,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 
        if (!tx->sta)
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
-       else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
+       else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+               ieee80211_check_fast_xmit(tx->sta);
+       }
 
        info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
 
@@ -2451,7 +2453,8 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
 
        if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
            test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
-           test_sta_flag(sta, WLAN_STA_PS_DELIVER))
+           test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
+           test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
                goto out;
 
        if (sdata->noack_map)
index 7799d3c41fe2aa2f9e507579464bcacbcda7d645..a13d02b7cee47401357f1fd7ff5e0dc74becc9ad 100644 (file)
@@ -55,7 +55,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec)
 
                msl = container_of(sl, struct mac802154_llsec_seclevel, level);
                list_del(&sl->list);
-               kfree(msl);
+               kzfree(msl);
        }
 
        list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
@@ -72,7 +72,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec)
                mkey = container_of(key->key, struct mac802154_llsec_key, key);
                list_del(&key->list);
                llsec_key_put(mkey);
-               kfree(key);
+               kzfree(key);
        }
 }
 
@@ -161,7 +161,7 @@ err_tfm:
                if (key->tfm[i])
                        crypto_free_aead(key->tfm[i]);
 
-       kfree(key);
+       kzfree(key);
        return NULL;
 }
 
@@ -176,7 +176,7 @@ static void llsec_key_release(struct kref *ref)
                crypto_free_aead(key->tfm[i]);
 
        crypto_free_blkcipher(key->tfm0);
-       kfree(key);
+       kzfree(key);
 }
 
 static struct mac802154_llsec_key*
@@ -267,7 +267,7 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec,
        return 0;
 
 fail:
-       kfree(new);
+       kzfree(new);
        return -ENOMEM;
 }
 
@@ -347,10 +347,10 @@ static void llsec_dev_free(struct mac802154_llsec_device *dev)
                                      devkey);
 
                list_del(&pos->list);
-               kfree(devkey);
+               kzfree(devkey);
        }
 
-       kfree(dev);
+       kzfree(dev);
 }
 
 int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
@@ -681,7 +681,7 @@ llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 
        rc = crypto_aead_encrypt(req);
 
-       kfree(req);
+       kzfree(req);
 
        return rc;
 }
@@ -881,7 +881,7 @@ llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 
        rc = crypto_aead_decrypt(req);
 
-       kfree(req);
+       kzfree(req);
        skb_trim(skb, skb->len - authlen);
 
        return rc;
@@ -921,7 +921,7 @@ llsec_update_devkey_record(struct mac802154_llsec_device *dev,
                if (!devkey)
                        list_add_rcu(&next->devkey.list, &dev->dev.keys);
                else
-                       kfree(next);
+                       kzfree(next);
 
                spin_unlock_bh(&dev->lock);
        }
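
Every kfree() of key material, AEAD requests, and device-key records in llsec.c becomes kzfree(), which zeroes the allocation before returning it to the allocator so secrets do not linger in freed memory. A userspace analogue (the kernel recovers the size via ksize() internally; here it has to be passed in, and hardened code would prefer explicit_bzero() where available so the compiler cannot elide the wipe):

    /* Userspace analogue of kzfree(): scrub, then free. */
    #include <stdlib.h>
    #include <string.h>

    static void kzfree_like(void *p, size_t size)
    {
            if (!p)
                    return;
            memset(p, 0, size);
            free(p);
    }

    int main(void)
    {
            unsigned char *key = malloc(16);

            if (!key)
                    return 1;
            memcpy(key, "0123456789abcdef", 16);
            kzfree_like(key, 16);   /* key bytes are wiped before the free */
            return 0;
    }
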
index bb185a28de9890d2f4b3c57d1ca7af7600f9b2aa..c70d750148b66759ce47525c6f6b348c2e69efaa 100644 (file)
 #include <net/ipv6.h>
 #include <net/addrconf.h>
 #endif
+#include <net/nexthop.h>
 #include "internal.h"
 
-#define LABEL_NOT_SPECIFIED (1<<20)
-#define MAX_NEW_LABELS 2
-
-/* This maximum ha length copied from the definition of struct neighbour */
-#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
-
-enum mpls_payload_type {
-       MPT_UNSPEC, /* IPv4 or IPv6 */
-       MPT_IPV4 = 4,
-       MPT_IPV6 = 6,
-
-       /* Other types not implemented:
-        *  - Pseudo-wire with or without control word (RFC4385)
-        *  - GAL (RFC5586)
-        */
-};
-
-struct mpls_route { /* next hop label forwarding entry */
-       struct net_device __rcu *rt_dev;
-       struct rcu_head         rt_rcu;
-       u32                     rt_label[MAX_NEW_LABELS];
-       u8                      rt_protocol; /* routing protocol that set this entry */
-       u8                      rt_payload_type;
-       u8                      rt_labels;
-       u8                      rt_via_alen;
-       u8                      rt_via_table;
-       u8                      rt_via[0];
-};
+/* Maximum number of labels to look ahead at when selecting a path for
+ * a multipath route
+ */
+#define MAX_MP_SELECT_LABELS 4
 
 static int zero = 0;
 static int label_limit = (1 << 20) - 1;
@@ -80,10 +57,24 @@ bool mpls_output_possible(const struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(mpls_output_possible);
 
-static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
+static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
+{
+       u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn], VIA_ALEN_ALIGN);
+       int nh_index = nh - rt->rt_nh;
+
+       return nh0_via + rt->rt_max_alen * nh_index;
+}
+
+static const u8 *mpls_nh_via(const struct mpls_route *rt,
+                            const struct mpls_nh *nh)
+{
+       return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
+}
+
+static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
 {
        /* The size of the layer 2.5 labels to be added for this route */
-       return rt->rt_labels * sizeof(struct mpls_shim_hdr);
+       return nh->nh_labels * sizeof(struct mpls_shim_hdr);
 }
 
 unsigned int mpls_dev_mtu(const struct net_device *dev)
@@ -105,6 +96,80 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 }
 EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
 
+static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+                                            struct sk_buff *skb, bool bos)
+{
+       struct mpls_entry_decoded dec;
+       struct mpls_shim_hdr *hdr;
+       bool eli_seen = false;
+       int label_index;
+       int nh_index = 0;
+       u32 hash = 0;
+
+       /* No need to look further into packet if there's only
+        * one path
+        */
+       if (rt->rt_nhn == 1)
+               goto out;
+
+       for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
+            label_index++) {
+               if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
+                       break;
+
+               /* Read and decode the current label */
+               hdr = mpls_hdr(skb) + label_index;
+               dec = mpls_entry_decode(hdr);
+
+               /* RFC6790 - reserved labels MUST NOT be used as keys
+                * for the load-balancing function
+                */
+               if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
+                       hash = jhash_1word(dec.label, hash);
+
+                       /* The entropy label follows the entropy label
+                        * indicator, so this means that the entropy
+                        * label was just added to the hash - no need to
+                        * go any deeper either in the label stack or in the
+                        * payload
+                        */
+                       if (eli_seen)
+                               break;
+               } else if (dec.label == MPLS_LABEL_ENTROPY) {
+                       eli_seen = true;
+               }
+
+               bos = dec.bos;
+               if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
+                                        sizeof(struct iphdr))) {
+                       const struct iphdr *v4hdr;
+
+                       v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
+                                                      label_index);
+                       if (v4hdr->version == 4) {
+                               hash = jhash_3words(ntohl(v4hdr->saddr),
+                                                   ntohl(v4hdr->daddr),
+                                                   v4hdr->protocol, hash);
+                       } else if (v4hdr->version == 6 &&
+                               pskb_may_pull(skb, sizeof(*hdr) * label_index +
+                                             sizeof(struct ipv6hdr))) {
+                               const struct ipv6hdr *v6hdr;
+
+                               v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
+                                                               label_index);
+
+                               hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
+                               hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
+                               hash = jhash_1word(v6hdr->nexthdr, hash);
+                       }
+               }
+       }
+
+       nh_index = hash % rt->rt_nhn;
+out:
+       return &rt->rt_nh[nh_index];
+}
+
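
mpls_select_multipath() above folds every non-reserved label, and at the bottom of stack the IPv4/IPv6 flow keys, into a Jenkins hash and reduces it modulo rt_nhn, so packets of one flow always take the same next hop. A minimal userspace sketch of the label-hashing part of that selection, with an illustrative 32-bit mixer standing in for the kernel's jhash_1word():

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative stand-in for jhash_1word(); any decent 32-bit
     * mixer gives the same load-balancing behaviour in spirit. */
    static uint32_t mix32(uint32_t v, uint32_t seed)
    {
            uint32_t h = v + seed;

            h ^= h >> 16; h *= 0x7feb352d;
            h ^= h >> 15; h *= 0x846ca68b;
            h ^= h >> 16;
            return h;
    }

    #define FIRST_UNRESERVED 16 /* labels 0-15 are reserved, RFC 3032/6790 */

    /* Hash the non-reserved labels of a stack, then reduce modulo the
     * number of next hops, mirroring "hash % rt->rt_nhn" above. */
    static unsigned int pick_nexthop(const uint32_t *label, size_t n_labels,
                                     unsigned int n_nexthops)
    {
            uint32_t hash = 0;
            size_t i;

            if (n_nexthops == 1)
                    return 0;
            for (i = 0; i < n_labels; i++)
                    if (label[i] >= FIRST_UNRESERVED)
                            hash = mix32(label[i], hash);
            return hash % n_nexthops;
    }
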
 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
                        struct mpls_entry_decoded dec)
 {
@@ -159,6 +224,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        struct net *net = dev_net(dev);
        struct mpls_shim_hdr *hdr;
        struct mpls_route *rt;
+       struct mpls_nh *nh;
        struct mpls_entry_decoded dec;
        struct net_device *out_dev;
        struct mpls_dev *mdev;
@@ -196,8 +262,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        if (!rt)
                goto drop;
 
+       nh = mpls_select_multipath(rt, skb, dec.bos);
+       if (!nh)
+               goto drop;
+
        /* Find the output device */
-       out_dev = rcu_dereference(rt->rt_dev);
+       out_dev = rcu_dereference(nh->nh_dev);
        if (!mpls_output_possible(out_dev))
                goto drop;
 
@@ -212,7 +282,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        dec.ttl -= 1;
 
        /* Verify the destination can hold the packet */
-       new_header_size = mpls_rt_header_size(rt);
+       new_header_size = mpls_nh_header_size(nh);
        mtu = mpls_dev_mtu(out_dev);
        if (mpls_pkt_too_big(skb, mtu - new_header_size))
                goto drop;
@@ -240,13 +310,14 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
                /* Push the new labels */
                hdr = mpls_hdr(skb);
                bos = dec.bos;
-               for (i = rt->rt_labels - 1; i >= 0; i--) {
-                       hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
+               for (i = nh->nh_labels - 1; i >= 0; i--) {
+                       hdr[i] = mpls_entry_encode(nh->nh_label[i],
+                                                  dec.ttl, 0, bos);
                        bos = false;
                }
        }
 
-       err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
+       err = neigh_xmit(nh->nh_via_table, out_dev, mpls_nh_via(rt, nh), skb);
        if (err)
                net_dbg_ratelimited("%s: packet transmission failed: %d\n",
                                    __func__, err);
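
When the forwarding path pushes nh_labels fresh entries, the loop above walks from the innermost label outward so only the bottom entry can carry the S bit. A sketch of that encode order with the shim fields packed by hand (host byte order for readability; on the wire the entry is big-endian):

    #include <stdint.h>

    /* An MPLS shim entry: label(20) | TC(3) | S(1) | TTL(8). */
    static uint32_t encode_shim(uint32_t label, uint8_t tc, int bos,
                                uint8_t ttl)
    {
            return (label << 12) | ((uint32_t)(tc & 7) << 9) |
                   ((bos ? 1u : 0u) << 8) | ttl;
    }

    /* Push n labels so that only hdr[n-1] can have S set, and only
     * when the payload underneath is already bottom of stack. */
    static void push_labels(uint32_t *hdr, const uint32_t *label, int n,
                            uint8_t ttl, int payload_is_bos)
    {
            int bos = payload_is_bos;
            int i;

            for (i = n - 1; i >= 0; i--) {
                    hdr[i] = encode_shim(label[i], 0, bos, ttl);
                    bos = 0;
            }
    }
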
@@ -270,24 +341,33 @@ static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
 struct mpls_route_config {
        u32                     rc_protocol;
        u32                     rc_ifindex;
-       u16                     rc_via_table;
-       u16                     rc_via_alen;
+       u8                      rc_via_table;
+       u8                      rc_via_alen;
        u8                      rc_via[MAX_VIA_ALEN];
        u32                     rc_label;
-       u32                     rc_output_labels;
+       u8                      rc_output_labels;
        u32                     rc_output_label[MAX_NEW_LABELS];
        u32                     rc_nlflags;
        enum mpls_payload_type  rc_payload_type;
        struct nl_info          rc_nlinfo;
+       struct rtnexthop        *rc_mp;
+       int                     rc_mp_len;
 };
 
-static struct mpls_route *mpls_rt_alloc(size_t alen)
+static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen)
 {
+       u8 max_alen_aligned = ALIGN(max_alen, VIA_ALEN_ALIGN);
        struct mpls_route *rt;
 
-       rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL);
-       if (rt)
-               rt->rt_via_alen = alen;
+       rt = kzalloc(ALIGN(sizeof(*rt) + num_nh * sizeof(*rt->rt_nh),
+                          VIA_ALEN_ALIGN) +
+                    num_nh * max_alen_aligned,
+                    GFP_KERNEL);
+       if (rt) {
+               rt->rt_nhn = num_nh;
+               rt->rt_max_alen = max_alen_aligned;
+       }
+
        return rt;
 }
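
mpls_rt_alloc() now sizes one allocation to hold the route header, the next-hop array, alignment padding, and num_nh via slots of max_alen_aligned bytes each; the resulting layout is pictured in the header hunk further down. A quick standalone check of that arithmetic, using made-up struct sizes:

    #include <stdio.h>
    #include <stddef.h>

    #define VIA_ALEN_ALIGN sizeof(unsigned long)
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
            size_t rt_size = 16;  /* stand-in for sizeof(struct mpls_route) */
            size_t nh_size = 24;  /* stand-in for sizeof(struct mpls_nh) */
            size_t max_alen = 6;  /* e.g. an Ethernet address */
            int num_nh = 3;
            size_t max_alen_aligned = ALIGN_UP(max_alen, VIA_ALEN_ALIGN);
            size_t total;

            total = ALIGN_UP(rt_size + num_nh * nh_size, VIA_ALEN_ALIGN) +
                    num_nh * max_alen_aligned;
            printf("allocate %zu bytes for %d next hops\n", total, num_nh);
            return 0;
    }
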
 
@@ -312,25 +392,22 @@ static void mpls_notify_route(struct net *net, unsigned index,
 }
 
 static void mpls_route_update(struct net *net, unsigned index,
-                             struct net_device *dev, struct mpls_route *new,
+                             struct mpls_route *new,
                              const struct nl_info *info)
 {
        struct mpls_route __rcu **platform_label;
-       struct mpls_route *rt, *old = NULL;
+       struct mpls_route *rt;
 
        ASSERT_RTNL();
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
        rt = rtnl_dereference(platform_label[index]);
-       if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) {
-               rcu_assign_pointer(platform_label[index], new);
-               old = rt;
-       }
+       rcu_assign_pointer(platform_label[index], new);
 
-       mpls_notify_route(net, index, old, new, info);
+       mpls_notify_route(net, index, rt, new, info);
 
        /* If we removed a route free it now */
-       mpls_rt_free(old);
+       mpls_rt_free(rt);
 }
 
 static unsigned find_free_label(struct net *net)
@@ -350,7 +427,8 @@ static unsigned find_free_label(struct net *net)
 }
 
 #if IS_ENABLED(CONFIG_INET)
-static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet_fib_lookup_dev(struct net *net,
+                                             const void *addr)
 {
        struct net_device *dev;
        struct rtable *rt;
@@ -369,14 +447,16 @@ static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
        return dev;
 }
 #else
-static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet_fib_lookup_dev(struct net *net,
+                                             const void *addr)
 {
        return ERR_PTR(-EAFNOSUPPORT);
 }
 #endif
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet6_fib_lookup_dev(struct net *net,
+                                              const void *addr)
 {
        struct net_device *dev;
        struct dst_entry *dst;
@@ -399,47 +479,234 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
        return dev;
 }
 #else
-static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet6_fib_lookup_dev(struct net *net,
+                                              const void *addr)
 {
        return ERR_PTR(-EAFNOSUPPORT);
 }
 #endif
 
 static struct net_device *find_outdev(struct net *net,
-                                     struct mpls_route_config *cfg)
+                                     struct mpls_route *rt,
+                                     struct mpls_nh *nh, int oif)
 {
        struct net_device *dev = NULL;
 
-       if (!cfg->rc_ifindex) {
-               switch (cfg->rc_via_table) {
+       if (!oif) {
+               switch (nh->nh_via_table) {
                case NEIGH_ARP_TABLE:
-                       dev = inet_fib_lookup_dev(net, cfg->rc_via);
+                       dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
                        break;
                case NEIGH_ND_TABLE:
-                       dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+                       dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
                        break;
                case NEIGH_LINK_TABLE:
                        break;
                }
        } else {
-               dev = dev_get_by_index(net, cfg->rc_ifindex);
+               dev = dev_get_by_index(net, oif);
        }
 
        if (!dev)
                return ERR_PTR(-ENODEV);
 
+       /* The caller is holding rtnl anyway, so release the dev reference */
+       dev_put(dev);
+
        return dev;
 }
 
+static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
+                             struct mpls_nh *nh, int oif)
+{
+       struct net_device *dev = NULL;
+       int err = -ENODEV;
+
+       dev = find_outdev(net, rt, nh, oif);
+       if (IS_ERR(dev)) {
+               err = PTR_ERR(dev);
+               dev = NULL;
+               goto errout;
+       }
+
+       /* Ensure this is a supported device */
+       err = -EINVAL;
+       if (!mpls_dev_get(dev))
+               goto errout;
+
+       RCU_INIT_POINTER(nh->nh_dev, dev);
+
+       return 0;
+
+errout:
+       return err;
+}
+
+static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
+                                 struct mpls_route *rt)
+{
+       struct net *net = cfg->rc_nlinfo.nl_net;
+       struct mpls_nh *nh = rt->rt_nh;
+       int err;
+       int i;
+
+       if (!nh)
+               return -ENOMEM;
+
+       err = -EINVAL;
+       /* Ensure only a supported number of labels are present */
+       if (cfg->rc_output_labels > MAX_NEW_LABELS)
+               goto errout;
+
+       nh->nh_labels = cfg->rc_output_labels;
+       for (i = 0; i < nh->nh_labels; i++)
+               nh->nh_label[i] = cfg->rc_output_label[i];
+
+       nh->nh_via_table = cfg->rc_via_table;
+       memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
+       nh->nh_via_alen = cfg->rc_via_alen;
+
+       err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
+       if (err)
+               goto errout;
+
+       return 0;
+
+errout:
+       return err;
+}
+
+static int mpls_nh_build(struct net *net, struct mpls_route *rt,
+                        struct mpls_nh *nh, int oif,
+                        struct nlattr *via, struct nlattr *newdst)
+{
+       int err = -ENOMEM;
+
+       if (!nh)
+               goto errout;
+
+       if (newdst) {
+               err = nla_get_labels(newdst, MAX_NEW_LABELS,
+                                    &nh->nh_labels, nh->nh_label);
+               if (err)
+                       goto errout;
+       }
+
+       err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
+                         __mpls_nh_via(rt, nh));
+       if (err)
+               goto errout;
+
+       err = mpls_nh_assign_dev(net, rt, nh, oif);
+       if (err)
+               goto errout;
+
+       return 0;
+
+errout:
+       return err;
+}
+
+static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
+                              u8 cfg_via_alen, u8 *max_via_alen)
+{
+       int nhs = 0;
+       int remaining = len;
+
+       if (!rtnh) {
+               *max_via_alen = cfg_via_alen;
+               return 1;
+       }
+
+       *max_via_alen = 0;
+
+       while (rtnh_ok(rtnh, remaining)) {
+               struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+               int attrlen;
+
+               attrlen = rtnh_attrlen(rtnh);
+               nla = nla_find(attrs, attrlen, RTA_VIA);
+               if (nla && nla_len(nla) >=
+                   offsetof(struct rtvia, rtvia_addr)) {
+                       int via_alen = nla_len(nla) -
+                               offsetof(struct rtvia, rtvia_addr);
+
+                       if (via_alen <= MAX_VIA_ALEN)
+                               *max_via_alen = max_t(u16, *max_via_alen,
+                                                     via_alen);
+               }
+
+               nhs++;
+               rtnh = rtnh_next(rtnh, &remaining);
+       }
+
+       /* leftover implies invalid nexthop configuration, discard it */
+       return remaining > 0 ? 0 : nhs;
+}
+
+static int mpls_nh_build_multi(struct mpls_route_config *cfg,
+                              struct mpls_route *rt)
+{
+       struct rtnexthop *rtnh = cfg->rc_mp;
+       struct nlattr *nla_via, *nla_newdst;
+       int remaining = cfg->rc_mp_len;
+       int nhs = 0;
+       int err = 0;
+
+       change_nexthops(rt) {
+               int attrlen;
+
+               nla_via = NULL;
+               nla_newdst = NULL;
+
+               err = -EINVAL;
+               if (!rtnh_ok(rtnh, remaining))
+                       goto errout;
+
+               /* neither weighted multipath nor any flags
+                * are supported
+                */
+               if (rtnh->rtnh_hops || rtnh->rtnh_flags)
+                       goto errout;
+
+               attrlen = rtnh_attrlen(rtnh);
+               if (attrlen > 0) {
+                       struct nlattr *attrs = rtnh_attrs(rtnh);
+
+                       nla_via = nla_find(attrs, attrlen, RTA_VIA);
+                       nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
+               }
+
+               if (!nla_via)
+                       goto errout;
+
+               err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
+                                   rtnh->rtnh_ifindex, nla_via,
+                                   nla_newdst);
+               if (err)
+                       goto errout;
+
+               rtnh = rtnh_next(rtnh, &remaining);
+               nhs++;
+       } endfor_nexthops(rt);
+
+       rt->rt_nhn = nhs;
+
+       return 0;
+
+errout:
+       return err;
+}
+
 static int mpls_route_add(struct mpls_route_config *cfg)
 {
        struct mpls_route __rcu **platform_label;
        struct net *net = cfg->rc_nlinfo.nl_net;
-       struct net_device *dev = NULL;
        struct mpls_route *rt, *old;
-       unsigned index;
-       int i;
        int err = -EINVAL;
+       u8 max_via_alen;
+       unsigned index;
+       int nhs;
 
        index = cfg->rc_label;
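
Both mpls_count_nexthops() and mpls_nh_build_multi() above walk the RTA_MULTIPATH payload: a packed run of variable-length struct rtnexthop records, each followed by its own nested attributes, where leftover bytes at the end mean the configuration is malformed. The walk pattern restated as a simplified standalone sketch (header fields trimmed, and the kernel's RTNH_ALIGN padding ignored):

    #include <stdint.h>
    #include <stddef.h>

    /* Simplified view of struct rtnexthop: a length-prefixed record
     * whose nested attributes follow the fixed header. */
    struct rtnh_rec {
            uint16_t len;     /* record length, attributes included */
            uint8_t  flags;
            uint8_t  hops;
            int32_t  ifindex;
            /* nested attributes follow */
    };

    /* Count records in a packed buffer; leftover bytes signal a bad
     * configuration, mirroring "remaining > 0 ? 0 : nhs" above. */
    static int count_records(const void *buf, int remaining)
    {
            const struct rtnh_rec *r = buf;
            int nhs = 0;

            while (remaining >= (int)sizeof(*r) &&
                   r->len >= sizeof(*r) && r->len <= remaining) {
                    nhs++;
                    remaining -= r->len;
                    r = (const void *)((const char *)r + r->len);
            }
            return remaining > 0 ? 0 : nhs;
    }
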
 
@@ -457,27 +724,6 @@ static int mpls_route_add(struct mpls_route_config *cfg)
        if (index >= net->mpls.platform_labels)
                goto errout;
 
-       /* Ensure only a supported number of labels are present */
-       if (cfg->rc_output_labels > MAX_NEW_LABELS)
-               goto errout;
-
-       dev = find_outdev(net, cfg);
-       if (IS_ERR(dev)) {
-               err = PTR_ERR(dev);
-               dev = NULL;
-               goto errout;
-       }
-
-       /* Ensure this is a supported device */
-       err = -EINVAL;
-       if (!mpls_dev_get(dev))
-               goto errout;
-
-       err = -EINVAL;
-       if ((cfg->rc_via_table == NEIGH_LINK_TABLE) &&
-           (dev->addr_len != cfg->rc_via_alen))
-               goto errout;
-
        /* Append makes no sense with mpls */
        err = -EOPNOTSUPP;
        if (cfg->rc_nlflags & NLM_F_APPEND)
@@ -497,28 +743,34 @@ static int mpls_route_add(struct mpls_route_config *cfg)
        if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
                goto errout;
 
+       err = -EINVAL;
+       nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
+                                 cfg->rc_via_alen, &max_via_alen);
+       if (nhs == 0)
+               goto errout;
+
        err = -ENOMEM;
-       rt = mpls_rt_alloc(cfg->rc_via_alen);
+       rt = mpls_rt_alloc(nhs, max_via_alen);
        if (!rt)
                goto errout;
 
-       rt->rt_labels = cfg->rc_output_labels;
-       for (i = 0; i < rt->rt_labels; i++)
-               rt->rt_label[i] = cfg->rc_output_label[i];
        rt->rt_protocol = cfg->rc_protocol;
-       RCU_INIT_POINTER(rt->rt_dev, dev);
        rt->rt_payload_type = cfg->rc_payload_type;
-       rt->rt_via_table = cfg->rc_via_table;
-       memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
 
-       mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo);
+       if (cfg->rc_mp)
+               err = mpls_nh_build_multi(cfg, rt);
+       else
+               err = mpls_nh_build_from_cfg(cfg, rt);
+       if (err)
+               goto freert;
+
+       mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
 
-       dev_put(dev);
        return 0;
 
+freert:
+       mpls_rt_free(rt);
 errout:
-       if (dev)
-               dev_put(dev);
        return err;
 }
 
@@ -538,7 +790,7 @@ static int mpls_route_del(struct mpls_route_config *cfg)
        if (index >= net->mpls.platform_labels)
                goto errout;
 
-       mpls_route_update(net, index, NULL, NULL, &cfg->rc_nlinfo);
+       mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
 
        err = 0;
 errout:
@@ -635,9 +887,11 @@ static void mpls_ifdown(struct net_device *dev)
                struct mpls_route *rt = rtnl_dereference(platform_label[index]);
                if (!rt)
                        continue;
-               if (rtnl_dereference(rt->rt_dev) != dev)
-                       continue;
-               rt->rt_dev = NULL;
+               for_nexthops(rt) {
+                       if (rtnl_dereference(nh->nh_dev) != dev)
+                               continue;
+                       nh->nh_dev = NULL;
+               } endfor_nexthops(rt);
        }
 
        mdev = mpls_dev_get(dev);
@@ -736,7 +990,7 @@ int nla_put_labels(struct sk_buff *skb, int attrtype,
 EXPORT_SYMBOL_GPL(nla_put_labels);
 
 int nla_get_labels(const struct nlattr *nla,
-                  u32 max_labels, u32 *labels, u32 label[])
+                  u32 max_labels, u8 *labels, u32 label[])
 {
        unsigned len = nla_len(nla);
        unsigned nla_labels;
@@ -781,6 +1035,48 @@ int nla_get_labels(const struct nlattr *nla,
 }
 EXPORT_SYMBOL_GPL(nla_get_labels);
 
+int nla_get_via(const struct nlattr *nla, u8 *via_alen,
+               u8 *via_table, u8 via_addr[])
+{
+       struct rtvia *via = nla_data(nla);
+       int err = -EINVAL;
+       int alen;
+
+       if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+               goto errout;
+       alen = nla_len(nla) -
+                       offsetof(struct rtvia, rtvia_addr);
+       if (alen > MAX_VIA_ALEN)
+               goto errout;
+
+       /* Validate the address family */
+       switch (via->rtvia_family) {
+       case AF_PACKET:
+               *via_table = NEIGH_LINK_TABLE;
+               break;
+       case AF_INET:
+               *via_table = NEIGH_ARP_TABLE;
+               if (alen != 4)
+                       goto errout;
+               break;
+       case AF_INET6:
+               *via_table = NEIGH_ND_TABLE;
+               if (alen != 16)
+                       goto errout;
+               break;
+       default:
+               /* Unsupported address family */
+               goto errout;
+       }
+
+       memcpy(via_addr, via->rtvia_addr, alen);
+       *via_alen = alen;
+       err = 0;
+
+errout:
+       return err;
+}
+
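
nla_get_via() centralises the RTA_VIA validation that rtm_to_route_config() used to open-code below: the payload is a 16-bit address family followed by the raw neighbour address, and the family fixes both the neighbour table and the permitted address length. A compact sketch of that check (the numeric AF_* values are the usual Linux ones; the table constants here are illustrative stand-ins for NEIGH_*_TABLE):

    #include <stdint.h>

    enum { TBL_LINK, TBL_ARP, TBL_ND };

    /* Map an RTA_VIA family/length pair to a neighbour table, or -1
     * if the combination is invalid. */
    static int classify_via(uint16_t family, int alen)
    {
            switch (family) {
            case 2:   /* AF_INET */
                    return alen == 4 ? TBL_ARP : -1;
            case 10:  /* AF_INET6 */
                    return alen == 16 ? TBL_ND : -1;
            case 17:  /* AF_PACKET */
                    return TBL_LINK;
            default:  /* unsupported address family */
                    return -1;
            }
    }
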
 static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
                               struct mpls_route_config *cfg)
 {
@@ -844,7 +1140,7 @@ static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
                        break;
                case RTA_DST:
                {
-                       u32 label_count;
+                       u8 label_count;
                        if (nla_get_labels(nla, 1, &label_count,
                                           &cfg->rc_label))
                                goto errout;
@@ -857,35 +1153,15 @@ static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
                }
                case RTA_VIA:
                {
-                       struct rtvia *via = nla_data(nla);
-                       if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
-                               goto errout;
-                       cfg->rc_via_alen   = nla_len(nla) -
-                               offsetof(struct rtvia, rtvia_addr);
-                       if (cfg->rc_via_alen > MAX_VIA_ALEN)
+                       if (nla_get_via(nla, &cfg->rc_via_alen,
+                                       &cfg->rc_via_table, cfg->rc_via))
                                goto errout;
-
-                       /* Validate the address family */
-                       switch(via->rtvia_family) {
-                       case AF_PACKET:
-                               cfg->rc_via_table = NEIGH_LINK_TABLE;
-                               break;
-                       case AF_INET:
-                               cfg->rc_via_table = NEIGH_ARP_TABLE;
-                               if (cfg->rc_via_alen != 4)
-                                       goto errout;
-                               break;
-                       case AF_INET6:
-                               cfg->rc_via_table = NEIGH_ND_TABLE;
-                               if (cfg->rc_via_alen != 16)
-                                       goto errout;
-                               break;
-                       default:
-                               /* Unsupported address family */
-                               goto errout;
-                       }
-
-                       memcpy(cfg->rc_via, via->rtvia_addr, cfg->rc_via_alen);
+                       break;
+               }
+               case RTA_MULTIPATH:
+               {
+                       cfg->rc_mp = nla_data(nla);
+                       cfg->rc_mp_len = nla_len(nla);
                        break;
                }
                default:
@@ -946,16 +1222,52 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
        rtm->rtm_type = RTN_UNICAST;
        rtm->rtm_flags = 0;
 
-       if (rt->rt_labels &&
-           nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label))
-               goto nla_put_failure;
-       if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen))
-               goto nla_put_failure;
-       dev = rtnl_dereference(rt->rt_dev);
-       if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
-               goto nla_put_failure;
        if (nla_put_labels(skb, RTA_DST, 1, &label))
                goto nla_put_failure;
+       if (rt->rt_nhn == 1) {
+               const struct mpls_nh *nh = rt->rt_nh;
+
+               if (nh->nh_labels &&
+                   nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
+                                  nh->nh_label))
+                       goto nla_put_failure;
+               if (nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
+                               nh->nh_via_alen))
+                       goto nla_put_failure;
+               dev = rtnl_dereference(nh->nh_dev);
+               if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
+                       goto nla_put_failure;
+       } else {
+               struct rtnexthop *rtnh;
+               struct nlattr *mp;
+
+               mp = nla_nest_start(skb, RTA_MULTIPATH);
+               if (!mp)
+                       goto nla_put_failure;
+
+               for_nexthops(rt) {
+                       rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
+                       if (!rtnh)
+                               goto nla_put_failure;
+
+                       dev = rtnl_dereference(nh->nh_dev);
+                       if (dev)
+                               rtnh->rtnh_ifindex = dev->ifindex;
+                       if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
+                                                           nh->nh_labels,
+                                                           nh->nh_label))
+                               goto nla_put_failure;
+                       if (nla_put_via(skb, nh->nh_via_table,
+                                       mpls_nh_via(rt, nh),
+                                       nh->nh_via_alen))
+                               goto nla_put_failure;
+
+                       /* length of rtnetlink header + attributes */
+                       rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
+               } endfor_nexthops(rt);
+
+               nla_nest_end(skb, mp);
+       }
 
        nlmsg_end(skb, nlh);
        return 0;
@@ -1000,12 +1312,30 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
 {
        size_t payload =
                NLMSG_ALIGN(sizeof(struct rtmsg))
-               + nla_total_size(2 + rt->rt_via_alen)   /* RTA_VIA */
                + nla_total_size(4);                    /* RTA_DST */
-       if (rt->rt_labels)                              /* RTA_NEWDST */
-               payload += nla_total_size(rt->rt_labels * 4);
-       if (rt->rt_dev)                                 /* RTA_OIF */
-               payload += nla_total_size(4);
+
+       if (rt->rt_nhn == 1) {
+               struct mpls_nh *nh = rt->rt_nh;
+
+               if (nh->nh_dev)
+                       payload += nla_total_size(4); /* RTA_OIF */
+               payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */
+               if (nh->nh_labels) /* RTA_NEWDST */
+                       payload += nla_total_size(nh->nh_labels * 4);
+       } else {
+               /* each nexthop is packed in an attribute */
+               size_t nhsize = 0;
+
+               for_nexthops(rt) {
+                       nhsize += nla_total_size(sizeof(struct rtnexthop));
+                       nhsize += nla_total_size(2 + nh->nh_via_alen);
+                       if (nh->nh_labels)
+                               nhsize += nla_total_size(nh->nh_labels * 4);
+               } endfor_nexthops(rt);
+               /* nested attribute */
+               payload += nla_total_size(nhsize);
+       }
+
        return payload;
 }
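
lfib_nlmsg_size() preallocates the notification buffer, so it has to sum nla_total_size() over every attribute the dump may emit, and in the multipath case wrap the per-nexthop totals in one more nested attribute. A quick sanity check of the arithmetic for a hypothetical single-nexthop route:

    #include <stdio.h>
    #include <stddef.h>

    #define NLA_ALIGN(x) (((x) + 3) & ~(size_t)3)
    #define NLA_HDRLEN   NLA_ALIGN(4)

    /* Mirrors the kernel's nla_total_size(): attribute header plus
     * payload, rounded up to 4-byte alignment. */
    static size_t nla_total_size(size_t payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
            /* via_alen = 6 stands in for an Ethernet address. */
            size_t sz = nla_total_size(4)      /* RTA_DST */
                      + nla_total_size(2 + 6)  /* RTA_VIA */
                      + nla_total_size(4)      /* RTA_OIF */
                      + nla_total_size(2 * 4); /* RTA_NEWDST, two labels */

            printf("attribute space: %zu bytes\n", sz);
            return 0;
    }
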
 
@@ -1057,25 +1387,29 @@ static int resize_platform_label_table(struct net *net, size_t limit)
        /* In case the predefined labels need to be populated */
        if (limit > MPLS_LABEL_IPV4NULL) {
                struct net_device *lo = net->loopback_dev;
-               rt0 = mpls_rt_alloc(lo->addr_len);
+               rt0 = mpls_rt_alloc(1, lo->addr_len);
                if (!rt0)
                        goto nort0;
-               RCU_INIT_POINTER(rt0->rt_dev, lo);
+               RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
                rt0->rt_protocol = RTPROT_KERNEL;
                rt0->rt_payload_type = MPT_IPV4;
-               rt0->rt_via_table = NEIGH_LINK_TABLE;
-               memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
+               rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
+               rt0->rt_nh->nh_via_alen = lo->addr_len;
+               memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
+                      lo->addr_len);
        }
        if (limit > MPLS_LABEL_IPV6NULL) {
                struct net_device *lo = net->loopback_dev;
-               rt2 = mpls_rt_alloc(lo->addr_len);
+               rt2 = mpls_rt_alloc(1, lo->addr_len);
                if (!rt2)
                        goto nort2;
-               RCU_INIT_POINTER(rt2->rt_dev, lo);
+               RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
                rt2->rt_protocol = RTPROT_KERNEL;
                rt2->rt_payload_type = MPT_IPV6;
-               rt2->rt_via_table = NEIGH_LINK_TABLE;
-               memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
+               rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
+               rt2->rt_nh->nh_via_alen = lo->addr_len;
+               memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
+                      lo->addr_len);
        }
 
        rtnl_lock();
@@ -1085,7 +1419,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
 
        /* Free any labels beyond the new table */
        for (index = limit; index < old_limit; index++)
-               mpls_route_update(net, index, NULL, NULL, NULL);
+               mpls_route_update(net, index, NULL, NULL);
 
        /* Copy over the old labels */
        cp_size = size;
index 2681a4ba6c375f3faf83498150350ddea7392ccc..bde52ce88c949e76083ec704ed010493b60d6466 100644 (file)
@@ -21,6 +21,76 @@ struct mpls_dev {
 
 struct sk_buff;
 
+#define LABEL_NOT_SPECIFIED (1 << 20)
+#define MAX_NEW_LABELS 2
+
+/* This maximum hardware address (ha) length is copied from the definition of struct neighbour */
+#define VIA_ALEN_ALIGN sizeof(unsigned long)
+#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, VIA_ALEN_ALIGN))
+
+enum mpls_payload_type {
+       MPT_UNSPEC, /* IPv4 or IPv6 */
+       MPT_IPV4 = 4,
+       MPT_IPV6 = 6,
+
+       /* Other types not implemented:
+        *  - Pseudo-wire with or without control word (RFC4385)
+        *  - GAL (RFC5586)
+        */
+};
+
+struct mpls_nh { /* next hop label forwarding entry */
+       struct net_device __rcu *nh_dev;
+       u32                     nh_label[MAX_NEW_LABELS];
+       u8                      nh_labels;
+       u8                      nh_via_alen;
+       u8                      nh_via_table;
+};
+
+/* The route, nexthops and vias are stored together in the same memory
+ * block:
+ *
+ * +----------------------+
+ * | mpls_route           |
+ * +----------------------+
+ * | mpls_nh 0            |
+ * +----------------------+
+ * | ...                  |
+ * +----------------------+
+ * | mpls_nh n-1          |
+ * +----------------------+
+ * | alignment padding    |
+ * +----------------------+
+ * | via[rt_max_alen] 0   |
+ * +----------------------+
+ * | ...                  |
+ * +----------------------+
+ * | via[rt_max_alen] n-1 |
+ * +----------------------+
+ */
+struct mpls_route { /* next hop label forwarding entry */
+       struct rcu_head         rt_rcu;
+       u8                      rt_protocol;
+       u8                      rt_payload_type;
+       u8                      rt_max_alen;
+       unsigned int            rt_nhn;
+       struct mpls_nh          rt_nh[0];
+};
+
+#define for_nexthops(rt) {                                             \
+       int nhsel; struct mpls_nh *nh;                  \
+       for (nhsel = 0, nh = (rt)->rt_nh;                               \
+            nhsel < (rt)->rt_nhn;                                      \
+            nh++, nhsel++)
+
+#define change_nexthops(rt) {                                          \
+       int nhsel; struct mpls_nh *nh;                          \
+       for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh);   \
+            nhsel < (rt)->rt_nhn;                                      \
+            nh++, nhsel++)
+
+#define endfor_nexthops(rt) }
+
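
Given the single-block layout pictured above, nexthop i's via address is found by aligning just past rt_nh[rt_nhn] and stepping i strides of rt_max_alen, which is exactly what __mpls_nh_via() computes in the earlier hunk. The same pointer arithmetic as a minimal standalone sketch (struct fields trimmed to what the computation needs):

    #include <stdint.h>
    #include <stddef.h>

    #define PTR_ALIGN_UP(p, a) \
            ((uint8_t *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

    struct nh { uint32_t label[2]; uint8_t labels, via_alen, via_table; };

    struct route {
            uint8_t   rt_max_alen;  /* via stride, already aligned */
            unsigned  rt_nhn;
            struct nh rt_nh[];      /* via storage follows this array */
    };

    /* Via bytes for nexthop i: align past the nexthop array, then
     * step in rt_max_alen strides. */
    static uint8_t *nh_via(struct route *rt, unsigned i)
    {
            uint8_t *via0 = PTR_ALIGN_UP(&rt->rt_nh[rt->rt_nhn],
                                         sizeof(unsigned long));

            return via0 + (size_t)rt->rt_max_alen * i;
    }
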
 static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
 {
        return (struct mpls_shim_hdr *)skb_network_header(skb);
@@ -52,8 +122,10 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *
 
 int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels,
                   const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels,
                   u32 label[]);
+int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
+               u8 via[]);
 bool mpls_output_possible(const struct net_device *dev);
 unsigned int mpls_dev_mtu(const struct net_device *dev);
 bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
index 3e1b4abf1897a5bdeca9e5fa061bd06d9b858263..e22349ea725605a05ce03cb3701142856daf420c 100644 (file)
@@ -354,7 +354,7 @@ config NF_CT_NETLINK_HELPER
        select NETFILTER_NETLINK
        depends on NF_CT_NETLINK
        depends on NETFILTER_NETLINK_QUEUE
-       depends on NETFILTER_NETLINK_QUEUE_CT
+       depends on NETFILTER_NETLINK_GLUE_CT
        depends on NETFILTER_ADVANCED
        help
          This option enables the user-space connection tracking helpers
@@ -362,13 +362,14 @@ config NF_CT_NETLINK_HELPER
 
          If unsure, say `N'.
 
-config NETFILTER_NETLINK_QUEUE_CT
-        bool "NFQUEUE integration with Connection Tracking"
-        default n
-        depends on NETFILTER_NETLINK_QUEUE
+config NETFILTER_NETLINK_GLUE_CT
+       bool "NFQUEUE and NFLOG integration with Connection Tracking"
+       default n
+       depends on (NETFILTER_NETLINK_QUEUE || NETFILTER_NETLINK_LOG) && NF_CT_NETLINK
        help
-         If this option is enabled, NFQUEUE can include Connection Tracking
-         information together with the packet is the enqueued via NFNETLINK.
+         If this option is enabled, NFQUEUE and NFLOG can include
+         Connection Tracking information together with the packet that
+         is enqueued via NFNETLINK.
 
 config NF_NAT
        tristate
index 70d026d46fe7d07a3ee1d942b7e877782bb37957..7638c36b498ccd00618bd073252331147912d125 100644 (file)
@@ -10,8 +10,6 @@ obj-$(CONFIG_NETFILTER) = netfilter.o
 
 obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
 obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
-nfnetlink_queue-y := nfnetlink_queue_core.o
-nfnetlink_queue-$(CONFIG_NETFILTER_NETLINK_QUEUE_CT) += nfnetlink_queue_ct.o
 obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
 obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
 
index 2e907335ee81e85b5e0bcb595b9404e961f7e51a..f39276d1c2d76788dfc018f2ebdc07d2855f977d 100644 (file)
@@ -152,6 +152,8 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 #endif
        synchronize_net();
        nf_queue_nf_hook_drop(net, &entry->ops);
+       /* other cpu might still process nfqueue verdict that used reg */
+       synchronize_net();
        kfree(entry);
 }
 EXPORT_SYMBOL(nf_unregister_net_hook);
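
The second synchronize_net() added above closes a window in which a CPU delivering an nfqueue verdict could still dereference the unlinked hook entry: the first grace period covers walkers of the hook list, the queued packets holding the hook are then dropped, and the second grace period covers verdicts already in flight. The general unpublish/wait/free shape of that fix, sketched with liburcu-style primitives (the hook type and the serialisation of writers are assumptions here):

    #include <urcu.h>
    #include <stdlib.h>

    struct hook { void (*fn)(void); };

    static struct hook *active_hook;  /* RCU-protected pointer */

    /* Writers are assumed serialised, as the kernel's are by a mutex. */
    static void unregister_hook(void)
    {
            struct hook *old = active_hook;

            rcu_assign_pointer(active_hook, NULL);  /* unpublish */
            synchronize_rcu();   /* readers that saw the pointer finish */
            /* ... cancel work that captured an indirect reference ... */
            synchronize_rcu();   /* in-flight indirect users finish too */
            free(old);           /* now nothing can reach the object */
    }
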
@@ -313,8 +315,6 @@ next_hook:
                int err = nf_queue(skb, elem, state,
                                   verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
-                       if (err == -ECANCELED)
-                               goto next_hook;
                        if (err == -ESRCH &&
                           (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                                goto next_hook;
@@ -348,6 +348,12 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
 }
 EXPORT_SYMBOL(skb_make_writable);
 
+/* This needs to be compiled in any case to avoid dependencies between the
+ * nfnetlink_queue code and nf_conntrack.
+ */
+struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
+EXPORT_SYMBOL_GPL(nfnl_ct_hook);
+
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
@@ -385,9 +391,6 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
 }
 EXPORT_SYMBOL(nf_conntrack_destroy);
 
-struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
-EXPORT_SYMBOL_GPL(nfq_ct_hook);
-
 /* Built-in default zone used e.g. by modules. */
 const struct nf_conntrack_zone nf_ct_zone_dflt = {
        .id     = NF_CT_DEFAULT_ZONE_ID,
index a1fe5377a2b3376d29f29b18d77ebfe08fc988e1..5a30ce6e8c90d278ac37cb0115f45a4390f9a6d7 100644 (file)
@@ -297,7 +297,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
              ip_set_timeout_expired(ext_timeout(n, set))))
                n =  NULL;
 
-       e = kzalloc(set->dsize, GFP_KERNEL);
+       e = kzalloc(set->dsize, GFP_ATOMIC);
        if (!e)
                return -ENOMEM;
        e->id = d->id;
index d1d168c7fc686e93941d6919371ce1263e09b089..85ca189bdc3d2b01ee4f202348fdf606f2585cf1 100644 (file)
@@ -1347,23 +1347,20 @@ flush_again:
  */
 int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
-
        atomic_set(&ipvs->conn_count, 0);
 
-       proc_create("ip_vs_conn", 0, net->proc_net, &ip_vs_conn_fops);
-       proc_create("ip_vs_conn_sync", 0, net->proc_net, &ip_vs_conn_sync_fops);
+       proc_create("ip_vs_conn", 0, ipvs->net->proc_net, &ip_vs_conn_fops);
+       proc_create("ip_vs_conn_sync", 0, ipvs->net->proc_net,
+                   &ip_vs_conn_sync_fops);
        return 0;
 }
 
 void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
-
        /* flush all the connection entries first */
        ip_vs_conn_flush(ipvs);
-       remove_proc_entry("ip_vs_conn", net->proc_net);
-       remove_proc_entry("ip_vs_conn_sync", net->proc_net);
+       remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
+       remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
 }
 
 int __init ip_vs_conn_init(void)
index 07a791ecdfbab9fee7f6c78bcc53a6f9204cbe90..1e24fff53e4b5eef8fab776b2e3dc93a3ae4545c 100644 (file)
@@ -547,7 +547,6 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
        return cp;
 }
 
-#ifdef CONFIG_SYSCTL
 static inline int ip_vs_addr_is_unicast(struct net *net, int af,
                                        union nf_inet_addr *addr)
 {
@@ -557,7 +556,6 @@ static inline int ip_vs_addr_is_unicast(struct net *net, int af,
 #endif
        return (inet_addr_type(net, addr->ip) == RTN_UNICAST);
 }
-#endif
 
 /*
  *  Pass or drop the packet.
@@ -1174,7 +1172,6 @@ drop:
 static unsigned int
 ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
 {
-       struct net *net = ipvs->net;
        struct ip_vs_iphdr iph;
        struct ip_vs_protocol *pp;
        struct ip_vs_proto_data *pd;
@@ -1274,7 +1271,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 #ifdef CONFIG_IP_VS_IPV6
                                if (af == AF_INET6) {
                                        if (!skb->dev)
-                                               skb->dev = net->loopback_dev;
+                                               skb->dev = ipvs->net->loopback_dev;
                                        icmpv6_send(skb,
                                                    ICMPV6_DEST_UNREACH,
                                                    ICMPV6_PORT_UNREACH,
@@ -1926,7 +1923,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        /* After packet filtering, change source only for VS/NAT */
        {
                .hook           = ip_vs_reply4,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP_PRI_NAT_SRC - 2,
@@ -1936,7 +1932,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
         * applied to IPVS. */
        {
                .hook           = ip_vs_remote_request4,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP_PRI_NAT_SRC - 1,
@@ -1944,7 +1939,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        /* Before ip_vs_in, change source only for VS/NAT */
        {
                .hook           = ip_vs_local_reply4,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP_PRI_NAT_DST + 1,
@@ -1952,7 +1946,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        /* After mangle, schedule and forward local requests */
        {
                .hook           = ip_vs_local_request4,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP_PRI_NAT_DST + 2,
@@ -1961,7 +1954,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
         * destined for 0.0.0.0/0, which is for incoming IPVS connections */
        {
                .hook           = ip_vs_forward_icmp,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_FORWARD,
                .priority       = 99,
@@ -1969,7 +1961,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        /* After packet filtering, change source only for VS/NAT */
        {
                .hook           = ip_vs_reply4,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV4,
                .hooknum        = NF_INET_FORWARD,
                .priority       = 100,
@@ -1978,7 +1969,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        /* After packet filtering, change source only for VS/NAT */
        {
                .hook           = ip_vs_reply6,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP6_PRI_NAT_SRC - 2,
@@ -1988,7 +1978,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
         * applied to IPVS. */
        {
                .hook           = ip_vs_remote_request6,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_IN,
                .priority       = NF_IP6_PRI_NAT_SRC - 1,
@@ -1996,7 +1985,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        /* Before ip_vs_in, change source only for VS/NAT */
        {
                .hook           = ip_vs_local_reply6,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP6_PRI_NAT_DST + 1,
@@ -2004,7 +1992,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        /* After mangle, schedule and forward local requests */
        {
                .hook           = ip_vs_local_request6,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_LOCAL_OUT,
                .priority       = NF_IP6_PRI_NAT_DST + 2,
@@ -2013,7 +2000,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
         * destined for 0.0.0.0/0, which is for incoming IPVS connections */
        {
                .hook           = ip_vs_forward_icmp_v6,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_FORWARD,
                .priority       = 99,
@@ -2021,7 +2007,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        /* After packet filtering, change source only for VS/NAT */
        {
                .hook           = ip_vs_reply6,
-               .owner          = THIS_MODULE,
                .pf             = NFPROTO_IPV6,
                .hooknum        = NF_INET_FORWARD,
                .priority       = 100,
index 09d1d19b2ab94f5085fef1f69fc372ba917197a7..3cb3cb831591ef79515b4bde66d5db825a732afb 100644 (file)
@@ -940,10 +940,13 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        }
 
        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
-       if (timeout_ext)
-               timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
-       else
+       if (timeout_ext) {
+               timeouts = nf_ct_timeout_data(timeout_ext);
+               if (unlikely(!timeouts))
+                       timeouts = l4proto->get_timeouts(net);
+       } else {
                timeouts = l4proto->get_timeouts(net);
+       }
 
        if (!l4proto->new(ct, skb, dataoff, timeouts)) {
                nf_conntrack_free(ct);
@@ -952,7 +955,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        }
 
        if (timeout_ext)
-               nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC);
+               nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
+                                     GFP_ATOMIC);
 
        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
index 94a66541e0b76a1764ad6b2d73bf8f6cd1310a6b..9f5272968abb095cb3e758b3906cb6dcbd86ab24 100644 (file)
@@ -2133,9 +2133,9 @@ ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
                       struct nf_conntrack_tuple *tuple,
                       struct nf_conntrack_tuple *mask);
 
-#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
 static size_t
-ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
+ctnetlink_glue_build_size(const struct nf_conn *ct)
 {
        return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
               + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
@@ -2162,8 +2162,19 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
               ;
 }
 
-static int
-ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
+static struct nf_conn *ctnetlink_glue_get_ct(const struct sk_buff *skb,
+                                            enum ip_conntrack_info *ctinfo)
+{
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, ctinfo);
+       if (ct && nf_ct_is_untracked(ct))
+               ct = NULL;
+
+       return ct;
+}
+
+static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
 {
        const struct nf_conntrack_zone *zone;
        struct nlattr *nest_parms;
@@ -2236,7 +2247,32 @@ nla_put_failure:
 }
 
 static int
-ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
+ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
+                    enum ip_conntrack_info ctinfo,
+                    u_int16_t ct_attr, u_int16_t ct_info_attr)
+{
+       struct nlattr *nest_parms;
+
+       nest_parms = nla_nest_start(skb, ct_attr | NLA_F_NESTED);
+       if (!nest_parms)
+               goto nla_put_failure;
+
+       if (__ctnetlink_glue_build(skb, ct) < 0)
+               goto nla_put_failure;
+
+       nla_nest_end(skb, nest_parms);
+
+       if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -ENOSPC;
+}
+
+static int
+ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
 {
        int err;
 
@@ -2276,7 +2312,7 @@ ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
 }
 
 static int
-ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
+ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
 {
        struct nlattr *cda[CTA_MAX+1];
        int ret;
@@ -2286,16 +2322,16 @@ ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
                return ret;
 
        spin_lock_bh(&nf_conntrack_expect_lock);
-       ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
+       ret = ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
        spin_unlock_bh(&nf_conntrack_expect_lock);
 
        return ret;
 }
 
-static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
-                                      const struct nf_conn *ct,
-                                      struct nf_conntrack_tuple *tuple,
-                                      struct nf_conntrack_tuple *mask)
+static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
+                                   const struct nf_conn *ct,
+                                   struct nf_conntrack_tuple *tuple,
+                                   struct nf_conntrack_tuple *mask)
 {
        int err;
 
@@ -2309,8 +2345,8 @@ static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
 }
 
 static int
-ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
-                               u32 portid, u32 report)
+ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
+                            u32 portid, u32 report)
 {
        struct nlattr *cda[CTA_EXPECT_MAX+1];
        struct nf_conntrack_tuple tuple, mask;
@@ -2322,8 +2358,8 @@ ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
        if (err < 0)
                return err;
 
-       err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
-                                         ct, &tuple, &mask);
+       err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
+                                      ct, &tuple, &mask);
        if (err < 0)
                return err;
 
@@ -2350,14 +2386,24 @@ ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
        return 0;
 }
 
-static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
-       .build_size     = ctnetlink_nfqueue_build_size,
-       .build          = ctnetlink_nfqueue_build,
-       .parse          = ctnetlink_nfqueue_parse,
-       .attach_expect  = ctnetlink_nfqueue_attach_expect,
-       .seq_adjust     = nf_ct_tcp_seqadj_set,
+static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
+                                 enum ip_conntrack_info ctinfo, int diff)
+{
+       if (!(ct->status & IPS_NAT_MASK))
+               return;
+
+       nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
+}
+
+static struct nfnl_ct_hook ctnetlink_glue_hook = {
+       .get_ct         = ctnetlink_glue_get_ct,
+       .build_size     = ctnetlink_glue_build_size,
+       .build          = ctnetlink_glue_build,
+       .parse          = ctnetlink_glue_parse,
+       .attach_expect  = ctnetlink_glue_attach_expect,
+       .seq_adjust     = ctnetlink_glue_seqadj,
 };
-#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
+#endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
 
 /***********************************************************************
  * EXPECT
@@ -3341,9 +3387,9 @@ static int __init ctnetlink_init(void)
                pr_err("ctnetlink_init: cannot register pernet operations\n");
                goto err_unreg_exp_subsys;
        }
-#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
        /* setup interaction between nf_queue and nf_conntrack_netlink. */
-       RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
+       RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
 #endif
        return 0;
 
@@ -3362,8 +3408,8 @@ static void __exit ctnetlink_exit(void)
        unregister_pernet_subsys(&ctnetlink_net_ops);
        nfnetlink_subsys_unregister(&ctnl_exp_subsys);
        nfnetlink_subsys_unregister(&ctnl_subsys);
-#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
-       RCU_INIT_POINTER(nfq_ct_hook, NULL);
+#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
+       RCU_INIT_POINTER(nfnl_ct_hook, NULL);
 #endif
 }
 
index 34f628e16a4cf7e460342311cbdbf579199c158b..5baa8e24e6ac1b512250c676cf0caf115f0b17f9 100644 (file)
@@ -69,19 +69,14 @@ void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
                        dev_put(physdev);
        }
 #endif
-       /* Drop reference to owner of hook which queued us. */
-       module_put(entry->elem->owner);
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
 
 /* Bump dev refs so they don't vanish while packet is out */
-bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 {
        struct nf_hook_state *state = &entry->state;
 
-       if (!try_module_get(entry->elem->owner))
-               return false;
-
        if (state->in)
                dev_hold(state->in);
        if (state->out)
@@ -100,8 +95,6 @@ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
                        dev_hold(physdev);
        }
 #endif
-
-       return true;
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
@@ -131,22 +124,20 @@ int nf_queue(struct sk_buff *skb,
        const struct nf_queue_handler *qh;
 
        /* QUEUE == DROP if no one is waiting, to be safe. */
-       rcu_read_lock();
-
        qh = rcu_dereference(queue_handler);
        if (!qh) {
                status = -ESRCH;
-               goto err_unlock;
+               goto err;
        }
 
        afinfo = nf_get_afinfo(state->pf);
        if (!afinfo)
-               goto err_unlock;
+               goto err;
 
        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
-               goto err_unlock;
+               goto err;
        }
 
        *entry = (struct nf_queue_entry) {
@@ -156,16 +147,11 @@ int nf_queue(struct sk_buff *skb,
                .size   = sizeof(*entry) + afinfo->route_key_size,
        };
 
-       if (!nf_queue_entry_get_refs(entry)) {
-               status = -ECANCELED;
-               goto err_unlock;
-       }
+       nf_queue_entry_get_refs(entry);
        skb_dst_force(skb);
        afinfo->saveroute(skb, entry);
        status = qh->outfn(entry, queuenum);
 
-       rcu_read_unlock();
-
        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
@@ -173,8 +159,6 @@ int nf_queue(struct sk_buff *skb,
 
        return 0;
 
-err_unlock:
-       rcu_read_unlock();
 err:
        kfree(entry);
        return status;
@@ -187,15 +171,11 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        const struct nf_afinfo *afinfo;
        int err;
 
-       rcu_read_lock();
-
        nf_queue_entry_release_refs(entry);
 
        /* Continue traversal iff userspace said ok... */
-       if (verdict == NF_REPEAT) {
-               elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
-               verdict = NF_ACCEPT;
-       }
+       if (verdict == NF_REPEAT)
+               verdict = elem->hook(elem->priv, skb, &entry->state);
 
        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(entry->state.pf);
@@ -222,8 +202,6 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
                err = nf_queue(skb, elem, &entry->state,
                               verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
-                       if (err == -ECANCELED)
-                               goto next_hook;
                        if (err == -ESRCH &&
                           (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                                goto next_hook;
@@ -235,7 +213,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        default:
                kfree_skb(skb);
        }
-       rcu_read_unlock();
+
        kfree(entry);
 }
 EXPORT_SYMBOL(nf_reinject);
index 4a41eb92bcc0ab934f12305ad64a18c8804a52b2..93cc4737018fdf3d13a2896c19a9c65891355f9e 100644 (file)
@@ -1433,7 +1433,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                for (i = 0; i < afi->nops; i++) {
                        ops = &basechain->ops[i];
                        ops->pf         = family;
-                       ops->owner      = afi->owner;
                        ops->hooknum    = hooknum;
                        ops->priority   = priority;
                        ops->priv       = chain;
index 476accd171452fcfbfbe01018dcadd55fee41f67..c7a2d0e1c462cd9284ede6a0ea1b1d70db95c8c1 100644 (file)
@@ -291,6 +291,34 @@ cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
        return ret;
 }
 
+static void untimeout(struct nf_conntrack_tuple_hash *i,
+                     struct ctnl_timeout *timeout)
+{
+       struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
+       struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
+
+       if (timeout_ext && (!timeout || timeout_ext->timeout == timeout))
+               RCU_INIT_POINTER(timeout_ext->timeout, NULL);
+}
+
+static void ctnl_untimeout(struct ctnl_timeout *timeout)
+{
+       struct nf_conntrack_tuple_hash *h;
+       const struct hlist_nulls_node *nn;
+       int i;
+
+       local_bh_disable();
+       for (i = 0; i < init_net.ct.htable_size; i++) {
+               spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+               if (i < init_net.ct.htable_size) {
+                       hlist_nulls_for_each_entry(h, nn, &init_net.ct.hash[i], hnnode)
+                               untimeout(h, timeout);
+               }
+               spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+       }
+       local_bh_enable();
+}
+
 /* try to delete object, fail if it is still in use. */
 static int ctnl_timeout_try_del(struct ctnl_timeout *timeout)
 {
@@ -301,6 +329,7 @@ static int ctnl_timeout_try_del(struct ctnl_timeout *timeout)
                /* We are protected by nfnl mutex. */
                list_del_rcu(&timeout->head);
                nf_ct_l4proto_put(timeout->l4proto);
+               ctnl_untimeout(timeout);
                kfree_rcu(timeout, rcu_head);
        } else {
                /* still in use, restore reference counter. */
@@ -567,6 +596,10 @@ static void __exit cttimeout_exit(void)
        pr_info("cttimeout: unregistering from nfnetlink.\n");
 
        nfnetlink_subsys_unregister(&cttimeout_subsys);
+
+       /* Make sure no conntrack objects refer to custom timeouts anymore. */
+       ctnl_untimeout(NULL);
+
        list_for_each_entry_safe(cur, tmp, &cttimeout_list, head) {
                list_del_rcu(&cur->head);
                /* We are sure that our objects have no clients at this point,
@@ -579,6 +612,7 @@ static void __exit cttimeout_exit(void)
        RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
        RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+       rcu_barrier();
 }
 
 module_init(cttimeout_init);
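
Two ordering details here deserve a note. First, ctnl_untimeout() re-tests i < init_net.ct.htable_size after taking the bucket lock because the conntrack table can be resized concurrently; the for-loop bound alone is stale the moment the lock is acquired. Second, the rcu_barrier() at the end of cttimeout_exit() guarantees that every kfree_rcu() callback queued by the list teardown has run before the module is unloaded. A minimal sketch of that unload pattern, with illustrative names:

    static LIST_HEAD(example_list);

    struct example_timeout {
            struct list_head head;
            struct rcu_head rcu_head;
    };

    static void __exit example_exit(void)
    {
            struct example_timeout *cur, *tmp;

            list_for_each_entry_safe(cur, tmp, &example_list, head) {
                    list_del_rcu(&cur->head);
                    kfree_rcu(cur, rcu_head);  /* freed after a grace period */
            }
            /* Without this, a pending kfree_rcu() callback could fire after
             * the module's code and data have been freed. */
            rcu_barrier();
    }

The ctnl_untimeout(NULL) call serves the same end: no conntrack entry may keep a stale pointer into module memory once the subsystem is gone.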
index cc2300f4e177136c96763a06c90e19614e79e2f7..06eb48fceb42e4c31dba2d91c0d182bfc82e5b19 100644 (file)
@@ -27,6 +27,7 @@
 #include <net/netlink.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_log.h>
+#include <linux/netfilter/nf_conntrack_common.h>
 #include <linux/spinlock.h>
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
@@ -401,7 +402,9 @@ __build_packet_message(struct nfnl_log_net *log,
                        unsigned int hooknum,
                        const struct net_device *indev,
                        const struct net_device *outdev,
-                       const char *prefix, unsigned int plen)
+                       const char *prefix, unsigned int plen,
+                       const struct nfnl_ct_hook *nfnl_ct,
+                       struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
        struct nfulnl_msg_packet_hdr pmsg;
        struct nlmsghdr *nlh;
@@ -575,6 +578,10 @@ __build_packet_message(struct nfnl_log_net *log,
                         htonl(atomic_inc_return(&log->global_seq))))
                goto nla_put_failure;
 
+       if (ct && nfnl_ct->build(inst->skb, ct, ctinfo,
+                                NFULA_CT, NFULA_CT_INFO) < 0)
+               goto nla_put_failure;
+
        if (data_len) {
                struct nlattr *nla;
                int size = nla_attr_size(data_len);
@@ -620,12 +627,16 @@ nfulnl_log_packet(struct net *net,
                  const struct nf_loginfo *li_user,
                  const char *prefix)
 {
-       unsigned int size, data_len;
+       size_t size;
+       unsigned int data_len;
        struct nfulnl_instance *inst;
        const struct nf_loginfo *li;
        unsigned int qthreshold;
        unsigned int plen;
        struct nfnl_log_net *log = nfnl_log_pernet(net);
+       const struct nfnl_ct_hook *nfnl_ct = NULL;
+       struct nf_conn *ct = NULL;
+       enum ip_conntrack_info uninitialized_var(ctinfo);
 
        if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
                li = li_user;
@@ -671,6 +682,14 @@ nfulnl_log_packet(struct net *net,
                size += nla_total_size(sizeof(u_int32_t));
        if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
                size += nla_total_size(sizeof(u_int32_t));
+       if (inst->flags & NFULNL_CFG_F_CONNTRACK) {
+               nfnl_ct = rcu_dereference(nfnl_ct_hook);
+               if (nfnl_ct != NULL) {
+                       ct = nfnl_ct->get_ct(skb, &ctinfo);
+                       if (ct != NULL)
+                               size += nfnl_ct->build_size(ct);
+               }
+       }
 
        qthreshold = inst->qthreshold;
        /* per-rule qthreshold overrides per-instance */
@@ -715,7 +734,8 @@ nfulnl_log_packet(struct net *net,
        inst->qlen++;
 
        __build_packet_message(log, inst, skb, data_len, pf,
-                               hooknum, in, out, prefix, plen);
+                               hooknum, in, out, prefix, plen,
+                               nfnl_ct, ct, ctinfo);
 
        if (inst->qlen >= qthreshold)
                __nfulnl_flush(inst);
@@ -805,6 +825,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
        struct net *net = sock_net(ctnl);
        struct nfnl_log_net *log = nfnl_log_pernet(net);
        int ret = 0;
+       u16 flags;
 
        if (nfula[NFULA_CFG_CMD]) {
                u_int8_t pf = nfmsg->nfgen_family;
@@ -826,6 +847,28 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                goto out_put;
        }
 
+       /* Check whether we support these flags in the first place; their
+        * dependencies must be present too, so that the request stays atomic.
+        */
+       if (nfula[NFULA_CFG_FLAGS]) {
+               flags = ntohs(nla_get_be16(nfula[NFULA_CFG_FLAGS]));
+
+               if ((flags & NFULNL_CFG_F_CONNTRACK) &&
+                   !rcu_access_pointer(nfnl_ct_hook)) {
+#ifdef CONFIG_MODULES
+                       nfnl_unlock(NFNL_SUBSYS_ULOG);
+                       request_module("ip_conntrack_netlink");
+                       nfnl_lock(NFNL_SUBSYS_ULOG);
+                       if (rcu_access_pointer(nfnl_ct_hook)) {
+                               ret = -EAGAIN;
+                               goto out_put;
+                       }
+#endif
+                       ret = -EOPNOTSUPP;
+                       goto out_put;
+               }
+       }
+
        if (cmd != NULL) {
                switch (cmd->command) {
                case NFULNL_CFG_CMD_BIND:
@@ -854,16 +897,15 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                        ret = -ENOTSUPP;
                        break;
                }
+       } else if (!inst) {
+               ret = -ENODEV;
+               goto out;
        }
 
        if (nfula[NFULA_CFG_MODE]) {
-               struct nfulnl_msg_config_mode *params;
-               params = nla_data(nfula[NFULA_CFG_MODE]);
+               struct nfulnl_msg_config_mode *params =
+                       nla_data(nfula[NFULA_CFG_MODE]);
 
-               if (!inst) {
-                       ret = -ENODEV;
-                       goto out;
-               }
                nfulnl_set_mode(inst, params->copy_mode,
                                ntohl(params->copy_range));
        }
@@ -871,42 +913,23 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
        if (nfula[NFULA_CFG_TIMEOUT]) {
                __be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);
 
-               if (!inst) {
-                       ret = -ENODEV;
-                       goto out;
-               }
                nfulnl_set_timeout(inst, ntohl(timeout));
        }
 
        if (nfula[NFULA_CFG_NLBUFSIZ]) {
                __be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);
 
-               if (!inst) {
-                       ret = -ENODEV;
-                       goto out;
-               }
                nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
        }
 
        if (nfula[NFULA_CFG_QTHRESH]) {
                __be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);
 
-               if (!inst) {
-                       ret = -ENODEV;
-                       goto out;
-               }
                nfulnl_set_qthresh(inst, ntohl(qthresh));
        }
 
-       if (nfula[NFULA_CFG_FLAGS]) {
-               __be16 flags = nla_get_be16(nfula[NFULA_CFG_FLAGS]);
-
-               if (!inst) {
-                       ret = -ENODEV;
-                       goto out;
-               }
-               nfulnl_set_flags(inst, ntohs(flags));
-       }
+       if (nfula[NFULA_CFG_FLAGS])
+               nfulnl_set_flags(inst, flags);
 
 out_put:
        instance_put(inst);
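
The configuration path is now atomic with respect to NFULA_CFG_FLAGS: the flags are validated, and their dependency loaded, before any command or mode change is applied, so a request either takes effect completely or not at all. The CONFIG_MODULES branch is a standard nfnetlink idiom, sketched below with a hypothetical helper name: release the subsystem mutex (request_module() may sleep, and the loaded module's init path takes the same lock), try to load the provider, and report -EAGAIN once the hook has appeared so userspace can retry the identical request:

    static int example_check_flags(u16 flags)
    {
            if (!(flags & NFULNL_CFG_F_CONNTRACK))
                    return 0;
            if (rcu_access_pointer(nfnl_ct_hook))
                    return 0;                   /* provider already present */
    #ifdef CONFIG_MODULES
            nfnl_unlock(NFNL_SUBSYS_ULOG);
            request_module("ip_conntrack_netlink");
            nfnl_lock(NFNL_SUBSYS_ULOG);
            if (rcu_access_pointer(nfnl_ct_hook))
                    return -EAGAIN;             /* loaded now; caller retries */
    #endif
            return -EOPNOTSUPP;
    }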
similarity index 95%
rename from net/netfilter/nfnetlink_queue_core.c
rename to net/netfilter/nfnetlink_queue.c
index 41583e30051b823bd401b23392875f7fd8079d45..7d81d280cb4ff3e8878af96bd644930f68a4340c 100644 (file)
 #include <linux/netfilter_bridge.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_queue.h>
+#include <linux/netfilter/nf_conntrack_common.h>
 #include <linux/list.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <net/netfilter/nf_queue.h>
 #include <net/netns/generic.h>
-#include <net/netfilter/nfnetlink_queue.h>
 
 #include <linux/atomic.h>
 
@@ -313,6 +313,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        struct net_device *outdev;
        struct nf_conn *ct = NULL;
        enum ip_conntrack_info uninitialized_var(ctinfo);
+       struct nfnl_ct_hook *nfnl_ct;
        bool csum_verify;
        char *secdata = NULL;
        u32 seclen = 0;
@@ -364,8 +365,14 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                break;
        }
 
-       if (queue->flags & NFQA_CFG_F_CONNTRACK)
-               ct = nfqnl_ct_get(entskb, &size, &ctinfo);
+       if (queue->flags & NFQA_CFG_F_CONNTRACK) {
+               nfnl_ct = rcu_dereference(nfnl_ct_hook);
+               if (nfnl_ct != NULL) {
+                       ct = nfnl_ct->get_ct(entskb, &ctinfo);
+                       if (ct != NULL)
+                               size += nfnl_ct->build_size(ct);
+               }
+       }
 
        if (queue->flags & NFQA_CFG_F_UID_GID) {
                size +=  (nla_total_size(sizeof(u_int32_t))     /* uid */
@@ -493,9 +500,10 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 
        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
-               struct timeval tv = ktime_to_timeval(entskb->tstamp);
-               ts.sec = cpu_to_be64(tv.tv_sec);
-               ts.usec = cpu_to_be64(tv.tv_usec);
+               struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
+
+               ts.sec = cpu_to_be64(kts.tv_sec);
+               ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
 
                if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
                        goto nla_put_failure;
@@ -508,7 +516,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
                goto nla_put_failure;
 
-       if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
+       if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
                goto nla_put_failure;
 
        if (cap_len > data_len &&
@@ -598,12 +606,9 @@ static struct nf_queue_entry *
 nf_queue_entry_dup(struct nf_queue_entry *e)
 {
        struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
-       if (entry) {
-               if (nf_queue_entry_get_refs(entry))
-                       return entry;
-               kfree(entry);
-       }
-       return NULL;
+       if (entry)
+               nf_queue_entry_get_refs(entry);
+       return entry;
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
@@ -698,7 +703,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        nf_bridge_adjust_skb_data(skb);
        segs = skb_gso_segment(skb, 0);
        /* Does not use PTR_ERR to limit the number of error codes that can be
-        * returned by nf_queue.  For instance, callers rely on -ECANCELED to
+        * returned by nf_queue.  For instance, callers rely on -ESRCH to
         * mean 'ignore this hook'.
         */
        if (IS_ERR_OR_NULL(segs))
@@ -1001,6 +1006,28 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
        return 0;
 }
 
+static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
+                                     const struct nlmsghdr *nlh,
+                                     const struct nlattr * const nfqa[],
+                                     struct nf_queue_entry *entry,
+                                     enum ip_conntrack_info *ctinfo)
+{
+       struct nf_conn *ct;
+
+       ct = nfnl_ct->get_ct(entry->skb, ctinfo);
+       if (ct == NULL)
+               return NULL;
+
+       if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
+               return NULL;
+
+       if (nfqa[NFQA_EXP])
+               nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
+                                     NETLINK_CB(entry->skb).portid,
+                                     nlmsg_report(nlh));
+       return ct;
+}
+
 static int
 nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
@@ -1014,6 +1041,7 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
        unsigned int verdict;
        struct nf_queue_entry *entry;
        enum ip_conntrack_info uninitialized_var(ctinfo);
+       struct nfnl_ct_hook *nfnl_ct;
        struct nf_conn *ct = NULL;
 
        struct net *net = sock_net(ctnl);
@@ -1037,12 +1065,10 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                return -ENOENT;
 
        if (nfqa[NFQA_CT]) {
-               ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
-               if (ct && nfqa[NFQA_EXP]) {
-                       nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
-                                           NETLINK_CB(skb).portid,
-                                           nlmsg_report(nlh));
-               }
+               /* rcu lock already held from nfnl->call_rcu. */
+               nfnl_ct = rcu_dereference(nfnl_ct_hook);
+               if (nfnl_ct != NULL)
+                       ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
        }
 
        if (nfqa[NFQA_PAYLOAD]) {
@@ -1053,8 +1079,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                                 payload_len, entry, diff) < 0)
                        verdict = NF_DROP;
 
-               if (ct)
-                       nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff);
+               if (ct && diff)
+                       nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
        }
 
        if (nfqa[NFQA_MARK])
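
Both nfnetlink_log and nfnetlink_queue now reach ctnetlink through one RCU-published ops structure, nfnl_ct_hook, instead of the per-subsystem wrapper functions deleted below. The shape of that indirection, inferred from the calls in this diff (the struct layout shown is illustrative, not the exact header definition):

    struct example_ct_hook {
            struct nf_conn *(*get_ct)(struct sk_buff *skb,
                                      enum ip_conntrack_info *ctinfo);
            size_t (*build_size)(const struct nf_conn *ct);
            int (*build)(struct sk_buff *skb, struct nf_conn *ct,
                         enum ip_conntrack_info ctinfo,
                         u_int16_t ct_attr, u_int16_t ct_info_attr);
    };

    static struct example_ct_hook __rcu *example_hook;

    /* The provider publishes its ops once at module load:
     *         rcu_assign_pointer(example_hook, &ctnetlink_ops);
     * and consumers, under rcu_read_lock(), tolerate its absence: */
    static size_t example_ct_extra_size(struct sk_buff *skb)
    {
            struct example_ct_hook *hook = rcu_dereference(example_hook);
            enum ip_conntrack_info ctinfo;
            struct nf_conn *ct;

            if (!hook)
                    return 0;
            ct = hook->get_ct(skb, &ctinfo);
            return ct ? hook->build_size(ct) : 0;
    }

Passing the attribute types (NFULA_CT vs. NFQA_CT) as arguments to build() is what lets one hook serve both subsystems.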
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
deleted file mode 100644 (file)
index 96cac50..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/skbuff.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nfnetlink_queue.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nfnetlink_queue.h>
-
-struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
-                            enum ip_conntrack_info *ctinfo)
-{
-       struct nfq_ct_hook *nfq_ct;
-       struct nf_conn *ct;
-
-       /* rcu_read_lock()ed by __nf_queue already. */
-       nfq_ct = rcu_dereference(nfq_ct_hook);
-       if (nfq_ct == NULL)
-               return NULL;
-
-       ct = nf_ct_get(entskb, ctinfo);
-       if (ct) {
-               if (!nf_ct_is_untracked(ct))
-                       *size += nfq_ct->build_size(ct);
-               else
-                       ct = NULL;
-       }
-       return ct;
-}
-
-struct nf_conn *
-nfqnl_ct_parse(const struct sk_buff *skb, const struct nlattr *attr,
-              enum ip_conntrack_info *ctinfo)
-{
-       struct nfq_ct_hook *nfq_ct;
-       struct nf_conn *ct;
-
-       /* rcu_read_lock()ed by __nf_queue already. */
-       nfq_ct = rcu_dereference(nfq_ct_hook);
-       if (nfq_ct == NULL)
-               return NULL;
-
-       ct = nf_ct_get(skb, ctinfo);
-       if (ct && !nf_ct_is_untracked(ct))
-               nfq_ct->parse(attr, ct);
-
-       return ct;
-}
-
-int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
-                enum ip_conntrack_info ctinfo)
-{
-       struct nfq_ct_hook *nfq_ct;
-       struct nlattr *nest_parms;
-       u_int32_t tmp;
-
-       nfq_ct = rcu_dereference(nfq_ct_hook);
-       if (nfq_ct == NULL)
-               return 0;
-
-       nest_parms = nla_nest_start(skb, NFQA_CT | NLA_F_NESTED);
-       if (!nest_parms)
-               goto nla_put_failure;
-
-       if (nfq_ct->build(skb, ct) < 0)
-               goto nla_put_failure;
-
-       nla_nest_end(skb, nest_parms);
-
-       tmp = ctinfo;
-       if (nla_put_be32(skb, NFQA_CT_INFO, htonl(tmp)))
-               goto nla_put_failure;
-
-       return 0;
-
-nla_put_failure:
-       return -1;
-}
-
-void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
-                        enum ip_conntrack_info ctinfo, int diff)
-{
-       struct nfq_ct_hook *nfq_ct;
-
-       nfq_ct = rcu_dereference(nfq_ct_hook);
-       if (nfq_ct == NULL)
-               return;
-
-       if ((ct->status & IPS_NAT_MASK) && diff)
-               nfq_ct->seq_adjust(skb, ct, ctinfo, diff);
-}
-
-int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
-                       u32 portid, u32 report)
-{
-       struct nfq_ct_hook *nfq_ct;
-
-       if (nf_ct_is_untracked(ct))
-               return 0;
-
-       nfq_ct = rcu_dereference(nfq_ct_hook);
-       if (nfq_ct == NULL)
-               return -EOPNOTSUPP;
-
-       return nfq_ct->attach_expect(attr, ct, portid, report);
-}
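
The deleted file was a thin indirection layer: every function re-fetched nfq_ct_hook and forwarded to it. With callers dereferencing nfnl_ct_hook themselves, the only logic worth preserving, the nested-attribute framing of nfqnl_ct_put(), moved behind nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) in nf_conntrack_netlink. That framing, in sketch form (the function name is illustrative):

    static int example_put_ct(struct sk_buff *skb, enum ip_conntrack_info ctinfo)
    {
            struct nlattr *nest;

            nest = nla_nest_start(skb, NFQA_CT | NLA_F_NESTED);
            if (!nest)
                    return -EMSGSIZE;
            /* ...conntrack attributes are dumped into the nest here... */
            nla_nest_end(skb, nest);

            if (nla_put_be32(skb, NFQA_CT_INFO, htonl(ctinfo)))
                    return -EMSGSIZE;
            return 0;
    }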
index 9b42b5ea6dcd68c8398c501aa5af81b6dfa83ae8..d4aaad747ea99b0aa877016f437a5fd46ec768ee 100644 (file)
@@ -1193,7 +1193,6 @@ struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
                if (!(hook_mask & 1))
                        continue;
                ops[i].hook     = fn;
-               ops[i].owner    = table->me;
                ops[i].pf       = table->af;
                ops[i].hooknum  = hooknum;
                ops[i].priority = table->priority;
index faf32d888198a72a50c293312c014bcb63747654..e7ac07e53b5925d334d9e33fc8916768c3a3c1d4 100644 (file)
@@ -171,6 +171,9 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
        if (timeout_ext == NULL)
                ret = -ENOMEM;
 
+       rcu_read_unlock();
+       return ret;
+
 err_put_timeout:
        __xt_ct_tg_timeout_put(timeout);
 out:
@@ -318,8 +321,10 @@ static void xt_ct_destroy_timeout(struct nf_conn *ct)
 
        if (timeout_put) {
                timeout_ext = nf_ct_timeout_find(ct);
-               if (timeout_ext)
+               if (timeout_ext) {
                        timeout_put(timeout_ext->timeout);
+                       RCU_INIT_POINTER(timeout_ext->timeout, NULL);
+               }
        }
        rcu_read_unlock();
 #endif
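
The xt_CT changes pair a control-flow fix with a lifetime fix. xt_ct_set_timeout() now unlocks and returns on its own instead of sharing the err_put_timeout unwind path, and xt_ct_destroy_timeout() NULLs timeout_ext->timeout after dropping the reference, matching ctnl_untimeout() above. Storing NULL needs no grace period (there is no newly published data to order against), so RCU_INIT_POINTER() rather than rcu_assign_pointer() is enough; a racing reader sees either NULL or the old object, which kfree_rcu() keeps alive until the grace period ends. In sketch form, with illustrative names:

    static void example_retract(struct nf_conn_timeout *timeout_ext)
    {
            /* Drop the reference, then retract the pointer. */
            timeout_put(timeout_ext->timeout);
            RCU_INIT_POINTER(timeout_ext->timeout, NULL);
    }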
index 8f060d7f9a0e107a410d3ffe71722f49059f7bc8..fafe33bdb61989e680dc4b26dbe99dcc1d4064b5 100644 (file)
@@ -2371,7 +2371,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                int pos, idx, shift;
 
                err = 0;
-               netlink_table_grab();
+               netlink_lock_table();
                for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
                        if (len - pos < sizeof(u32))
                                break;
@@ -2386,7 +2386,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                }
                if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
                        err = -EFAULT;
-               netlink_table_ungrab();
+               netlink_unlock_table();
                break;
        }
        case NETLINK_CAP_ACK:
@@ -2785,6 +2785,7 @@ static int netlink_dump(struct sock *sk)
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        int len, err = -ENOBUFS;
+       int alloc_min_size;
        int alloc_size;
 
        mutex_lock(nlk->cb_mutex);
@@ -2793,9 +2794,6 @@ static int netlink_dump(struct sock *sk)
                goto errout_skb;
        }
 
-       cb = &nlk->cb;
-       alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
-
        if (!netlink_rx_is_mmaped(sk) &&
            atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto errout_skb;
@@ -2805,23 +2803,35 @@ static int netlink_dump(struct sock *sk)
         * to reduce the number of system calls on dump operations, if the
         * user ever provided a big enough buffer.
         */
-       if (alloc_size < nlk->max_recvmsg_len) {
-               skb = netlink_alloc_skb(sk,
-                                       nlk->max_recvmsg_len,
-                                       nlk->portid,
+       cb = &nlk->cb;
+       alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+
+       if (alloc_min_size < nlk->max_recvmsg_len) {
+               alloc_size = nlk->max_recvmsg_len;
+               skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
                                        GFP_KERNEL |
                                        __GFP_NOWARN |
                                        __GFP_NORETRY);
-               /* available room should be exact amount to avoid MSG_TRUNC */
-               if (skb)
-                       skb_reserve(skb, skb_tailroom(skb) -
-                                        nlk->max_recvmsg_len);
        }
-       if (!skb)
+       if (!skb) {
+               alloc_size = alloc_min_size;
                skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
                                        GFP_KERNEL);
+       }
        if (!skb)
                goto errout_skb;
+
+       /* Trim skb to the allocated size. The user is expected to provide a
+        * buffer as large as max(min_dump_alloc, 16KiB); max_recvmsg_len is
+        * capped at netlink_recvmsg(). The dump will pack as many smaller
+        * messages as fit within the allocated skb. The skb is typically
+        * allocated with more space than required (possibly close to 2x the
+        * requested size, given the round-up-to-power-of-2 approach). Letting
+        * the dump use that excess space makes it difficult for a user to
+        * size a reasonable static buffer from the expected largest dump of
+        * a single netdev. The outcome is an MSG_TRUNC error.
+        */
+       skb_reserve(skb, skb_tailroom(skb) - alloc_size);
        netlink_skb_set_owner_r(skb, sk);
 
        len = cb->dump(skb, cb);
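
netlink_dump() now separates the opportunistic size from the guaranteed one: try max_recvmsg_len (the largest buffer the user has ever supplied) with __GFP_NOWARN | __GFP_NORETRY so a big allocation may fail silently and cheaply, fall back to the minimum, then trim the tailroom to whichever size won, since the allocator may round up and a dump packing into that hidden slack is what produced spurious MSG_TRUNC. The strategy in isolation, a hedged sketch assuming the netlink_alloc_skb() allocator used above:

    static struct sk_buff *example_dump_alloc(struct sock *sk,
                                              struct netlink_sock *nlk,
                                              int min_size, int *chosen)
    {
            struct sk_buff *skb = NULL;

            if (min_size < nlk->max_recvmsg_len) {
                    *chosen = nlk->max_recvmsg_len;
                    skb = netlink_alloc_skb(sk, *chosen, nlk->portid,
                                            GFP_KERNEL | __GFP_NOWARN |
                                            __GFP_NORETRY);
            }
            if (!skb) {                     /* modest size, reliable path */
                    *chosen = min_size;
                    skb = netlink_alloc_skb(sk, *chosen, nlk->portid,
                                            GFP_KERNEL);
            }
            if (skb)    /* hide the rounded-up slack from the dump */
                    skb_reserve(skb, skb_tailroom(skb) - *chosen);
            return skb;
    }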
index 1d21ab9d2b5c0fc2d9996859687fff87bce12a6b..221fa8b37a473ea3dd2a5f2987f9b4233ba6987d 100644 (file)
@@ -684,7 +684,7 @@ static void ovs_fragment(struct net *net, struct vport *vport,
 {
        if (skb_network_offset(skb) > MAX_L2_LEN) {
                OVS_NLERR(1, "L2 header too long to fragment");
-               return;
+               goto err;
        }
 
        if (ethertype == htons(ETH_P_IP)) {
@@ -708,8 +708,7 @@ static void ovs_fragment(struct net *net, struct vport *vport,
                struct rt6_info ovs_rt;
 
                if (!v6ops) {
-                       kfree_skb(skb);
-                       return;
+                       goto err;
                }
 
                prepare_frag(vport, skb);
@@ -728,8 +727,12 @@ static void ovs_fragment(struct net *net, struct vport *vport,
                WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
                          ovs_vport_name(vport), ntohs(ethertype), mru,
                          vport->dev->mtu);
-               kfree_skb(skb);
+               goto err;
        }
+
+       return;
+err:
+       kfree_skb(skb);
 }
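
Beyond tidiness, funnelling every failure through the new err label fixes a leak: the early return for an over-long L2 header previously left the skb unfreed. The miniature version of the idiom, with hypothetical helpers that do not consume the skb on failure:

    static void example_fragment(struct sk_buff *skb)
    {
            if (example_l2_too_long(skb))
                    goto err;       /* was a bare return, leaking the skb */
            if (example_send_frags(skb) < 0)
                    goto err;
            return;                 /* success paths consumed the skb */
    err:
            kfree_skb(skb);         /* every failure frees exactly once */
    }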
 
 static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
@@ -766,7 +769,6 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len)
 {
-       struct ip_tunnel_info info;
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;
@@ -794,11 +796,9 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                        if (vport) {
                                int err;
 
-                               upcall.egress_tun_info = &info;
-                               err = ovs_vport_get_egress_tun_info(vport, skb,
-                                                                   &upcall);
-                               if (err)
-                                       upcall.egress_tun_info = NULL;
+                               err = dev_fill_metadata_dst(vport->dev, skb);
+                               if (!err)
+                                       upcall.egress_tun_info = skb_tunnel_info(skb);
                        }
 
                        break;
@@ -969,7 +969,7 @@ static int execute_masked_set_action(struct sk_buff *skb,
        case OVS_KEY_ATTR_CT_STATE:
        case OVS_KEY_ATTR_CT_ZONE:
        case OVS_KEY_ATTR_CT_MARK:
-       case OVS_KEY_ATTR_CT_LABEL:
+       case OVS_KEY_ATTR_CT_LABELS:
                err = -EINVAL;
                break;
        }
@@ -1100,6 +1100,12 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        break;
 
                case OVS_ACTION_ATTR_CT:
+                       if (!is_flow_key_valid(key)) {
+                               err = ovs_flow_key_update(skb, key);
+                               if (err)
+                                       return err;
+                       }
+
                        err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
                                             nla_data(a));
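
The conntrack action now revalidates the flow key first: any earlier set/push/pop action may have rewritten the packet, and conntrack must key on the current bytes. Together with the __ovs_ct_lookup() change further down, which moves ovs_ct_update_key() into the lookup itself, the invariant becomes: conntrack always reads a key that matches the skb, and always refreshes key->ct.* before returning. Annotated, from the hunk above:

    if (!is_flow_key_valid(key)) {          /* did earlier actions dirty it? */
            err = ovs_flow_key_update(skb, key);    /* re-extract from skb */
            if (err)
                    return err;
    }
    err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key, nla_data(a));
    /* __ovs_ct_lookup() now ends in ovs_ct_update_key(skb, key, true),
     * so both the commit and plain-lookup paths leave key->ct.* fresh. */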
 
index ad614267cc2a620249e18e4ef6bec35c7fa19f8f..bd165ee2bb1633d96db7d4fd45180f336a5c20f5 100644 (file)
@@ -37,9 +37,9 @@ struct md_mark {
 };
 
 /* Metadata label for masked write to conntrack label. */
-struct md_label {
-       struct ovs_key_ct_label value;
-       struct ovs_key_ct_label mask;
+struct md_labels {
+       struct ovs_key_ct_labels value;
+       struct ovs_key_ct_labels mask;
 };
 
 /* Conntrack action context for execution. */
@@ -47,10 +47,10 @@ struct ovs_conntrack_info {
        struct nf_conntrack_helper *helper;
        struct nf_conntrack_zone zone;
        struct nf_conn *ct;
-       u32 flags;
+       u8 commit : 1;
        u16 family;
        struct md_mark mark;
-       struct md_label label;
+       struct md_labels labels;
 };
 
 static u16 key_to_nfproto(const struct sw_flow_key *key)
@@ -109,21 +109,21 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct)
 #endif
 }
 
-static void ovs_ct_get_label(const struct nf_conn *ct,
-                            struct ovs_key_ct_label *label)
+static void ovs_ct_get_labels(const struct nf_conn *ct,
+                             struct ovs_key_ct_labels *labels)
 {
        struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
 
        if (cl) {
                size_t len = cl->words * sizeof(long);
 
-               if (len > OVS_CT_LABEL_LEN)
-                       len = OVS_CT_LABEL_LEN;
-               else if (len < OVS_CT_LABEL_LEN)
-                       memset(label, 0, OVS_CT_LABEL_LEN);
-               memcpy(label, cl->bits, len);
+               if (len > OVS_CT_LABELS_LEN)
+                       len = OVS_CT_LABELS_LEN;
+               else if (len < OVS_CT_LABELS_LEN)
+                       memset(labels, 0, OVS_CT_LABELS_LEN);
+               memcpy(labels, cl->bits, len);
        } else {
-               memset(label, 0, OVS_CT_LABEL_LEN);
+               memset(labels, 0, OVS_CT_LABELS_LEN);
        }
 }
 
@@ -134,7 +134,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
        key->ct.state = state;
        key->ct.zone = zone->id;
        key->ct.mark = ovs_ct_get_mark(ct);
-       ovs_ct_get_label(ct, &key->ct.label);
+       ovs_ct_get_labels(ct, &key->ct.labels);
 }
 
 /* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has
@@ -151,6 +151,8 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
        ct = nf_ct_get(skb, &ctinfo);
        if (ct) {
                state = ovs_ct_get_state(ctinfo);
+               if (!nf_ct_is_confirmed(ct))
+                       state |= OVS_CS_F_NEW;
                if (ct->master)
                        state |= OVS_CS_F_RELATED;
                zone = nf_ct_zone(ct);
@@ -167,7 +169,7 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
 
 int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
 {
-       if (nla_put_u8(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
+       if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
                return -EMSGSIZE;
 
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
@@ -179,8 +181,8 @@ int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
                return -EMSGSIZE;
 
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           nla_put(skb, OVS_KEY_ATTR_CT_LABEL, sizeof(key->ct.label),
-                   &key->ct.label))
+           nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels),
+                   &key->ct.labels))
                return -EMSGSIZE;
 
        return 0;
@@ -213,18 +215,15 @@ static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key,
 #endif
 }
 
-static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key,
-                           const struct ovs_key_ct_label *label,
-                           const struct ovs_key_ct_label *mask)
+static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key,
+                            const struct ovs_key_ct_labels *labels,
+                            const struct ovs_key_ct_labels *mask)
 {
        enum ip_conntrack_info ctinfo;
        struct nf_conn_labels *cl;
        struct nf_conn *ct;
        int err;
 
-       if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS))
-               return -ENOTSUPP;
-
        /* The connection could be invalid, in which case set_labels is a no-op. */
        ct = nf_ct_get(skb, &ctinfo);
        if (!ct)
@@ -235,15 +234,15 @@ static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key,
                nf_ct_labels_ext_add(ct);
                cl = nf_ct_labels_find(ct);
        }
-       if (!cl || cl->words * sizeof(long) < OVS_CT_LABEL_LEN)
+       if (!cl || cl->words * sizeof(long) < OVS_CT_LABELS_LEN)
                return -ENOSPC;
 
-       err = nf_connlabels_replace(ct, (u32 *)label, (u32 *)mask,
-                                   OVS_CT_LABEL_LEN / sizeof(u32));
+       err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask,
+                                   OVS_CT_LABELS_LEN / sizeof(u32));
        if (err)
                return err;
 
-       ovs_ct_get_label(ct, &key->ct.label);
+       ovs_ct_get_labels(ct, &key->ct.labels);
        return 0;
 }
 
@@ -377,7 +376,7 @@ static bool skb_nfct_cached(const struct net *net, const struct sk_buff *skb,
        return true;
 }
 
-static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
+static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                           const struct ovs_conntrack_info *info,
                           struct sk_buff *skb)
 {
@@ -408,6 +407,8 @@ static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
                }
        }
 
+       ovs_ct_update_key(skb, key, true);
+
        return 0;
 }
 
@@ -430,8 +431,6 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                err = __ovs_ct_lookup(net, key, info, skb);
                if (err)
                        return err;
-
-               ovs_ct_update_key(skb, key, true);
        }
 
        return 0;
@@ -460,17 +459,15 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
        if (nf_conntrack_confirm(skb) != NF_ACCEPT)
                return -EINVAL;
 
-       ovs_ct_update_key(skb, key, true);
-
        return 0;
 }
 
-static bool label_nonzero(const struct ovs_key_ct_label *label)
+static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
 {
        size_t i;
 
-       for (i = 0; i < sizeof(*label); i++)
-               if (label->ct_label[i])
+       for (i = 0; i < sizeof(*labels); i++)
+               if (labels->ct_labels[i])
                        return true;
 
        return false;
@@ -493,7 +490,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
                        return err;
        }
 
-       if (info->flags & OVS_CT_F_COMMIT)
+       if (info->commit)
                err = ovs_ct_commit(net, key, info, skb);
        else
                err = ovs_ct_lookup(net, key, info, skb);
@@ -506,9 +503,9 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
                if (err)
                        goto err;
        }
-       if (label_nonzero(&info->label.mask))
-               err = ovs_ct_set_label(skb, key, &info->label.value,
-                                      &info->label.mask);
+       if (labels_nonzero(&info->labels.mask))
+               err = ovs_ct_set_labels(skb, key, &info->labels.value,
+                                       &info->labels.mask);
 err:
        skb_push(skb, nh_ofs);
        return err;
@@ -539,14 +536,13 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
 }
 
 static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
-       [OVS_CT_ATTR_FLAGS]     = { .minlen = sizeof(u32),
-                                   .maxlen = sizeof(u32) },
+       [OVS_CT_ATTR_COMMIT]    = { .minlen = 0, .maxlen = 0 },
        [OVS_CT_ATTR_ZONE]      = { .minlen = sizeof(u16),
                                    .maxlen = sizeof(u16) },
        [OVS_CT_ATTR_MARK]      = { .minlen = sizeof(struct md_mark),
                                    .maxlen = sizeof(struct md_mark) },
-       [OVS_CT_ATTR_LABEL]     = { .minlen = sizeof(struct md_label),
-                                   .maxlen = sizeof(struct md_label) },
+       [OVS_CT_ATTR_LABELS]    = { .minlen = sizeof(struct md_labels),
+                                   .maxlen = sizeof(struct md_labels) },
        [OVS_CT_ATTR_HELPER]    = { .minlen = 1,
                                    .maxlen = NF_CT_HELPER_NAME_LEN }
 };
@@ -576,8 +572,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
                }
 
                switch (type) {
-               case OVS_CT_ATTR_FLAGS:
-                       info->flags = nla_get_u32(a);
+               case OVS_CT_ATTR_COMMIT:
+                       info->commit = true;
                        break;
 #ifdef CONFIG_NF_CONNTRACK_ZONES
                case OVS_CT_ATTR_ZONE:
@@ -588,15 +584,23 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
                case OVS_CT_ATTR_MARK: {
                        struct md_mark *mark = nla_data(a);
 
+                       if (!mark->mask) {
+                               OVS_NLERR(log, "ct_mark mask cannot be 0");
+                               return -EINVAL;
+                       }
                        info->mark = *mark;
                        break;
                }
 #endif
 #ifdef CONFIG_NF_CONNTRACK_LABELS
-               case OVS_CT_ATTR_LABEL: {
-                       struct md_label *label = nla_data(a);
+               case OVS_CT_ATTR_LABELS: {
+                       struct md_labels *labels = nla_data(a);
 
-                       info->label = *label;
+                       if (!labels_nonzero(&labels->mask)) {
+                               OVS_NLERR(log, "ct_labels mask cannot be 0");
+                               return -EINVAL;
+                       }
+                       info->labels = *labels;
                        break;
                }
 #endif
@@ -633,7 +637,7 @@ bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
            attr == OVS_KEY_ATTR_CT_MARK)
                return true;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           attr == OVS_KEY_ATTR_CT_LABEL) {
+           attr == OVS_KEY_ATTR_CT_LABELS) {
                struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 
                return ovs_net->xt_label;
@@ -701,18 +705,19 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
        if (!start)
                return -EMSGSIZE;
 
-       if (nla_put_u32(skb, OVS_CT_ATTR_FLAGS, ct_info->flags))
+       if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
            nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
                return -EMSGSIZE;
-       if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+       if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
            nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
                    &ct_info->mark))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           nla_put(skb, OVS_CT_ATTR_LABEL, sizeof(ct_info->label),
-                   &ct_info->label))
+           labels_nonzero(&ct_info->labels.mask) &&
+           nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
+                   &ct_info->labels))
                return -EMSGSIZE;
        if (ct_info->helper) {
                if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
@@ -737,7 +742,7 @@ void ovs_ct_free_action(const struct nlattr *a)
 
 void ovs_ct_init(struct net *net)
 {
-       unsigned int n_bits = sizeof(struct ovs_key_ct_label) * BITS_PER_BYTE;
+       unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 
        if (nf_connlabels_get(net, n_bits)) {
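
A side note on OVS_CT_ATTR_COMMIT, whose policy entry above is { .minlen = 0, .maxlen = 0 }: replacing the u32 flags word with a presence-only netlink attribute means the payload is empty and existence alone is the boolean. Both sides of such a flag attribute, sketched with an illustrative helper:

    /* Emit: nothing at all when false, a zero-length attribute when true. */
    static int example_put_commit(struct sk_buff *skb, bool commit)
    {
            if (commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT))
                    return -EMSGSIZE;
            return 0;
    }

    /* Parse: presence sets the bit; there is no payload to read.
     *        case OVS_CT_ATTR_COMMIT:
     *                info->commit = true;
     *                break;
     */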
index 43f5dd7a55774414aeb7aad8c0560db3e0596035..82e0dfc660280cbd24bfd61ac57c0523762b5998 100644 (file)
@@ -34,6 +34,10 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
 void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
 int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
 void ovs_ct_free_action(const struct nlattr *a);
+
+#define CT_SUPPORTED_MASK (OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | \
+                          OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR | \
+                          OVS_CS_F_INVALID | OVS_CS_F_TRACKED)
 #else
 #include <linux/errno.h>
 
@@ -72,7 +76,7 @@ static inline void ovs_ct_fill_key(const struct sk_buff *skb,
        key->ct.state = 0;
        key->ct.zone = 0;
        key->ct.mark = 0;
-       memset(&key->ct.label, 0, sizeof(key->ct.label));
+       memset(&key->ct.labels, 0, sizeof(key->ct.labels));
 }
 
 static inline int ovs_ct_put_key(const struct sw_flow_key *key,
@@ -82,5 +86,7 @@ static inline int ovs_ct_put_key(const struct sw_flow_key *key,
 }
 
 static inline void ovs_ct_free_action(const struct nlattr *a) { }
+
+#define CT_SUPPORTED_MASK 0
 #endif /* CONFIG_NF_CONNTRACK */
 #endif /* ovs_conntrack.h */
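
CT_SUPPORTED_MASK does double duty in flow_netlink.c below: metadata_from_nlattrs() rejects any ct_state bit this kernel does not implement, and nlattr_set() masks wildcarded ct_state so a fully-wildcarded flow never advertises unknown bits back to userspace. Defining it as 0 in the !CONFIG_NF_CONNTRACK branch keeps both call sites compiling while rejecting every flag. The validation side, in isolation:

    static int example_check_ct_state(u32 ct_state, bool log)
    {
            if (ct_state & ~CT_SUPPORTED_MASK) {
                    OVS_NLERR(log, "ct_state flags %08x unsupported",
                              ct_state);
                    return -EINVAL;     /* fail loudly, don't silently drop */
            }
            return 0;
    }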
index a75828091e21fc477142d78d25accbd9a7cb5563..5633172b791ab98e297ba1605c34bf42e684ccfb 100644 (file)
@@ -489,9 +489,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        if (upcall_info->egress_tun_info) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
-               err = ovs_nla_put_egress_tunnel_key(user_skb,
-                                                   upcall_info->egress_tun_info,
-                                                   upcall_info->egress_tun_opts);
+               err = ovs_nla_put_tunnel_info(user_skb,
+                                             upcall_info->egress_tun_info);
                BUG_ON(err);
                nla_nest_end(user_skb, nla);
        }
index f88038a99f4442bb753b4fc0cb3cc6c05c2bc2fc..67bdecd9fdc1f2b0544c2081aa1a557b16e0063a 100644 (file)
@@ -117,7 +117,6 @@ struct ovs_skb_cb {
  */
 struct dp_upcall_info {
        struct ip_tunnel_info *egress_tun_info;
-       const void *egress_tun_opts;
        const struct nlattr *userdata;
        const struct nlattr *actions;
        int actions_len;
index 5688e33e2de6192c414f7a1c0686a63941f5076b..1d055c559eafb118043ed21c66916d7313c41d8d 100644 (file)
@@ -117,7 +117,7 @@ struct sw_flow_key {
                u16 zone;
                u32 mark;
                u8 state;
-               struct ovs_key_ct_label label;
+               struct ovs_key_ct_labels labels;
        } ct;
 
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
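
The __aligned(BITS_PER_LONG/8) on sw_flow_key matters to this rename as well: the flow table masks and compares keys in long-sized strides, so ct.labels (like every member) has to keep the structure a whole number of longs. Roughly what that comparison looks like, as a hedged sketch rather than the flow table's exact code:

    static bool example_key_equal(const struct sw_flow_key *a,
                                  const struct sw_flow_key *b, size_t len)
    {
            const long *pa = (const long *)a;
            const long *pb = (const long *)b;
            size_t i;

            for (i = 0; i < len / sizeof(long); i++)
                    if (pa[i] != pb[i])
                            return false;
            return true;
    }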
index 77850f177a47c68a9c17447e773d0c29c6d54b4e..907d6fd28ede695cc1b876570c101883ed0b4b0e 100644 (file)
@@ -291,10 +291,10 @@ size_t ovs_key_attr_size(void)
                + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
                + nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
                + nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
-               + nla_total_size(1)   /* OVS_KEY_ATTR_CT_STATE */
+               + nla_total_size(4)   /* OVS_KEY_ATTR_CT_STATE */
                + nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
-               + nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABEL */
+               + nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
                + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
@@ -351,10 +351,10 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
        [OVS_KEY_ATTR_TUNNEL]    = { .len = OVS_ATTR_NESTED,
                                     .next = ovs_tunnel_key_lens, },
        [OVS_KEY_ATTR_MPLS]      = { .len = sizeof(struct ovs_key_mpls) },
-       [OVS_KEY_ATTR_CT_STATE]  = { .len = sizeof(u8) },
+       [OVS_KEY_ATTR_CT_STATE]  = { .len = sizeof(u32) },
        [OVS_KEY_ATTR_CT_ZONE]   = { .len = sizeof(u16) },
        [OVS_KEY_ATTR_CT_MARK]   = { .len = sizeof(u32) },
-       [OVS_KEY_ATTR_CT_LABEL]  = { .len = sizeof(struct ovs_key_ct_label) },
+       [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
 };
 
 static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
@@ -548,11 +548,11 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
                              struct sw_flow_match *match, bool is_mask,
                              bool log)
 {
+       bool ttl = false, ipv4 = false, ipv6 = false;
+       __be16 tun_flags = 0;
+       int opts_type = 0;
        struct nlattr *a;
        int rem;
-       bool ttl = false;
-       __be16 tun_flags = 0, ipv4 = false, ipv6 = false;
-       int opts_type = 0;
 
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
@@ -764,7 +764,7 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
        if ((output->tun_flags & TUNNEL_OAM) &&
            nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
                return -EMSGSIZE;
-       if (tun_opts) {
+       if (swkey_tun_opts_len) {
                if (output->tun_flags & TUNNEL_GENEVE_OPT &&
                    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
                            swkey_tun_opts_len, tun_opts))
@@ -798,14 +798,13 @@ static int ip_tun_to_nlattr(struct sk_buff *skb,
        return 0;
 }
 
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
-                                 const struct ip_tunnel_info *egress_tun_info,
-                                 const void *egress_tun_opts)
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+                           struct ip_tunnel_info *tun_info)
 {
-       return __ip_tun_to_nlattr(skb, &egress_tun_info->key,
-                                 egress_tun_opts,
-                                 egress_tun_info->options_len,
-                                 ip_tunnel_info_af(egress_tun_info));
+       return __ip_tun_to_nlattr(skb, &tun_info->key,
+                                 ip_tunnel_info_opts(tun_info),
+                                 tun_info->options_len,
+                                 ip_tunnel_info_af(tun_info));
 }
 
 static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
@@ -864,7 +863,13 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
 
        if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
            ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
-               u8 ct_state = nla_get_u8(a[OVS_KEY_ATTR_CT_STATE]);
+               u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
+
+               if (ct_state & ~CT_SUPPORTED_MASK) {
+                       OVS_NLERR(log, "ct_state flags %08x unsupported",
+                                 ct_state);
+                       return -EINVAL;
+               }
 
                SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
                *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
@@ -883,14 +888,14 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
                SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
                *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
        }
-       if (*attrs & (1 << OVS_KEY_ATTR_CT_LABEL) &&
-           ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABEL)) {
-               const struct ovs_key_ct_label *cl;
+       if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
+           ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
+               const struct ovs_key_ct_labels *cl;
 
-               cl = nla_data(a[OVS_KEY_ATTR_CT_LABEL]);
-               SW_FLOW_KEY_MEMCPY(match, ct.label, cl->ct_label,
+               cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
+               SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
                                   sizeof(*cl), is_mask);
-               *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABEL);
+               *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
        }
        return 0;
 }
@@ -1143,6 +1148,9 @@ static void nlattr_set(struct nlattr *attr, u8 val,
                } else {
                        memset(nla_data(nla), val, nla_len(nla));
                }
+
+               if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
+                       *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
        }
 }
 
@@ -2025,7 +2033,7 @@ static int validate_set(const struct nlattr *a,
        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_SKB_MARK:
        case OVS_KEY_ATTR_CT_MARK:
-       case OVS_KEY_ATTR_CT_LABEL:
+       case OVS_KEY_ATTR_CT_LABELS:
        case OVS_KEY_ATTR_ETHERNET:
                break;
 
@@ -2426,11 +2434,7 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
                if (!start)
                        return -EMSGSIZE;
 
-               err = ip_tun_to_nlattr(skb, &tun_info->key,
-                                      tun_info->options_len ?
-                                            ip_tunnel_info_opts(tun_info) : NULL,
-                                      tun_info->options_len,
-                                      ip_tunnel_info_af(tun_info));
+               err = ovs_nla_put_tunnel_info(skb, tun_info);
                if (err)
                        return err;
                nla_nest_end(skb, start);
index 6ca3f0baf449f05f82dd92f9796cd8cfa7abf141..47dd142eca1c0856c74406e1d75682fc822861f6 100644 (file)
@@ -55,9 +55,9 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
 int ovs_nla_get_match(struct net *, struct sw_flow_match *,
                      const struct nlattr *key, const struct nlattr *mask,
                      bool log);
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
-                                 const struct ip_tunnel_info *,
-                                 const void *egress_tun_opts);
+
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+                           struct ip_tunnel_info *tun_info);
 
 bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
index 95dbcedf0bd4422f927956b042ca469eeba56ea7..d073fff82fdb8c6c8d39b57690d37eedeb12423a 100644 (file)
@@ -93,7 +93,8 @@ struct sw_flow *ovs_flow_alloc(void)
 
        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
-                                     GFP_KERNEL | __GFP_ZERO, 0);
+                                     GFP_KERNEL | __GFP_ZERO,
+                                     node_online(0) ? 0 : NUMA_NO_NODE);
        if (!stats)
                goto err;
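
The flow_table.c hunk guards against machines where node 0 is offline or memoryless: kmem_cache_alloc_node() with a hardcoded node 0 can fail outright there, while NUMA_NO_NODE lets the allocator choose any node. The guard in isolation:

    /* Never hardcode node 0 for node-bound allocations. */
    int node = node_online(0) ? 0 : NUMA_NO_NODE;

    stats = kmem_cache_alloc_node(flow_stats_cache,
                                  GFP_KERNEL | __GFP_ZERO, node);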
 
index 2735e9c4a3b88586165ef5644e429cf28079974d..efb736bb685545a0cb6a323d3eca87fc54eeb9f4 100644 (file)
@@ -52,18 +52,6 @@ static int geneve_get_options(const struct vport *vport,
        return 0;
 }
 
-static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                     struct dp_upcall_info *upcall)
-{
-       struct geneve_port *geneve_port = geneve_vport(vport);
-       struct net *net = ovs_dp_get_net(vport->dp);
-       __be16 dport = htons(geneve_port->port_no);
-       __be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
-
-       return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
-                                         skb, IPPROTO_UDP, sport, dport);
-}
-
 static struct vport *geneve_tnl_create(const struct vport_parms *parms)
 {
        struct net *net = ovs_dp_get_net(parms->dp);
@@ -128,9 +116,8 @@ static struct vport_ops ovs_geneve_vport_ops = {
        .create         = geneve_create,
        .destroy        = ovs_netdev_tunnel_destroy,
        .get_options    = geneve_get_options,
-       .send           = ovs_netdev_send,
+       .send           = dev_queue_xmit,
        .owner          = THIS_MODULE,
-       .get_egress_tun_info    = geneve_get_egress_tun_info,
 };
 
 static int __init ovs_geneve_tnl_init(void)
index 4d24481669c95197b06bb75d207b3e713b433508..c3257d78d3d28e6ed06e1be9c2e4a4f452c52886 100644 (file)
@@ -84,18 +84,10 @@ static struct vport *gre_create(const struct vport_parms *parms)
        return ovs_netdev_link(vport, parms->name);
 }
 
-static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                  struct dp_upcall_info *upcall)
-{
-       return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
-                                         skb, IPPROTO_GRE, 0, 0);
-}
-
 static struct vport_ops ovs_gre_vport_ops = {
        .type           = OVS_VPORT_TYPE_GRE,
        .create         = gre_create,
-       .send           = ovs_netdev_send,
-       .get_egress_tun_info    = gre_get_egress_tun_info,
+       .send           = dev_queue_xmit,
        .destroy        = ovs_netdev_tunnel_destroy,
        .owner          = THIS_MODULE,
 };
index 388b8a6bf112979f7f7291c5bb17fd6c7027e594..ec76398a792fbb7451c53b958304a2e001704604 100644 (file)
@@ -106,12 +106,45 @@ static void internal_dev_destructor(struct net_device *dev)
        free_netdev(dev);
 }
 
+static struct rtnl_link_stats64 *
+internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+       stats->rx_errors  = dev->stats.rx_errors;
+       stats->tx_errors  = dev->stats.tx_errors;
+       stats->tx_dropped = dev->stats.tx_dropped;
+       stats->rx_dropped = dev->stats.rx_dropped;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_sw_netstats *percpu_stats;
+               struct pcpu_sw_netstats local_stats;
+               unsigned int start;
+
+               percpu_stats = per_cpu_ptr(dev->tstats, i);
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
+                       local_stats = *percpu_stats;
+               } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
+
+               stats->rx_bytes         += local_stats.rx_bytes;
+               stats->rx_packets       += local_stats.rx_packets;
+               stats->tx_bytes         += local_stats.tx_bytes;
+               stats->tx_packets       += local_stats.tx_packets;
+       }
+
+       return stats;
+}
+
 static const struct net_device_ops internal_dev_netdev_ops = {
        .ndo_open = internal_dev_open,
        .ndo_stop = internal_dev_stop,
        .ndo_start_xmit = internal_dev_xmit,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_change_mtu = internal_dev_change_mtu,
+       .ndo_get_stats64 = internal_get_stats,
 };
 
 static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -161,6 +194,11 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
                err = -ENOMEM;
                goto error_free_vport;
        }
+       vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+       if (!vport->dev->tstats) {
+               err = -ENOMEM;
+               goto error_free_netdev;
+       }
 
        dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
        internal_dev = internal_dev_priv(vport->dev);
@@ -173,7 +211,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
        rtnl_lock();
        err = register_netdevice(vport->dev);
        if (err)
-               goto error_free_netdev;
+               goto error_unlock;
 
        dev_set_promiscuity(vport->dev, 1);
        rtnl_unlock();
@@ -181,8 +219,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
        return vport;
 
-error_free_netdev:
+error_unlock:
        rtnl_unlock();
+       free_percpu(vport->dev->tstats);
+error_free_netdev:
        free_netdev(vport->dev);
 error_free_vport:
        ovs_vport_free(vport);
@@ -198,26 +238,25 @@ static void internal_dev_destroy(struct vport *vport)
 
        /* unregister_netdevice() waits for an RCU grace period. */
        unregister_netdevice(vport->dev);
-
+       free_percpu(vport->dev->tstats);
        rtnl_unlock();
 }
 
-static void internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
 {
-       struct net_device *netdev = vport->dev;
+       struct net_device *netdev = skb->dev;
        struct pcpu_sw_netstats *stats;
 
        if (unlikely(!(netdev->flags & IFF_UP))) {
                kfree_skb(skb);
                netdev->stats.rx_dropped++;
-               return;
+               return NETDEV_TX_OK;
        }
 
        skb_dst_drop(skb);
        nf_reset(skb);
        secpath_reset(skb);
 
-       skb->dev = netdev;
        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, netdev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
@@ -229,6 +268,7 @@ static void internal_dev_recv(struct vport *vport, struct sk_buff *skb)
        u64_stats_update_end(&stats->syncp);
 
        netif_rx(skb);
+       return NETDEV_TX_OK;
 }
 
 static struct vport_ops ovs_internal_vport_ops = {
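
internal_get_stats() aggregates the per-CPU counters with the u64_stats seqcount protocol, which is what makes the 64-bit counters tear-free even on 32-bit kernels. The two halves of that protocol side by side, with illustrative function names (the writer half is visible in internal_dev_recv() above):

    /* Writer: per packet, local CPU only. */
    static void example_count_rx(struct pcpu_sw_netstats *stats,
                                 unsigned int len)
    {
            u64_stats_update_begin(&stats->syncp);
            stats->rx_packets++;
            stats->rx_bytes += len;
            u64_stats_update_end(&stats->syncp);
    }

    /* Reader: any CPU; retry the snapshot if a writer was mid-update. */
    static struct pcpu_sw_netstats
    example_snapshot(const struct pcpu_sw_netstats *stats)
    {
            struct pcpu_sw_netstats snap;
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_irq(&stats->syncp);
                    snap = *stats;
            } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

            return snap;
    }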
index f7e8dcce7adaec0e162db7f33f7b84e5e29d69cc..b327368a3848238013cf0f6f62445569d7e29251 100644 (file)
@@ -190,37 +190,6 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
 }
 EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
 
-static unsigned int packet_length(const struct sk_buff *skb)
-{
-       unsigned int length = skb->len - ETH_HLEN;
-
-       if (skb->protocol == htons(ETH_P_8021Q))
-               length -= VLAN_HLEN;
-
-       return length;
-}
-
-void ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
-{
-       int mtu = vport->dev->mtu;
-
-       if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
-               net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
-                                    vport->dev->name,
-                                    packet_length(skb), mtu);
-               vport->dev->stats.tx_errors++;
-               goto drop;
-       }
-
-       skb->dev = vport->dev;
-       dev_queue_xmit(skb);
-       return;
-
-drop:
-       kfree_skb(skb);
-}
-EXPORT_SYMBOL_GPL(ovs_netdev_send);
-
 /* Returns null if this device is not attached to a datapath. */
 struct vport *ovs_netdev_get_vport(struct net_device *dev)
 {
@@ -235,7 +204,7 @@ static struct vport_ops ovs_netdev_vport_ops = {
        .type           = OVS_VPORT_TYPE_NETDEV,
        .create         = netdev_create,
        .destroy        = netdev_destroy,
-       .send           = ovs_netdev_send,
+       .send           = dev_queue_xmit,
 };
 
 int __init ovs_netdev_init(void)
index bf22fcedbc69c3dd67312eef70251ef26c4d0925..19e29c12adcc562ed82b27563a46608ad1b95f33 100644 (file)
@@ -27,7 +27,6 @@
 struct vport *ovs_netdev_get_vport(struct net_device *dev);
 
 struct vport *ovs_netdev_link(struct vport *vport, const char *name);
-void ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
 void ovs_netdev_detach_dev(struct vport *);
 
 int __init ovs_netdev_init(void);
index fb3cdb85905d5a0e660ed135e4cc0d74169791d4..1605691d94144aee0fc50ffb17be05eca2b59675 100644 (file)
@@ -146,32 +146,12 @@ static struct vport *vxlan_create(const struct vport_parms *parms)
        return ovs_netdev_link(vport, parms->name);
 }
 
-static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                    struct dp_upcall_info *upcall)
-{
-       struct vxlan_dev *vxlan = netdev_priv(vport->dev);
-       struct net *net = ovs_dp_get_net(vport->dp);
-       unsigned short family = ip_tunnel_info_af(upcall->egress_tun_info);
-       __be16 dst_port = vxlan_dev_dst_port(vxlan, family);
-       __be16 src_port;
-       int port_min;
-       int port_max;
-
-       inet_get_local_port_range(net, &port_min, &port_max);
-       src_port = udp_flow_src_port(net, skb, 0, 0, true);
-
-       return ovs_tunnel_get_egress_info(upcall, net,
-                                         skb, IPPROTO_UDP,
-                                         src_port, dst_port);
-}
-
 static struct vport_ops ovs_vxlan_netdev_vport_ops = {
        .type                   = OVS_VPORT_TYPE_VXLAN,
        .create                 = vxlan_create,
        .destroy                = ovs_netdev_tunnel_destroy,
        .get_options            = vxlan_get_options,
-       .send                   = ovs_netdev_send,
-       .get_egress_tun_info    = vxlan_get_egress_tun_info,
+       .send                   = dev_queue_xmit,
 };
 
 static int __init ovs_vxlan_tnl_init(void)
index dc81dc619aa2344a5c7912def9d6852fcd37ebda..0ac0fd004d7ed885c009560d966da5b29b47f242 100644 (file)
@@ -280,35 +280,19 @@ void ovs_vport_del(struct vport *vport)
  */
 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 {
-       struct net_device *dev = vport->dev;
-       int i;
-
-       memset(stats, 0, sizeof(*stats));
-       stats->rx_errors  = dev->stats.rx_errors;
-       stats->tx_errors  = dev->stats.tx_errors;
-       stats->tx_dropped = dev->stats.tx_dropped;
-       stats->rx_dropped = dev->stats.rx_dropped;
-
-       stats->rx_dropped += atomic_long_read(&dev->rx_dropped);
-       stats->tx_dropped += atomic_long_read(&dev->tx_dropped);
-
-       for_each_possible_cpu(i) {
-               const struct pcpu_sw_netstats *percpu_stats;
-               struct pcpu_sw_netstats local_stats;
-               unsigned int start;
-
-               percpu_stats = per_cpu_ptr(dev->tstats, i);
-
-               do {
-                       start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
-                       local_stats = *percpu_stats;
-               } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
-
-               stats->rx_bytes         += local_stats.rx_bytes;
-               stats->rx_packets       += local_stats.rx_packets;
-               stats->tx_bytes         += local_stats.tx_bytes;
-               stats->tx_packets       += local_stats.tx_packets;
-       }
+       const struct rtnl_link_stats64 *dev_stats;
+       struct rtnl_link_stats64 temp;
+
+       dev_stats = dev_get_stats(vport->dev, &temp);
+       stats->rx_errors  = dev_stats->rx_errors;
+       stats->tx_errors  = dev_stats->tx_errors;
+       stats->tx_dropped = dev_stats->tx_dropped;
+       stats->rx_dropped = dev_stats->rx_dropped;
+
+       stats->rx_bytes   = dev_stats->rx_bytes;
+       stats->rx_packets = dev_stats->rx_packets;
+       stats->tx_bytes   = dev_stats->tx_bytes;
+       stats->tx_packets = dev_stats->tx_packets;
 }
 
 /**
@@ -460,6 +444,15 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
 
        OVS_CB(skb)->input_vport = vport;
        OVS_CB(skb)->mru = 0;
+       if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
+               u32 mark;
+
+               mark = skb->mark;
+               skb_scrub_packet(skb, true);
+               skb->mark = mark;
+               tun_info = NULL;
+       }
+
        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_key_extract(tun_info, skb, &key);
        if (unlikely(error)) {
@@ -487,60 +480,32 @@ void ovs_vport_deferred_free(struct vport *vport)
 }
 EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
 
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
-                              struct net *net,
-                              struct sk_buff *skb,
-                              u8 ipproto,
-                              __be16 tp_src,
-                              __be16 tp_dst)
+static unsigned int packet_length(const struct sk_buff *skb)
 {
-       struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
-       const struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
-       const struct ip_tunnel_key *tun_key;
-       u32 skb_mark = skb->mark;
-       struct rtable *rt;
-       struct flowi4 fl;
-
-       if (unlikely(!tun_info))
-               return -EINVAL;
-       if (ip_tunnel_info_af(tun_info) != AF_INET)
-               return -EINVAL;
-
-       tun_key = &tun_info->key;
+       unsigned int length = skb->len - ETH_HLEN;
 
-       /* Route lookup to get source IP address.
-        * The process may need to be changed if the corresponding process
-        * in vports ops changed.
-        */
-       rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
-       if (IS_ERR(rt))
-               return PTR_ERR(rt);
-
-       ip_rt_put(rt);
+       if (skb->protocol == htons(ETH_P_8021Q))
+               length -= VLAN_HLEN;
 
-       /* Generate egress_tun_info based on tun_info,
-        * saddr, tp_src and tp_dst
-        */
-       ip_tunnel_key_init(&egress_tun_info->key,
-                          fl.saddr, tun_key->u.ipv4.dst,
-                          tun_key->tos,
-                          tun_key->ttl,
-                          tp_src, tp_dst,
-                          tun_key->tun_id,
-                          tun_key->tun_flags);
-       egress_tun_info->options_len = tun_info->options_len;
-       egress_tun_info->mode = tun_info->mode;
-       upcall->egress_tun_opts = ip_tunnel_info_opts(egress_tun_info);
-       return 0;
+       return length;
 }
-EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
 
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                 struct dp_upcall_info *upcall)
+void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
 {
-       /* get_egress_tun_info() is only implemented on tunnel ports. */
-       if (unlikely(!vport->ops->get_egress_tun_info))
-               return -EINVAL;
+       int mtu = vport->dev->mtu;
+
+       if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
+               net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
+                                    vport->dev->name,
+                                    packet_length(skb), mtu);
+               vport->dev->stats.tx_errors++;
+               goto drop;
+       }
+
+       skb->dev = vport->dev;
+       vport->ops->send(skb);
+       return;
 
-       return vport->ops->get_egress_tun_info(vport, skb, upcall);
+drop:
+       kfree_skb(skb);
 }
index a413f3ae6a7b540ed7b34fd4b31f69424caeb39f..bdfd82a7c064948dc1dc83acbc85b6534c1bcf9b 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/u64_stats_sync.h>
-#include <net/route.h>
 
 #include "datapath.h"
 
@@ -53,16 +52,6 @@ int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids);
 int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
 u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
 
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
-                              struct net *net,
-                              struct sk_buff *,
-                              u8 ipproto,
-                              __be16 tp_src,
-                              __be16 tp_dst);
-
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                 struct dp_upcall_info *upcall);
-
 /**
  * struct vport_portids - array of netlink portids of a vport.
  *                        must be protected by rcu.
@@ -140,8 +129,6 @@ struct vport_parms {
  * have any configuration.
  * @send: Send a packet on the device. Returns the length of the packet sent,
  * zero for dropped packets or negative for error.
- * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
- * a packet.
  */
 struct vport_ops {
        enum ovs_vport_type type;
@@ -153,10 +140,7 @@ struct vport_ops {
        int (*set_options)(struct vport *, struct nlattr *);
        int (*get_options)(const struct vport *, struct sk_buff *);
 
-       void (*send)(struct vport *, struct sk_buff *);
-       int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
-                                  struct dp_upcall_info *upcall);
-
+       netdev_tx_t (*send) (struct sk_buff *skb);
        struct module *owner;
        struct list_head list;
 };
@@ -234,9 +218,6 @@ static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
        return rt;
 }
 
-static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
-{
-       vport->ops->send(vport, skb);
-}
+void ovs_vport_send(struct vport *vport, struct sk_buff *skb);
 
 #endif /* vport.h */
index bc6b93ecedb512bdcd75a9e765b2bd31cf2f0e81..61925667b7a43ca33fd2d2b692a741ee059c6e18 100644 (file)
@@ -196,7 +196,14 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
 
        if (rs->rs_transport) { /* previously bound */
-               ret = 0;
+               trans = rs->rs_transport;
+               if (trans->laddr_check(sock_net(sock->sk),
+                                      sin->sin_addr.s_addr) != 0) {
+                       ret = -ENOPROTOOPT;
+                       rds_remove_bound(rs);
+               } else {
+                       ret = 0;
+               }
                goto out;
        }
        trans = rds_trans_get_preferred(sock_net(sock->sk),
index ee49c2556f4715ee7ad16cc4a4e376b9467af842..827155c2ead10376cb633c45c2f43917f5f5cd12 100644 (file)
@@ -1182,9 +1182,8 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
        rds_stats_inc(s_send_queued);
        rds_stats_inc(s_send_pong);
 
-       ret = rds_send_xmit(conn);
-       if (ret == -ENOMEM || ret == -EAGAIN)
-               queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+       /* schedule the send work on rds_wq */
+       queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 
        rds_message_put(rm);
        return 0;
index 1d90240e5d82d3c5c2d52c7f55c360a6f8aa7677..0936a4a32b476fdde5c7208fc465ec3324bbcf09 100644 (file)
@@ -125,6 +125,9 @@ int rds_tcp_accept_one(struct socket *sock)
                new_sock = NULL;
                ret = 0;
                goto out;
+       } else if (rs_tcp->t_sock) {
+               rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+               conn->c_outgoing = 0;
        }
 
        rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
index 2d1be4a760fdc4361f23d0aa93a861298eaafe45..32fcdecdb9e2074bad6f3e3002738e9c289317c3 100644 (file)
 
 #define MIRRED_TAB_MASK     7
 static LIST_HEAD(mirred_list);
+static DEFINE_SPINLOCK(mirred_list_lock);
 
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
        struct tcf_mirred *m = to_mirred(a);
        struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
 
+       /* We could be called either from an RCU callback or with the RTNL lock held. */
+       spin_lock_bh(&mirred_list_lock);
        list_del(&m->tcfm_list);
+       spin_unlock_bh(&mirred_list_lock);
        if (dev)
                dev_put(dev);
 }
@@ -103,10 +107,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        } else {
                if (bind)
                        return 0;
-               if (!ovr) {
-                       tcf_hash_release(a, bind);
+
+               tcf_hash_release(a, bind);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
        m = to_mirred(a);
 
@@ -123,7 +127,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        }
 
        if (ret == ACT_P_CREATED) {
+               spin_lock_bh(&mirred_list_lock);
                list_add(&m->tcfm_list, &mirred_list);
+               spin_unlock_bh(&mirred_list_lock);
                tcf_hash_insert(a);
        }
 
@@ -173,6 +179,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
        skb2->skb_iif = skb->dev->ifindex;
        skb2->dev = dev;
+       skb_sender_cpu_clear(skb2);
        err = dev_queue_xmit(skb2);
 
        if (err) {
@@ -221,7 +228,8 @@ static int mirred_device_event(struct notifier_block *unused,
        struct tcf_mirred *m;
 
        ASSERT_RTNL();
-       if (event == NETDEV_UNREGISTER)
+       if (event == NETDEV_UNREGISTER) {
+               spin_lock_bh(&mirred_list_lock);
                list_for_each_entry(m, &mirred_list, tcfm_list) {
                        if (rcu_access_pointer(m->tcfm_dev) == dev) {
                                dev_put(dev);
@@ -231,6 +239,8 @@ static int mirred_device_event(struct notifier_block *unused,
                                RCU_INIT_POINTER(m->tcfm_dev, NULL);
                        }
                }
+               spin_unlock_bh(&mirred_list_lock);
+       }
 
        return NOTIFY_DONE;
 }
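
The new mirred_list_lock exists because tcf_mirred_release() can run from an RCU callback, where the RTNL lock that serializes the netdev notifier walk above is not held; the global list therefore needs its own spinlock. The generic shape of the pattern, sketched with hypothetical foo_* names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static LIST_HEAD(foo_list);
    static DEFINE_SPINLOCK(foo_list_lock);

    /* callable from an RCU callback and process context alike */
    static void foo_unlink(struct list_head *entry)
    {
            spin_lock_bh(&foo_list_lock);
            list_del(entry);
            spin_unlock_bh(&foo_list_lock);
    }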
index 9d15cb6b8cb1f5e8424e96f6245e9dd206d92405..86b04e31e60b76027214b85ee0c4c0e0de1b04c4 100644 (file)
@@ -368,6 +368,15 @@ static unsigned int hhf_drop(struct Qdisc *sch)
        return bucket - q->buckets;
 }
 
+static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
+{
+       unsigned int prev_backlog;
+
+       prev_backlog = sch->qstats.backlog;
+       hhf_drop(sch);
+       return prev_backlog - sch->qstats.backlog;
+}
+
 static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
@@ -696,7 +705,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
        .enqueue        =       hhf_enqueue,
        .dequeue        =       hhf_dequeue,
        .peek           =       qdisc_peek_dequeued,
-       .drop           =       hhf_drop,
+       .drop           =       hhf_qdisc_drop,
        .init           =       hhf_init,
        .reset          =       hhf_reset,
        .destroy        =       hhf_destroy,
index cb51742840740f790d24797e585e7fb520646a09..f0c3ff67ca987427136baebf67034ad3bf58a27f 100644 (file)
@@ -136,7 +136,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->read_hdr = head;
        pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
-       read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
+       read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
+                    rs_length);
 
        for (pno = 0; pno < pages_needed; pno++) {
                int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
@@ -235,7 +236,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->frmr = frmr;
        pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
-       read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
+       read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
+                    rs_length);
 
        frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
        frmr->direction = DMA_FROM_DEVICE;
@@ -531,7 +533,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
        rqstp->rq_arg.page_base = head->arg.page_base;
 
        /* rq_respages starts after the last arg page */
-       rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
+       rqstp->rq_respages = &rqstp->rq_pages[page_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;
 
        /* Rebuild rq_arg head and tail. */
index 64443eb754ad0fe7fd0b16633c3aa10cebdc3e26..41e452bc580c0fea0f39fe71924b72dcdff6782f 100644 (file)
@@ -270,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 
        xprt_clear_connected(xprt);
 
-       rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
+       rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ia_close(&r_xprt->rx_ia);
 
        xprt_rdma_free_addresses(xprt);
index eb081ad05e33bb65a89b4afb499177dff4d2de89..5502d4dade74aa8646f89305b011d215294352e0 100644 (file)
@@ -543,11 +543,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
        }
 
        if (memreg == RPCRDMA_FRMR) {
-               /* Requires both frmr reg and local dma lkey */
-               if (((devattr->device_cap_flags &
-                    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
-                   (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) ||
-                     (devattr->max_fast_reg_page_list_len == 0)) {
+               if (!(devattr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
+                   (devattr->max_fast_reg_page_list_len == 0)) {
                        dprintk("RPC:       %s: FRMR registration "
                                "not supported by HCA\n", __func__);
                        memreg = RPCRDMA_MTHCAFMR;
@@ -557,6 +554,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                if (!ia->ri_device->alloc_fmr) {
                        dprintk("RPC:       %s: MTHCAFMR registration "
                                "not supported by HCA\n", __func__);
+                       rc = -EINVAL;
                        goto out3;
                }
        }
@@ -755,19 +753,22 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 
        cancel_delayed_work_sync(&ep->rep_connect_worker);
 
-       if (ia->ri_id->qp) {
+       if (ia->ri_id->qp)
                rpcrdma_ep_disconnect(ep, ia);
+
+       rpcrdma_clean_cq(ep->rep_attr.recv_cq);
+       rpcrdma_clean_cq(ep->rep_attr.send_cq);
+
+       if (ia->ri_id->qp) {
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }
 
-       rpcrdma_clean_cq(ep->rep_attr.recv_cq);
        rc = ib_destroy_cq(ep->rep_attr.recv_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
                        __func__, rc);
 
-       rpcrdma_clean_cq(ep->rep_attr.send_cq);
        rc = ib_destroy_cq(ep->rep_attr.send_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
index 7a9ab90363be1205dcff102fa486cd5698569e99..1eb76956b4390fef09825f38bda1f1b29a1ab352 100644 (file)
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/if_bridge.h>
 #include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
 
@@ -92,6 +95,85 @@ static void switchdev_trans_items_warn_destroy(struct net_device *dev,
        switchdev_trans_items_destroy(trans);
 }
 
+static LIST_HEAD(deferred);
+static DEFINE_SPINLOCK(deferred_lock);
+
+typedef void switchdev_deferred_func_t(struct net_device *dev,
+                                      const void *data);
+
+struct switchdev_deferred_item {
+       struct list_head list;
+       struct net_device *dev;
+       switchdev_deferred_func_t *func;
+       unsigned long data[0];
+};
+
+static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
+{
+       struct switchdev_deferred_item *dfitem;
+
+       spin_lock_bh(&deferred_lock);
+       if (list_empty(&deferred)) {
+               dfitem = NULL;
+               goto unlock;
+       }
+       dfitem = list_first_entry(&deferred,
+                                 struct switchdev_deferred_item, list);
+       list_del(&dfitem->list);
+unlock:
+       spin_unlock_bh(&deferred_lock);
+       return dfitem;
+}
+
+/**
+ *     switchdev_deferred_process - Process ops in deferred queue
+ *
+ *     Called to flush the ops currently queued in deferred ops queue.
+ *     rtnl_lock must be held.
+ */
+void switchdev_deferred_process(void)
+{
+       struct switchdev_deferred_item *dfitem;
+
+       ASSERT_RTNL();
+
+       while ((dfitem = switchdev_deferred_dequeue())) {
+               dfitem->func(dfitem->dev, dfitem->data);
+               dev_put(dfitem->dev);
+               kfree(dfitem);
+       }
+}
+EXPORT_SYMBOL_GPL(switchdev_deferred_process);
+
+static void switchdev_deferred_process_work(struct work_struct *work)
+{
+       rtnl_lock();
+       switchdev_deferred_process();
+       rtnl_unlock();
+}
+
+static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
+
+static int switchdev_deferred_enqueue(struct net_device *dev,
+                                     const void *data, size_t data_len,
+                                     switchdev_deferred_func_t *func)
+{
+       struct switchdev_deferred_item *dfitem;
+
+       dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
+       if (!dfitem)
+               return -ENOMEM;
+       dfitem->dev = dev;
+       dfitem->func = func;
+       memcpy(dfitem->data, data, data_len);
+       dev_hold(dev);
+       spin_lock_bh(&deferred_lock);
+       list_add_tail(&dfitem->list, &deferred);
+       spin_unlock_bh(&deferred_lock);
+       schedule_work(&deferred_process_work);
+       return 0;
+}
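
Besides the work item above, switchdev_deferred_process() can be called directly by code that already holds rtnl_lock and needs the queue drained at a known point, for instance before a port is torn down. A minimal usage sketch under that assumption:

    rtnl_lock();
    switchdev_deferred_process();   /* drain the 'deferred' list */
    /* ... detach or unregister the port ... */
    rtnl_unlock();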
+
 /**
  *     switchdev_port_attr_get - Get port attribute
  *
@@ -135,7 +217,7 @@ int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
 
 static int __switchdev_port_attr_set(struct net_device *dev,
-                                    struct switchdev_attr *attr,
+                                    const struct switchdev_attr *attr,
                                     struct switchdev_trans *trans)
 {
        const struct switchdev_ops *ops = dev->switchdev_ops;
@@ -170,74 +252,12 @@ done:
        return err;
 }
 
-struct switchdev_attr_set_work {
-       struct work_struct work;
-       struct net_device *dev;
-       struct switchdev_attr attr;
-};
-
-static void switchdev_port_attr_set_work(struct work_struct *work)
-{
-       struct switchdev_attr_set_work *asw =
-               container_of(work, struct switchdev_attr_set_work, work);
-       int err;
-
-       rtnl_lock();
-       err = switchdev_port_attr_set(asw->dev, &asw->attr);
-       if (err && err != -EOPNOTSUPP)
-               netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n",
-                          err, asw->attr.id);
-       rtnl_unlock();
-
-       dev_put(asw->dev);
-       kfree(work);
-}
-
-static int switchdev_port_attr_set_defer(struct net_device *dev,
-                                        struct switchdev_attr *attr)
-{
-       struct switchdev_attr_set_work *asw;
-
-       asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
-       if (!asw)
-               return -ENOMEM;
-
-       INIT_WORK(&asw->work, switchdev_port_attr_set_work);
-
-       dev_hold(dev);
-       asw->dev = dev;
-       memcpy(&asw->attr, attr, sizeof(asw->attr));
-
-       schedule_work(&asw->work);
-
-       return 0;
-}
-
-/**
- *     switchdev_port_attr_set - Set port attribute
- *
- *     @dev: port device
- *     @attr: attribute to set
- *
- *     Use a 2-phase prepare-commit transaction model to ensure
- *     system is not left in a partially updated state due to
- *     failure from driver/device.
- */
-int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
+static int switchdev_port_attr_set_now(struct net_device *dev,
+                                      const struct switchdev_attr *attr)
 {
        struct switchdev_trans trans;
        int err;
 
-       if (!rtnl_is_locked()) {
-               /* Running prepare-commit transaction across stacked
-                * devices requires nothing moves, so if rtnl_lock is
-                * not held, schedule a worker thread to hold rtnl_lock
-                * while setting attr.
-                */
-
-               return switchdev_port_attr_set_defer(dev, attr);
-       }
-
        switchdev_trans_init(&trans);
 
        /* Phase I: prepare for attr set. Driver/device should fail
@@ -274,6 +294,47 @@ int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
 
        return err;
 }
+
+static void switchdev_port_attr_set_deferred(struct net_device *dev,
+                                            const void *data)
+{
+       const struct switchdev_attr *attr = data;
+       int err;
+
+       err = switchdev_port_attr_set_now(dev, attr);
+       if (err && err != -EOPNOTSUPP)
+               netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
+                          err, attr->id);
+}
+
+static int switchdev_port_attr_set_defer(struct net_device *dev,
+                                        const struct switchdev_attr *attr)
+{
+       return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
+                                         switchdev_port_attr_set_deferred);
+}
+
+/**
+ *     switchdev_port_attr_set - Set port attribute
+ *
+ *     @dev: port device
+ *     @attr: attribute to set
+ *
+ *     Use a 2-phase prepare-commit transaction model to ensure
+ *     system is not left in a partially updated state due to
+ *     failure from driver/device.
+ *
+ *     Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be held
+ *     and the caller must not be in an atomic section.
+ */
+int switchdev_port_attr_set(struct net_device *dev,
+                           const struct switchdev_attr *attr)
+{
+       if (attr->flags & SWITCHDEV_F_DEFER)
+               return switchdev_port_attr_set_defer(dev, attr);
+       ASSERT_RTNL();
+       return switchdev_port_attr_set_now(dev, attr);
+}
 EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
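
On the driver side, the two-phase model appears as two invocations of the same switchdev_ops handler, told apart by the transaction phase. A sketch of how a driver of this era might implement it (foo_* names and the validation details are hypothetical; switchdev_trans_ph_prepare() is the phase test):

    static int foo_port_attr_set(struct net_device *dev,
                                 const struct switchdev_attr *attr,
                                 struct switchdev_trans *trans)
    {
            if (switchdev_trans_ph_prepare(trans))
                    /* phase I: validate and reserve only; failing
                     * here aborts the transaction cleanly
                     */
                    return foo_attr_supported(dev, attr) ? 0 : -EOPNOTSUPP;

            /* phase II: commit; not allowed to fail */
            foo_apply_attr(dev, attr);
            return 0;
    }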
 
 static int __switchdev_port_obj_add(struct net_device *dev,
@@ -302,21 +363,8 @@ static int __switchdev_port_obj_add(struct net_device *dev,
        return err;
 }
 
-/**
- *     switchdev_port_obj_add - Add port object
- *
- *     @dev: port device
- *     @id: object ID
- *     @obj: object to add
- *
- *     Use a 2-phase prepare-commit transaction model to ensure
- *     system is not left in a partially updated state due to
- *     failure from driver/device.
- *
- *     rtnl_lock must be held.
- */
-int switchdev_port_obj_add(struct net_device *dev,
-                          const struct switchdev_obj *obj)
+static int switchdev_port_obj_add_now(struct net_device *dev,
+                                     const struct switchdev_obj *obj)
 {
        struct switchdev_trans trans;
        int err;
@@ -358,17 +406,52 @@ int switchdev_port_obj_add(struct net_device *dev,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
+
+static void switchdev_port_obj_add_deferred(struct net_device *dev,
+                                           const void *data)
+{
+       const struct switchdev_obj *obj = data;
+       int err;
+
+       err = switchdev_port_obj_add_now(dev, obj);
+       if (err && err != -EOPNOTSUPP)
+               netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
+                          err, obj->id);
+}
+
+static int switchdev_port_obj_add_defer(struct net_device *dev,
+                                       const struct switchdev_obj *obj)
+{
+       return switchdev_deferred_enqueue(dev, obj, sizeof(*obj),
+                                         switchdev_port_obj_add_deferred);
+}
 
 /**
- *     switchdev_port_obj_del - Delete port object
+ *     switchdev_port_obj_add - Add port object
  *
  *     @dev: port device
  *     @id: object ID
- *     @obj: object to delete
+ *     @obj: object to add
+ *
+ *     Use a 2-phase prepare-commit transaction model to ensure
+ *     system is not left in a partially updated state due to
+ *     failure from driver/device.
+ *
+ *     Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be held
+ *     and the caller must not be in an atomic section.
  */
-int switchdev_port_obj_del(struct net_device *dev,
+int switchdev_port_obj_add(struct net_device *dev,
                           const struct switchdev_obj *obj)
+{
+       if (obj->flags & SWITCHDEV_F_DEFER)
+               return switchdev_port_obj_add_defer(dev, obj);
+       ASSERT_RTNL();
+       return switchdev_port_obj_add_now(dev, obj);
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
+
+static int switchdev_port_obj_del_now(struct net_device *dev,
+                                     const struct switchdev_obj *obj)
 {
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
@@ -384,13 +467,51 @@ int switchdev_port_obj_del(struct net_device *dev,
         */
 
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
-               err = switchdev_port_obj_del(lower_dev, obj);
+               err = switchdev_port_obj_del_now(lower_dev, obj);
                if (err)
                        break;
        }
 
        return err;
 }
+
+static void switchdev_port_obj_del_deferred(struct net_device *dev,
+                                           const void *data)
+{
+       const struct switchdev_obj *obj = data;
+       int err;
+
+       err = switchdev_port_obj_del_now(dev, obj);
+       if (err && err != -EOPNOTSUPP)
+               netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
+                          err, obj->id);
+}
+
+static int switchdev_port_obj_del_defer(struct net_device *dev,
+                                       const struct switchdev_obj *obj)
+{
+       return switchdev_deferred_enqueue(dev, obj, sizeof(*obj),
+                                         switchdev_port_obj_del_deferred);
+}
+
+/**
+ *     switchdev_port_obj_del - Delete port object
+ *
+ *     @dev: port device
+ *     @id: object ID
+ *     @obj: object to delete
+ *
+ *     Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be held
+ *     and the caller must not be in an atomic section.
+ */
+int switchdev_port_obj_del(struct net_device *dev,
+                          const struct switchdev_obj *obj)
+{
+       if (obj->flags & SWITCHDEV_F_DEFER)
+               return switchdev_port_obj_del_defer(dev, obj);
+       ASSERT_RTNL();
+       return switchdev_port_obj_del_now(dev, obj);
+}
 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
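
With SWITCHDEV_F_DEFER set in obj->flags, an add or del may be requested from atomic context; the op is queued and later executed under rtnl_lock by deferred_process_work. A sketch of a deferred VLAN add (the vid values are illustrative, and the switchdev_obj_port_vlan layout is assumed from this kernel era):

    struct switchdev_obj_port_vlan vlan = {
            .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
            .obj.flags = SWITCHDEV_F_DEFER, /* queue instead of running now */
            .vid_begin = 10,
            .vid_end = 10,
    };
    int err;

    /* safe without rtnl_lock; executes later via the work item */
    err = switchdev_port_obj_add(dev, &vlan.obj);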
 
 /**
@@ -400,6 +521,8 @@ EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
  *     @id: object ID
  *     @obj: object to dump
  *     @cb: function to call with a filled object
+ *
+ *     rtnl_lock must be held.
  */
 int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
                            switchdev_obj_dump_cb_t *cb)
@@ -409,6 +532,8 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
        struct list_head *iter;
        int err = -EOPNOTSUPP;
 
+       ASSERT_RTNL();
+
        if (ops && ops->switchdev_port_obj_dump)
                return ops->switchdev_port_obj_dump(dev, obj, cb);
 
@@ -722,11 +847,16 @@ static int switchdev_port_br_afspec(struct net_device *dev,
                if (nla_len(attr) != sizeof(struct bridge_vlan_info))
                        return -EINVAL;
                vinfo = nla_data(attr);
+               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
+                       return -EINVAL;
                vlan.flags = vinfo->flags;
                if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
                        if (vlan.vid_begin)
                                return -EINVAL;
                        vlan.vid_begin = vinfo->vid;
+                       /* don't allow range of pvids */
+                       if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
+                               return -EINVAL;
                } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
                        if (!vlan.vid_begin)
                                return -EINVAL;
@@ -829,10 +959,10 @@ int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 {
        struct switchdev_obj_port_fdb fdb = {
                .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
-               .addr = addr,
                .vid = vid,
        };
 
+       ether_addr_copy(fdb.addr, addr);
        return switchdev_port_obj_add(dev, &fdb.obj);
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
@@ -854,10 +984,10 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
 {
        struct switchdev_obj_port_fdb fdb = {
                .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
-               .addr = addr,
                .vid = vid,
        };
 
+       ether_addr_copy(fdb.addr, addr);
        return switchdev_port_obj_del(dev, &fdb.obj);
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
@@ -974,6 +1104,8 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
        struct net_device *dev = NULL;
        int nhsel;
 
+       ASSERT_RTNL();
+
        /* For this route, all nexthop devs must be on the same switch. */
 
        for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
@@ -1019,7 +1151,6 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
                .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
                .dst = dst,
                .dst_len = dst_len,
-               .fi = fi,
                .tos = tos,
                .type = type,
                .nlflags = nlflags,
@@ -1028,6 +1159,8 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
        struct net_device *dev;
        int err = 0;
 
+       memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
+
        /* Don't offload route if using custom ip rules or if
         * IPv4 FIB offloading has been disabled completely.
         */
@@ -1071,7 +1204,6 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
                .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
                .dst = dst,
                .dst_len = dst_len,
-               .fi = fi,
                .tos = tos,
                .type = type,
                .nlflags = 0,
@@ -1080,6 +1212,8 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
        struct net_device *dev;
        int err = 0;
 
+       memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
+
        if (!(fi->fib_flags & RTNH_F_OFFLOAD))
                return 0;
 
@@ -1202,10 +1336,11 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
        u32 mark = dev->ifindex;
        u32 reset_mark = 0;
 
-       if (group_dev && joining) {
-               mark = switchdev_port_fwd_mark_get(dev, group_dev);
-       } else if (group_dev && !joining) {
-               if (dev->offload_fwd_mark == mark)
+       if (group_dev) {
+               ASSERT_RTNL();
+               if (joining)
+                       mark = switchdev_port_fwd_mark_get(dev, group_dev);
+               else if (dev->offload_fwd_mark == mark)
                        /* Ohoh, this port was the mark reference port,
                         * but it's leaving the group, so reset the
                         * mark for the remaining ports in the group.
index e7000be321b0148469264524ed6fce75c3952955..ed98c1fc3de1428560ea370413084102af9dff7f 100644 (file)
@@ -94,10 +94,14 @@ __init int net_sysctl_init(void)
                goto out;
        ret = register_pernet_subsys(&sysctl_pernet_ops);
        if (ret)
-               goto out;
+               goto out1;
        register_sysctl_root(&net_sysctl_root);
 out:
        return ret;
+out1:
+       unregister_sysctl_table(net_header);
+       net_header = NULL;
+       goto out;
 }
 
 struct ctl_table_header *register_net_sysctl(struct net *net,
index 41042de3ae9bcfad4504e0bcbb29d3bea4512bb5..9dc239dfe19211f2a6367f9581bf79527f73af31 100644 (file)
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/tipc_config.h>
 #include "socket.h"
 #include "msg.h"
 #include "bcast.h"
 #include "name_distr.h"
-#include "core.h"
+#include "link.h"
+#include "node.h"
 
-#define        MAX_PKT_DEFAULT_MCAST   1500    /* bcast link max packet size (fixed) */
-#define        BCLINK_WIN_DEFAULT      20      /* bcast link window size (default) */
+#define        BCLINK_WIN_DEFAULT      50      /* bcast link window size (default) */
+#define        BCLINK_WIN_MIN          32      /* bcast minimum link window size */
 
 const char tipc_bclink_name[] = "broadcast-link";
 
-static void tipc_nmap_diff(struct tipc_node_map *nm_a,
-                          struct tipc_node_map *nm_b,
-                          struct tipc_node_map *nm_diff);
-static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
-static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-
-static void tipc_bclink_lock(struct net *net)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       spin_lock_bh(&tn->bclink->lock);
-}
-
-static void tipc_bclink_unlock(struct net *net)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       spin_unlock_bh(&tn->bclink->lock);
-}
-
-void tipc_bclink_input(struct net *net)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
-}
-
-uint  tipc_bclink_get_mtu(void)
-{
-       return MAX_PKT_DEFAULT_MCAST;
-}
-
-static u32 bcbuf_acks(struct sk_buff *buf)
-{
-       return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
-}
-
-static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
-{
-       TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
-}
-
-static void bcbuf_decr_acks(struct sk_buff *buf)
-{
-       bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
-}
+/**
+ * struct tipc_bc_base - base structure for keeping broadcast send state
+ * @link: broadcast send link structure
+ * @inputq: data input queue; will only carry SOCK_WAKEUP messages
+ * @dest: array keeping number of reachable destinations per bearer
+ * @primary_bearer: a bearer having links to all broadcast destinations, if any
+ */
+struct tipc_bc_base {
+       struct tipc_link *link;
+       struct sk_buff_head inputq;
+       int dests[MAX_BEARERS];
+       int primary_bearer;
+};
 
-void tipc_bclink_add_node(struct net *net, u32 addr)
+static struct tipc_bc_base *tipc_bc_base(struct net *net)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       tipc_bclink_lock(net);
-       tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
-       tipc_bclink_unlock(net);
+       return tipc_net(net)->bcbase;
 }
 
-void tipc_bclink_remove_node(struct net *net, u32 addr)
+int tipc_bcast_get_mtu(struct net *net)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       tipc_bclink_lock(net);
-       tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
-
-       /* Last node? => reset backlog queue */
-       if (!tn->bclink->bcast_nodes.count)
-               tipc_link_purge_backlog(&tn->bclink->link);
-
-       tipc_bclink_unlock(net);
+       return tipc_link_mtu(tipc_bc_sndlink(net));
 }
 
-static void bclink_set_last_sent(struct net *net)
+/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
+ *                               if any, and make it primary bearer
+ */
+static void tipc_bcbase_select_primary(struct net *net)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *bcl = tn->bcl;
+       struct tipc_bc_base *bb = tipc_bc_base(net);
+       int all_dests =  tipc_link_bc_peers(bb->link);
+       int i, mtu;
 
-       bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
-}
+       bb->primary_bearer = INVALID_BEARER_ID;
 
-u32 tipc_bclink_get_last_sent(struct net *net)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       if (!all_dests)
+               return;
 
-       return tn->bcl->silent_intv_cnt;
-}
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if (!bb->dests[i])
+                       continue;
 
-static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
-{
-       node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
-                                               seqno : node->bclink.last_sent;
-}
+               mtu = tipc_bearer_mtu(net, i);
+               if (mtu < tipc_link_mtu(bb->link))
+                       tipc_link_set_mtu(bb->link, mtu);
 
-/**
- * tipc_bclink_retransmit_to - get most recent node to request retransmission
- *
- * Called with bclink_lock locked
- */
-struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       return tn->bclink->retransmit_to;
-}
+               if (bb->dests[i] < all_dests)
+                       continue;
 
-/**
- * bclink_retransmit_pkt - retransmit broadcast packets
- * @after: sequence number of last packet to *not* retransmit
- * @to: sequence number of last packet to retransmit
- *
- * Called with bclink_lock locked
- */
-static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
-{
-       struct sk_buff *skb;
-       struct tipc_link *bcl = tn->bcl;
+               bb->primary_bearer = i;
 
-       skb_queue_walk(&bcl->transmq, skb) {
-               if (more(buf_seqno(skb), after)) {
-                       tipc_link_retransmit(bcl, skb, mod(to - after));
+               /* Reduce risk that all nodes select same primary */
+               if ((i ^ tipc_own_addr(net)) & 1)
                        break;
-               }
        }
 }
 
-/**
- * bclink_prepare_wakeup - prepare users for wakeup after congestion
- * @bcl: broadcast link
- * @resultq: queue for users which can be woken up
- * Move a number of waiting users, as permitted by available space in
- * the send queue, from link wait queue to specified queue for wakeup
- */
-static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
+void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
 {
-       int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
-       int imp, lim;
-       struct sk_buff *skb, *tmp;
-
-       skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
-               imp = TIPC_SKB_CB(skb)->chain_imp;
-               lim = bcl->window + bcl->backlog[imp].limit;
-               pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
-               if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
-                       continue;
-               skb_unlink(skb, &bcl->wakeupq);
-               skb_queue_tail(resultq, skb);
-       }
-}
+       struct tipc_bc_base *bb = tipc_bc_base(net);
 
-/**
- * tipc_bclink_wakeup_users - wake up pending users
- *
- * Called with no locks taken
- */
-void tipc_bclink_wakeup_users(struct net *net)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *bcl = tn->bcl;
-       struct sk_buff_head resultq;
-
-       skb_queue_head_init(&resultq);
-       bclink_prepare_wakeup(bcl, &resultq);
-       tipc_sk_rcv(net, &resultq);
+       tipc_bcast_lock(net);
+       bb->dests[bearer_id]++;
+       tipc_bcbase_select_primary(net);
+       tipc_bcast_unlock(net);
 }
 
-/**
- * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
- * @n_ptr: node that sent acknowledgement info
- * @acked: broadcast sequence # that has been acknowledged
- *
- * Node is locked, bclink_lock unlocked.
- */
-void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
+void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
 {
-       struct sk_buff *skb, *tmp;
-       unsigned int released = 0;
-       struct net *net = n_ptr->net;
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       if (unlikely(!n_ptr->bclink.recv_permitted))
-               return;
-
-       tipc_bclink_lock(net);
-
-       /* Bail out if tx queue is empty (no clean up is required) */
-       skb = skb_peek(&tn->bcl->transmq);
-       if (!skb)
-               goto exit;
-
-       /* Determine which messages need to be acknowledged */
-       if (acked == INVALID_LINK_SEQ) {
-               /*
-                * Contact with specified node has been lost, so need to
-                * acknowledge sent messages only (if other nodes still exist)
-                * or both sent and unsent messages (otherwise)
-                */
-               if (tn->bclink->bcast_nodes.count)
-                       acked = tn->bcl->silent_intv_cnt;
-               else
-                       acked = tn->bcl->snd_nxt;
-       } else {
-               /*
-                * Bail out if specified sequence number does not correspond
-                * to a message that has been sent and not yet acknowledged
-                */
-               if (less(acked, buf_seqno(skb)) ||
-                   less(tn->bcl->silent_intv_cnt, acked) ||
-                   less_eq(acked, n_ptr->bclink.acked))
-                       goto exit;
-       }
-
-       /* Skip over packets that node has previously acknowledged */
-       skb_queue_walk(&tn->bcl->transmq, skb) {
-               if (more(buf_seqno(skb), n_ptr->bclink.acked))
-                       break;
-       }
-
-       /* Update packets that node is now acknowledging */
-       skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
-               if (more(buf_seqno(skb), acked))
-                       break;
-               bcbuf_decr_acks(skb);
-               bclink_set_last_sent(net);
-               if (bcbuf_acks(skb) == 0) {
-                       __skb_unlink(skb, &tn->bcl->transmq);
-                       kfree_skb(skb);
-                       released = 1;
-               }
-       }
-       n_ptr->bclink.acked = acked;
+       struct tipc_bc_base *bb = tipc_bc_base(net);
 
-       /* Try resolving broadcast link congestion, if necessary */
-       if (unlikely(skb_peek(&tn->bcl->backlogq))) {
-               tipc_link_push_packets(tn->bcl);
-               bclink_set_last_sent(net);
-       }
-       if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
-               n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
-exit:
-       tipc_bclink_unlock(net);
+       tipc_bcast_lock(net);
+       bb->dests[bearer_id]--;
+       tipc_bcbase_select_primary(net);
+       tipc_bcast_unlock(net);
 }
 
-/**
- * tipc_bclink_update_link_state - update broadcast link state
+/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
  *
- * RCU and node lock set
+ * Note that number of reachable destinations, as indicated in the dests[]
+ * array, may transitionally differ from the number of destinations indicated
+ * in each sent buffer. We can sustain this. Excess destination nodes will
+ * drop and never acknowledge the unexpected packets, and missing destinations
+ * will either require retransmission (if they are just about to be added to
+ * the bearer), or be removed from the buffer's 'ackers' counter (if they
+ * just went down)
  */
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
-                                  u32 last_sent)
+static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
 {
-       struct sk_buff *buf;
-       struct net *net = n_ptr->net;
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       int bearer_id;
+       struct tipc_bc_base *bb = tipc_bc_base(net);
+       struct sk_buff *skb, *_skb;
+       struct sk_buff_head _xmitq;
 
-       /* Ignore "stale" link state info */
-       if (less_eq(last_sent, n_ptr->bclink.last_in))
+       if (skb_queue_empty(xmitq))
                return;
 
-       /* Update link synchronization state; quit if in sync */
-       bclink_update_last_sent(n_ptr, last_sent);
-
-       if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
+       /* The typical case: at least one bearer has links to all nodes */
+       bearer_id = bb->primary_bearer;
+       if (bearer_id >= 0) {
+               tipc_bearer_bc_xmit(net, bearer_id, xmitq);
                return;
-
-       /* Update out-of-sync state; quit if loss is still unconfirmed */
-       if ((++n_ptr->bclink.oos_state) == 1) {
-               if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
-                       return;
-               n_ptr->bclink.oos_state++;
        }
 
-       /* Don't NACK if one has been recently sent (or seen) */
-       if (n_ptr->bclink.oos_state & 0x1)
-               return;
-
-       /* Send NACK */
-       buf = tipc_buf_acquire(INT_H_SIZE);
-       if (buf) {
-               struct tipc_msg *msg = buf_msg(buf);
-               struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
-               u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
-
-               tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
-                             INT_H_SIZE, n_ptr->addr);
-               msg_set_non_seq(msg, 1);
-               msg_set_mc_netid(msg, tn->net_id);
-               msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
-               msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
-               msg_set_bcgap_to(msg, to);
-
-               tipc_bclink_lock(net);
-               tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
-               tn->bcl->stats.sent_nacks++;
-               tipc_bclink_unlock(net);
-               kfree_skb(buf);
-
-               n_ptr->bclink.oos_state++;
-       }
-}
-
-void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
-{
-       u16 last = msg_last_bcast(hdr);
-       int mtyp = msg_type(hdr);
+       /* We have to transmit across all bearers */
+       skb_queue_head_init(&_xmitq);
+       for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+               if (!bb->dests[bearer_id])
+                       continue;
 
-       if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
-               return;
-       if (mtyp == STATE_MSG) {
-               tipc_bclink_update_link_state(n, last);
-               return;
+               skb_queue_walk(xmitq, skb) {
+                       _skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
+                       if (!_skb)
+                               break;
+                       __skb_queue_tail(&_xmitq, _skb);
+               }
+               tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
        }
-       /* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
-        * and transfer synch info in LINK_PROTOCOL messages.
-        */
-       if (tipc_node_is_up(n))
-               return;
-       if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
-               return;
-       n->bclink.last_sent = last;
-       n->bclink.last_in = last;
-       n->bclink.oos_state = 0;
+       __skb_queue_purge(xmitq);
+       __skb_queue_purge(&_xmitq);
 }
 
-/**
- * bclink_peek_nack - monitor retransmission requests sent by other nodes
- *
- * Delay any upcoming NACK by this node if another node has already
- * requested the first message this node is going to ask for.
- */
-static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
-{
-       struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
-
-       if (unlikely(!n_ptr))
-               return;
-
-       tipc_node_lock(n_ptr);
-       if (n_ptr->bclink.recv_permitted &&
-           (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
-           (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
-               n_ptr->bclink.oos_state = 2;
-       tipc_node_unlock(n_ptr);
-       tipc_node_put(n_ptr);
-}
-
-/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
+/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
  *                    and to identified node local sockets
  * @net: the applicable net namespace
  * @list: chain of buffers containing message
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *bcl = tn->bcl;
-       struct tipc_bclink *bclink = tn->bclink;
+       struct tipc_link *l = tipc_bc_sndlink(net);
+       struct sk_buff_head xmitq, inputq, rcvq;
        int rc = 0;
-       int bc = 0;
-       struct sk_buff *skb;
-       struct sk_buff_head arrvq;
-       struct sk_buff_head inputq;
 
-       /* Prepare clone of message for local node */
-       skb = tipc_msg_reassemble(list);
-       if (unlikely(!skb))
-               return -EHOSTUNREACH;
+       __skb_queue_head_init(&rcvq);
+       __skb_queue_head_init(&xmitq);
+       skb_queue_head_init(&inputq);
 
-       /* Broadcast to all nodes */
-       if (likely(bclink)) {
-               tipc_bclink_lock(net);
-               if (likely(bclink->bcast_nodes.count)) {
-                       rc = __tipc_link_xmit(net, bcl, list);
-                       if (likely(!rc)) {
-                               u32 len = skb_queue_len(&bcl->transmq);
-
-                               bclink_set_last_sent(net);
-                               bcl->stats.queue_sz_counts++;
-                               bcl->stats.accu_queue_sz += len;
-                       }
-                       bc = 1;
-               }
-               tipc_bclink_unlock(net);
-       }
+       /* Prepare message clone for local node */
+       if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
+               return -EHOSTUNREACH;
 
-       if (unlikely(!bc))
-               __skb_queue_purge(list);
+       tipc_bcast_lock(net);
+       if (tipc_link_bc_peers(l))
+               rc = tipc_link_xmit(l, list, &xmitq);
+       tipc_bcast_unlock(net);
 
+       /* Don't send to local node if adding to link failed */
        if (unlikely(rc)) {
-               kfree_skb(skb);
+               __skb_queue_purge(&rcvq);
                return rc;
        }
-       /* Deliver message clone */
-       __skb_queue_head_init(&arrvq);
-       skb_queue_head_init(&inputq);
-       __skb_queue_tail(&arrvq, skb);
-       tipc_sk_mcast_rcv(net, &arrvq, &inputq);
-       return rc;
-}
 
-/**
- * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
- *
- * Called with both sending node's lock and bclink_lock taken.
- */
-static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
-{
-       struct tipc_net *tn = net_generic(node->net, tipc_net_id);
-
-       bclink_update_last_sent(node, seqno);
-       node->bclink.last_in = seqno;
-       node->bclink.oos_state = 0;
-       tn->bcl->stats.recv_info++;
-
-       /*
-        * Unicast an ACK periodically, ensuring that
-        * all nodes in the cluster don't ACK at the same time
-        */
-       if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
-               tipc_link_proto_xmit(node_active_link(node, node->addr),
-                                    STATE_MSG, 0, 0, 0, 0);
-               tn->bcl->stats.sent_acks++;
-       }
+       /* Broadcast to all nodes, including local node */
+       tipc_bcbase_xmit(net, &xmitq);
+       tipc_sk_mcast_rcv(net, &rcvq, &inputq);
+       __skb_queue_purge(list);
+       return 0;
 }
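
The contract above puts the retry decision on the caller: the buffer chain is consumed on every return path except -ELINKCONG. A minimal sketch of a hypothetical caller honoring that contract (example_mcast_send is illustrative, not part of the patch):

/* Hypothetical caller of tipc_bcast_xmit(); the chain is consumed
 * on all return paths except -ELINKCONG.
 */
static int example_mcast_send(struct net *net, struct sk_buff_head *pkts)
{
	int rc = tipc_bcast_xmit(net, pkts);

	if (rc == -ELINKCONG)
		__skb_queue_purge(pkts);	/* still ours: drop, or retry later */
	return rc;
}
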
 
-/**
- * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
+/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
  *
  * RCU is locked, no other locks set
  */
-void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
+int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *bcl = tn->bcl;
-       struct tipc_msg *msg = buf_msg(buf);
-       struct tipc_node *node;
-       u32 next_in;
-       u32 seqno;
-       int deferred = 0;
-       int pos = 0;
-       struct sk_buff *iskb;
-       struct sk_buff_head *arrvq, *inputq;
-
-       /* Screen out unwanted broadcast messages */
-       if (msg_mc_netid(msg) != tn->net_id)
-               goto exit;
-
-       node = tipc_node_find(net, msg_prevnode(msg));
-       if (unlikely(!node))
-               goto exit;
-
-       tipc_node_lock(node);
-       if (unlikely(!node->bclink.recv_permitted))
-               goto unlock;
-
-       /* Handle broadcast protocol message */
-       if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
-               if (msg_type(msg) != STATE_MSG)
-                       goto unlock;
-               if (msg_destnode(msg) == tn->own_addr) {
-                       tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
-                       tipc_bclink_lock(net);
-                       bcl->stats.recv_nacks++;
-                       tn->bclink->retransmit_to = node;
-                       bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
-                                             msg_bcgap_to(msg));
-                       tipc_bclink_unlock(net);
-                       tipc_node_unlock(node);
-               } else {
-                       tipc_node_unlock(node);
-                       bclink_peek_nack(net, msg);
-               }
-               tipc_node_put(node);
-               goto exit;
-       }
-
-       /* Handle in-sequence broadcast message */
-       seqno = msg_seqno(msg);
-       next_in = mod(node->bclink.last_in + 1);
-       arrvq = &tn->bclink->arrvq;
-       inputq = &tn->bclink->inputq;
-
-       if (likely(seqno == next_in)) {
-receive:
-               /* Deliver message to destination */
-               if (likely(msg_isdata(msg))) {
-                       tipc_bclink_lock(net);
-                       bclink_accept_pkt(node, seqno);
-                       spin_lock_bh(&inputq->lock);
-                       __skb_queue_tail(arrvq, buf);
-                       spin_unlock_bh(&inputq->lock);
-                       node->action_flags |= TIPC_BCAST_MSG_EVT;
-                       tipc_bclink_unlock(net);
-                       tipc_node_unlock(node);
-               } else if (msg_user(msg) == MSG_BUNDLER) {
-                       tipc_bclink_lock(net);
-                       bclink_accept_pkt(node, seqno);
-                       bcl->stats.recv_bundles++;
-                       bcl->stats.recv_bundled += msg_msgcnt(msg);
-                       pos = 0;
-                       while (tipc_msg_extract(buf, &iskb, &pos)) {
-                               spin_lock_bh(&inputq->lock);
-                               __skb_queue_tail(arrvq, iskb);
-                               spin_unlock_bh(&inputq->lock);
-                       }
-                       node->action_flags |= TIPC_BCAST_MSG_EVT;
-                       tipc_bclink_unlock(net);
-                       tipc_node_unlock(node);
-               } else if (msg_user(msg) == MSG_FRAGMENTER) {
-                       tipc_bclink_lock(net);
-                       bclink_accept_pkt(node, seqno);
-                       tipc_buf_append(&node->bclink.reasm_buf, &buf);
-                       if (unlikely(!buf && !node->bclink.reasm_buf)) {
-                               tipc_bclink_unlock(net);
-                               goto unlock;
-                       }
-                       bcl->stats.recv_fragments++;
-                       if (buf) {
-                               bcl->stats.recv_fragmented++;
-                               msg = buf_msg(buf);
-                               tipc_bclink_unlock(net);
-                               goto receive;
-                       }
-                       tipc_bclink_unlock(net);
-                       tipc_node_unlock(node);
-               } else {
-                       tipc_bclink_lock(net);
-                       bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock(net);
-                       tipc_node_unlock(node);
-                       kfree_skb(buf);
-               }
-               buf = NULL;
-
-               /* Determine new synchronization state */
-               tipc_node_lock(node);
-               if (unlikely(!tipc_node_is_up(node)))
-                       goto unlock;
+       struct tipc_msg *hdr = buf_msg(skb);
+       struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+       struct sk_buff_head xmitq;
+       int rc;
 
-               if (node->bclink.last_in == node->bclink.last_sent)
-                       goto unlock;
+       __skb_queue_head_init(&xmitq);
 
-               if (skb_queue_empty(&node->bclink.deferdq)) {
-                       node->bclink.oos_state = 1;
-                       goto unlock;
-               }
-
-               msg = buf_msg(skb_peek(&node->bclink.deferdq));
-               seqno = msg_seqno(msg);
-               next_in = mod(next_in + 1);
-               if (seqno != next_in)
-                       goto unlock;
-
-               /* Take in-sequence message from deferred queue & deliver it */
-               buf = __skb_dequeue(&node->bclink.deferdq);
-               goto receive;
-       }
-
-       /* Handle out-of-sequence broadcast message */
-       if (less(next_in, seqno)) {
-               deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
-                                              buf);
-               bclink_update_last_sent(node, seqno);
-               buf = NULL;
+       if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
+               kfree_skb(skb);
+               return 0;
        }
 
-       tipc_bclink_lock(net);
-
-       if (deferred)
-               bcl->stats.deferred_recv++;
+       tipc_bcast_lock(net);
+       if (msg_user(hdr) == BCAST_PROTOCOL)
+               rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
        else
-               bcl->stats.duplicates++;
+               rc = tipc_link_rcv(l, skb, NULL);
+       tipc_bcast_unlock(net);
 
-       tipc_bclink_unlock(net);
+       tipc_bcbase_xmit(net, &xmitq);
 
-unlock:
-       tipc_node_unlock(node);
-       tipc_node_put(node);
-exit:
-       kfree_skb(buf);
-}
+       /* Any socket wakeup messages ? */
+       if (!skb_queue_empty(inputq))
+               tipc_sk_rcv(net, inputq);
 
-u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
-{
-       return (n_ptr->bclink.recv_permitted &&
-               (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
+       return rc;
 }
 
-
-/**
- * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
- *
- * Send packet over as many bearers as necessary to reach all nodes
- * that have joined the broadcast link.
+/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledgment
  *
- * Returns 0 (packet sent successfully) under all circumstances,
- * since the broadcast link's pseudo-bearer never blocks
+ * RCU is locked, no other locks set
  */
-static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
-                             struct tipc_bearer *unused1,
-                             struct tipc_media_addr *unused2)
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
 {
-       int bp_index;
-       struct tipc_msg *msg = buf_msg(buf);
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_bcbearer *bcbearer = tn->bcbearer;
-       struct tipc_bclink *bclink = tn->bclink;
-
-       /* Prepare broadcast link message for reliable transmission,
-        * if first time trying to send it;
-        * preparation is skipped for broadcast link protocol messages
-        * since they are sent in an unreliable manner and don't need it
-        */
-       if (likely(!msg_non_seq(buf_msg(buf)))) {
-               bcbuf_set_acks(buf, bclink->bcast_nodes.count);
-               msg_set_non_seq(msg, 1);
-               msg_set_mc_netid(msg, tn->net_id);
-               tn->bcl->stats.sent_info++;
-               if (WARN_ON(!bclink->bcast_nodes.count)) {
-                       dump_stack();
-                       return 0;
-               }
-       }
+       struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+       struct sk_buff_head xmitq;
 
-       /* Send buffer over bearers until all targets reached */
-       bcbearer->remains = bclink->bcast_nodes;
-
-       for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
-               struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
-               struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
-               struct tipc_bearer *bp[2] = {p, s};
-               struct tipc_bearer *b = bp[msg_link_selector(msg)];
-               struct sk_buff *tbuf;
-
-               if (!p)
-                       break; /* No more bearers to try */
-               if (!b)
-                       b = p;
-               tipc_nmap_diff(&bcbearer->remains, &b->nodes,
-                              &bcbearer->remains_new);
-               if (bcbearer->remains_new.count == bcbearer->remains.count)
-                       continue; /* Nothing added by bearer pair */
-
-               if (bp_index == 0) {
-                       /* Use original buffer for first bearer */
-                       tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
-               } else {
-                       /* Avoid concurrent buffer access */
-                       tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
-                       if (!tbuf)
-                               break;
-                       tipc_bearer_send(net, b->identity, tbuf,
-                                        &b->bcast_addr);
-                       kfree_skb(tbuf); /* Bearer keeps a clone */
-               }
-               if (bcbearer->remains_new.count == 0)
-                       break; /* All targets reached */
+       __skb_queue_head_init(&xmitq);
 
-               bcbearer->remains = bcbearer->remains_new;
-       }
+       tipc_bcast_lock(net);
+       tipc_link_bc_ack_rcv(l, acked, &xmitq);
+       tipc_bcast_unlock(net);
 
-       return 0;
+       tipc_bcbase_xmit(net, &xmitq);
+
+       /* Any socket wakeup messages ? */
+       if (!skb_queue_empty(inputq))
+               tipc_sk_rcv(net, inputq);
 }
 
-/**
- * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
+ *
+ * RCU is locked, no other locks set
  */
-void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
-                       u32 node, bool action)
+void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+                        struct tipc_msg *hdr)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_bcbearer *bcbearer = tn->bcbearer;
-       struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
-       struct tipc_bcbearer_pair *bp_curr;
-       struct tipc_bearer *b;
-       int b_index;
-       int pri;
+       struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+       struct sk_buff_head xmitq;
 
-       tipc_bclink_lock(net);
+       __skb_queue_head_init(&xmitq);
 
-       if (action)
-               tipc_nmap_add(nm_ptr, node);
-       else
-               tipc_nmap_remove(nm_ptr, node);
+       tipc_bcast_lock(net);
+       if (msg_type(hdr) == STATE_MSG) {
+               tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
+               tipc_link_bc_sync_rcv(l, hdr, &xmitq);
+       } else {
+               tipc_link_bc_init_rcv(l, hdr);
+       }
+       tipc_bcast_unlock(net);
 
-       /* Group bearers by priority (can assume max of two per priority) */
-       memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
+       tipc_bcbase_xmit(net, &xmitq);
 
-       rcu_read_lock();
-       for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-               b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
-               if (!b || !b->nodes.count)
-                       continue;
-
-               if (!bp_temp[b->priority].primary)
-                       bp_temp[b->priority].primary = b;
-               else
-                       bp_temp[b->priority].secondary = b;
-       }
-       rcu_read_unlock();
+       /* Any socket wakeup messages ? */
+       if (!skb_queue_empty(inputq))
+               tipc_sk_rcv(net, inputq);
+}
 
-       /* Create array of bearer pairs for broadcasting */
-       bp_curr = bcbearer->bpairs;
-       memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
+/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
+                        struct sk_buff_head *xmitq)
+{
+       struct tipc_link *snd_l = tipc_bc_sndlink(net);
 
-       for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
+       tipc_bcast_lock(net);
+       tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
+       tipc_bcbase_select_primary(net);
+       tipc_bcast_unlock(net);
+}
 
-               if (!bp_temp[pri].primary)
-                       continue;
+/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
+{
+       struct tipc_link *snd_l = tipc_bc_sndlink(net);
+       struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+       struct sk_buff_head xmitq;
 
-               bp_curr->primary = bp_temp[pri].primary;
+       __skb_queue_head_init(&xmitq);
 
-               if (bp_temp[pri].secondary) {
-                       if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
-                                           &bp_temp[pri].secondary->nodes)) {
-                               bp_curr->secondary = bp_temp[pri].secondary;
-                       } else {
-                               bp_curr++;
-                               bp_curr->primary = bp_temp[pri].secondary;
-                       }
-               }
+       tipc_bcast_lock(net);
+       tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
+       tipc_bcbase_select_primary(net);
+       tipc_bcast_unlock(net);
 
-               bp_curr++;
-       }
+       tipc_bcbase_xmit(net, &xmitq);
 
-       tipc_bclink_unlock(net);
+       /* Any socket wakeup messages ? */
+       if (!skb_queue_empty(inputq))
+               tipc_sk_rcv(net, inputq);
 }
 
 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
@@ -835,7 +395,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
        if (!bcl)
                return 0;
 
-       tipc_bclink_lock(net);
+       tipc_bcast_lock(net);
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
                          NLM_F_MULTI, TIPC_NL_LINK_GET);
@@ -870,7 +430,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
        if (err)
                goto attr_msg_full;
 
-       tipc_bclink_unlock(net);
+       tipc_bcast_unlock(net);
        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);
 
@@ -881,7 +441,7 @@ prop_msg_full:
 attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
 msg_full:
-       tipc_bclink_unlock(net);
+       tipc_bcast_unlock(net);
        genlmsg_cancel(msg->skb, hdr);
 
        return -EMSGSIZE;
@@ -895,25 +455,25 @@ int tipc_bclink_reset_stats(struct net *net)
        if (!bcl)
                return -ENOPROTOOPT;
 
-       tipc_bclink_lock(net);
+       tipc_bcast_lock(net);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
-       tipc_bclink_unlock(net);
+       tipc_bcast_unlock(net);
        return 0;
 }
 
-int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
+static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *bcl = tn->bcl;
+       struct tipc_link *l = tipc_bc_sndlink(net);
 
-       if (!bcl)
+       if (!l)
                return -ENOPROTOOPT;
-       if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+       if (limit < BCLINK_WIN_MIN)
+               limit = BCLINK_WIN_MIN;
+       if (limit > TIPC_MAX_LINK_WIN)
                return -EINVAL;
-
-       tipc_bclink_lock(net);
-       tipc_link_set_queue_limits(bcl, limit);
-       tipc_bclink_unlock(net);
+       tipc_bcast_lock(net);
+       tipc_link_set_queue_limits(l, limit);
+       tipc_bcast_unlock(net);
        return 0;
 }
 
@@ -935,123 +495,51 @@ int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
 
        win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
 
-       return tipc_bclink_set_queue_limits(net, win);
+       return tipc_bc_link_set_queue_limits(net, win);
 }
 
-int tipc_bclink_init(struct net *net)
+int tipc_bcast_init(struct net *net)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_bcbearer *bcbearer;
-       struct tipc_bclink *bclink;
-       struct tipc_link *bcl;
-
-       bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
-       if (!bcbearer)
-               return -ENOMEM;
-
-       bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
-       if (!bclink) {
-               kfree(bcbearer);
-               return -ENOMEM;
-       }
+       struct tipc_net *tn = tipc_net(net);
+       struct tipc_bc_base *bb = NULL;
+       struct tipc_link *l = NULL;
 
-       bcl = &bclink->link;
-       bcbearer->bearer.media = &bcbearer->media;
-       bcbearer->media.send_msg = tipc_bcbearer_send;
-       sprintf(bcbearer->media.name, "tipc-broadcast");
-
-       spin_lock_init(&bclink->lock);
-       __skb_queue_head_init(&bcl->transmq);
-       __skb_queue_head_init(&bcl->backlogq);
-       __skb_queue_head_init(&bcl->deferdq);
-       skb_queue_head_init(&bcl->wakeupq);
-       bcl->snd_nxt = 1;
-       spin_lock_init(&bclink->node.lock);
-       __skb_queue_head_init(&bclink->arrvq);
-       skb_queue_head_init(&bclink->inputq);
-       bcl->owner = &bclink->node;
-       bcl->owner->net = net;
-       bcl->mtu = MAX_PKT_DEFAULT_MCAST;
-       tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
-       bcl->bearer_id = MAX_BEARERS;
-       rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
-       bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
-       msg_set_prevnode(bcl->pmsg, tn->own_addr);
-       strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
-       tn->bcbearer = bcbearer;
-       tn->bclink = bclink;
-       tn->bcl = bcl;
-       return 0;
-}
+       bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
+       if (!bb)
+               goto enomem;
+       tn->bcbase = bb;
+       spin_lock_init(&tipc_net(net)->bclock);
 
-void tipc_bclink_stop(struct net *net)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       tipc_bclink_lock(net);
-       tipc_link_purge_queues(tn->bcl);
-       tipc_bclink_unlock(net);
-
-       RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
-       synchronize_net();
-       kfree(tn->bcbearer);
-       kfree(tn->bclink);
+       if (!tipc_link_bc_create(net, 0, 0, U16_MAX, BCLINK_WIN_DEFAULT, 0,
+                                &bb->inputq, NULL, NULL, &l))
+               goto enomem;
+       bb->link = l;
+       tn->bcl = l;
+       return 0;
+enomem:
+       kfree(bb);
+       kfree(l);
+       return -ENOMEM;
 }
 
-/**
- * tipc_nmap_add - add a node to a node map
- */
-static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
+void tipc_bcast_reinit(struct net *net)
 {
-       int n = tipc_node(node);
-       int w = n / WSIZE;
-       u32 mask = (1 << (n % WSIZE));
+       struct tipc_bc_base *b = tipc_bc_base(net);
 
-       if ((nm_ptr->map[w] & mask) == 0) {
-               nm_ptr->count++;
-               nm_ptr->map[w] |= mask;
-       }
+       msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
 }
 
-/**
- * tipc_nmap_remove - remove a node from a node map
- */
-static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
+void tipc_bcast_stop(struct net *net)
 {
-       int n = tipc_node(node);
-       int w = n / WSIZE;
-       u32 mask = (1 << (n % WSIZE));
-
-       if ((nm_ptr->map[w] & mask) != 0) {
-               nm_ptr->map[w] &= ~mask;
-               nm_ptr->count--;
-       }
-}
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-/**
- * tipc_nmap_diff - find differences between node maps
- * @nm_a: input node map A
- * @nm_b: input node map B
- * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
- */
-static void tipc_nmap_diff(struct tipc_node_map *nm_a,
-                          struct tipc_node_map *nm_b,
-                          struct tipc_node_map *nm_diff)
-{
-       int stop = ARRAY_SIZE(nm_a->map);
-       int w;
-       int b;
-       u32 map;
-
-       memset(nm_diff, 0, sizeof(*nm_diff));
-       for (w = 0; w < stop; w++) {
-               map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
-               nm_diff->map[w] = map;
-               if (map != 0) {
-                       for (b = 0 ; b < WSIZE; b++) {
-                               if (map & (1 << b))
-                                       nm_diff->count++;
-                       }
-               }
-       }
+       synchronize_net();
+       kfree(tn->bcbase);
+       kfree(tn->bcl);
 }
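
Worth noting before the header changes below: every entry point in the rewritten bcast.c follows the same shape, collecting outgoing packets on an on-stack queue under the broadcast lock and transmitting only after the lock is dropped. A condensed sketch of that pattern, assuming nothing beyond the helpers visible in this patch (example_bc_op is illustrative):

static void example_bc_op(struct net *net)
{
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);		/* on-stack, needs no lock */

	tipc_bcast_lock(net);
	/* ...update link state, queue any generated packets on xmitq... */
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);		/* send without holding bclock */
}
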
index d74c69bcf60bda5e04ddc61a988afd6fa94bee66..2855b9356a1527224d76186d001aad2a5a44e50d 100644 (file)
 #ifndef _TIPC_BCAST_H
 #define _TIPC_BCAST_H
 
-#include <linux/tipc_config.h>
-#include "link.h"
-#include "node.h"
+#include "core.h"
 
-/**
- * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
- * @primary: pointer to primary bearer
- * @secondary: pointer to secondary bearer
- *
- * Bearers must have same priority and same set of reachable destinations
- * to be paired.
- */
-
-struct tipc_bcbearer_pair {
-       struct tipc_bearer *primary;
-       struct tipc_bearer *secondary;
-};
-
-#define        BCBEARER                MAX_BEARERS
-
-/**
- * struct tipc_bcbearer - bearer used by broadcast link
- * @bearer: (non-standard) broadcast bearer structure
- * @media: (non-standard) broadcast media structure
- * @bpairs: array of bearer pairs
- * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
- * @remains: temporary node map used by tipc_bcbearer_send()
- * @remains_new: temporary node map used tipc_bcbearer_send()
- *
- * Note: The fields labelled "temporary" are incorporated into the bearer
- * to avoid consuming potentially limited stack space through the use of
- * large local variables within multicast routines.  Concurrent access is
- * prevented through use of the spinlock "bclink_lock".
- */
-struct tipc_bcbearer {
-       struct tipc_bearer bearer;
-       struct tipc_media media;
-       struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
-       struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
-       struct tipc_node_map remains;
-       struct tipc_node_map remains_new;
-};
+struct tipc_node;
+struct tipc_msg;
+struct tipc_nl_msg;
+struct tipc_node_map;
 
-/**
- * struct tipc_bclink - link used for broadcast messages
- * @lock: spinlock governing access to structure
- * @link: (non-standard) broadcast link structure
- * @node: (non-standard) node structure representing b'cast link's peer node
- * @bcast_nodes: map of broadcast-capable nodes
- * @retransmit_to: node that most recently requested a retransmit
- *
- * Handles sequence numbering, fragmentation, bundling, etc.
- */
-struct tipc_bclink {
-       spinlock_t lock;
-       struct tipc_link link;
-       struct tipc_node node;
-       struct sk_buff_head arrvq;
-       struct sk_buff_head inputq;
-       struct tipc_node_map bcast_nodes;
-       struct tipc_node *retransmit_to;
-};
+int tipc_bcast_init(struct net *net);
+void tipc_bcast_reinit(struct net *net);
+void tipc_bcast_stop(struct net *net);
+void tipc_bcast_add_peer(struct net *net, struct tipc_link *l,
+                        struct sk_buff_head *xmitq);
+void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl);
+void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id);
+void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
+int  tipc_bcast_get_mtu(struct net *net);
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
+int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked);
+void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+                        struct tipc_msg *hdr);
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
+int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
+int tipc_bclink_reset_stats(struct net *net);
 
-struct tipc_node;
-extern const char tipc_bclink_name[];
+static inline void tipc_bcast_lock(struct net *net)
+{
+       spin_lock_bh(&tipc_net(net)->bclock);
+}
 
-/**
- * tipc_nmap_equal - test for equality of node maps
- */
-static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
-                                 struct tipc_node_map *nm_b)
+static inline void tipc_bcast_unlock(struct net *net)
 {
-       return !memcmp(nm_a, nm_b, sizeof(*nm_a));
+       spin_unlock_bh(&tipc_net(net)->bclock);
 }
 
-int tipc_bclink_init(struct net *net);
-void tipc_bclink_stop(struct net *net);
-void tipc_bclink_add_node(struct net *net, u32 addr);
-void tipc_bclink_remove_node(struct net *net, u32 addr);
-struct tipc_node *tipc_bclink_retransmit_to(struct net *tn);
-void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-void tipc_bclink_rcv(struct net *net, struct sk_buff *buf);
-u32  tipc_bclink_get_last_sent(struct net *net);
-u32  tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_update_link_state(struct tipc_node *node,
-                                  u32 last_sent);
-int  tipc_bclink_reset_stats(struct net *net);
-int  tipc_bclink_set_queue_limits(struct net *net, u32 limit);
-void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
-                       u32 node, bool action);
-uint  tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
-void tipc_bclink_wakeup_users(struct net *net);
-int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
-int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
-void tipc_bclink_input(struct net *net);
-void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *msg);
+static inline struct tipc_link *tipc_bc_sndlink(struct net *net)
+{
+       return tipc_net(net)->bcl;
+}
 
 #endif
index ce9f7bfc0b92444950f51893e87abbc426151eb6..648f2a67f3148272dbbf7fd43e3a0536b1cdab3c 100644 (file)
@@ -193,10 +193,8 @@ void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest)
 
        rcu_read_lock();
        b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-       if (b_ptr) {
-               tipc_bcbearer_sort(net, &b_ptr->nodes, dest, true);
+       if (b_ptr)
                tipc_disc_add_dest(b_ptr->link_req);
-       }
        rcu_read_unlock();
 }
 
@@ -207,10 +205,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
 
        rcu_read_lock();
        b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-       if (b_ptr) {
-               tipc_bcbearer_sort(net, &b_ptr->nodes, dest, false);
+       if (b_ptr)
                tipc_disc_remove_dest(b_ptr->link_req);
-       }
        rcu_read_unlock();
 }
 
@@ -362,6 +358,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
        b_ptr->media->disable_media(b_ptr);
 
        tipc_node_delete_links(net, b_ptr->identity);
+       RCU_INIT_POINTER(b_ptr->media_ptr, NULL);
        if (b_ptr->link_req)
                tipc_disc_delete(b_ptr->link_req);
 
@@ -399,16 +396,13 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
 
 /* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
  *
- * Mark L2 bearer as inactive so that incoming buffers are thrown away,
- * then get worker thread to complete bearer cleanup.  (Can't do cleanup
- * here because cleanup code needs to sleep and caller holds spinlocks.)
+ * Mark L2 bearer as inactive so that incoming buffers are thrown away
  */
 void tipc_disable_l2_media(struct tipc_bearer *b)
 {
        struct net_device *dev;
 
        dev = (struct net_device *)rtnl_dereference(b->media_ptr);
-       RCU_INIT_POINTER(b->media_ptr, NULL);
        RCU_INIT_POINTER(dev->tipc_ptr, NULL);
        synchronize_net();
        dev_put(dev);
@@ -420,10 +414,9 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
  * @b_ptr: the bearer through which the packet is to be sent
  * @dest: peer destination address
  */
-int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
                     struct tipc_bearer *b, struct tipc_media_addr *dest)
 {
-       struct sk_buff *clone;
        struct net_device *dev;
        int delta;
 
@@ -431,42 +424,48 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
        if (!dev)
                return 0;
 
-       clone = skb_clone(buf, GFP_ATOMIC);
-       if (!clone)
-               return 0;
-
-       delta = dev->hard_header_len - skb_headroom(buf);
+       delta = dev->hard_header_len - skb_headroom(skb);
        if ((delta > 0) &&
-           pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
-               kfree_skb(clone);
+           pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+               kfree_skb(skb);
                return 0;
        }
 
-       skb_reset_network_header(clone);
-       clone->dev = dev;
-       clone->protocol = htons(ETH_P_TIPC);
-       dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
-                       dev->dev_addr, clone->len);
-       dev_queue_xmit(clone);
+       skb_reset_network_header(skb);
+       skb->dev = dev;
+       skb->protocol = htons(ETH_P_TIPC);
+       dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
+                       dev->dev_addr, skb->len);
+       dev_queue_xmit(skb);
        return 0;
 }
 
-/* tipc_bearer_send- sends buffer to destination over bearer
- *
- * IMPORTANT:
- * The media send routine must not alter the buffer being passed in
- * as it may be needed for later retransmission!
+int tipc_bearer_mtu(struct net *net, u32 bearer_id)
+{
+       int mtu = 0;
+       struct tipc_bearer *b;
+
+       rcu_read_lock();
+       b = rcu_dereference_rtnl(tipc_net(net)->bearer_list[bearer_id]);
+       if (b)
+               mtu = b->mtu;
+       rcu_read_unlock();
+       return mtu;
+}
+
+/* tipc_bearer_xmit_skb - sends buffer to destination over bearer
  */
-void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
-                     struct tipc_media_addr *dest)
+void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+                         struct sk_buff *skb,
+                         struct tipc_media_addr *dest)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_bearer *b_ptr;
+       struct tipc_net *tn = tipc_net(net);
+       struct tipc_bearer *b;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-       if (likely(b_ptr))
-               b_ptr->media->send_msg(net, buf, b_ptr, dest);
+       b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+       if (likely(b))
+               b->media->send_msg(net, skb, b, dest);
        rcu_read_unlock();
 }
 
@@ -489,8 +488,31 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
                skb_queue_walk_safe(xmitq, skb, tmp) {
                        __skb_dequeue(xmitq);
                        b->media->send_msg(net, skb, b, dst);
-                       /* Until we remove cloning in tipc_l2_send_msg(): */
-                       kfree_skb(skb);
+               }
+       }
+       rcu_read_unlock();
+}
+
+/* tipc_bearer_bc_xmit() - broadcast buffers to all destinations
+ */
+void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
+                        struct sk_buff_head *xmitq)
+{
+       struct tipc_net *tn = tipc_net(net);
+       int net_id = tn->net_id;
+       struct tipc_bearer *b;
+       struct sk_buff *skb, *tmp;
+       struct tipc_msg *hdr;
+
+       rcu_read_lock();
+       b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+       if (likely(b)) {
+               skb_queue_walk_safe(xmitq, skb, tmp) {
+                       hdr = buf_msg(skb);
+                       msg_set_non_seq(hdr, 1);
+                       msg_set_mc_netid(hdr, net_id);
+                       __skb_dequeue(xmitq);
+                       b->media->send_msg(net, skb, b, &b->bcast_addr);
                }
        }
        rcu_read_unlock();
@@ -554,7 +576,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
        case NETDEV_CHANGE:
                if (netif_carrier_ok(dev))
                        break;
-       case NETDEV_DOWN:
+       case NETDEV_GOING_DOWN:
        case NETDEV_CHANGEMTU:
                tipc_reset_bearer(net, b_ptr);
                break;
index 6426f242f6262e80594cd1cdc438c4a94f4c7026..552185bc477327bc8a0ca4c638194d227268aace 100644 (file)
@@ -163,6 +163,7 @@ struct tipc_bearer {
        u32 identity;
        struct tipc_link_req *link_req;
        char net_plane;
+       int node_cnt;
        struct tipc_node_map nodes;
 };
 
@@ -215,10 +216,14 @@ struct tipc_media *tipc_media_find(const char *name);
 int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
-void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
-                     struct tipc_media_addr *dest);
+int tipc_bearer_mtu(struct net *net, u32 bearer_id);
+void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+                         struct sk_buff *skb,
+                         struct tipc_media_addr *dest);
 void tipc_bearer_xmit(struct net *net, u32 bearer_id,
                      struct sk_buff_head *xmitq,
                      struct tipc_media_addr *dst);
+void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
+                        struct sk_buff_head *xmitq);
 
 #endif /* _TIPC_BEARER_H */
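
With the cloning removed from tipc_l2_send_msg(), the media send routine now consumes the skb it is handed, so tipc_bearer_xmit_skb() does too. A caller that must keep the buffer, as disc.c does for its cached request below, clones before sending. A sketch under that assumption (example_send_keep is illustrative):

static void example_send_keep(struct net *net, u32 bearer_id,
			      struct sk_buff *buf,
			      struct tipc_media_addr *dest)
{
	struct sk_buff *skb = skb_clone(buf, GFP_ATOMIC);

	if (skb)
		tipc_bearer_xmit_skb(net, bearer_id, skb, dest);
	/* 'buf' is still owned by the caller */
}
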
index 005ba5eb0ea426a3a24cbfe0e74aa4c1901af903..03a842870c52d22ca5c8e72ce063f78ba8391521 100644 (file)
@@ -42,6 +42,7 @@
 #include "bearer.h"
 #include "net.h"
 #include "socket.h"
+#include "bcast.h"
 
 #include <linux/module.h>
 
@@ -71,8 +72,15 @@ static int __net_init tipc_init_net(struct net *net)
        err = tipc_topsrv_start(net);
        if (err)
                goto out_subscr;
+
+       err = tipc_bcast_init(net);
+       if (err)
+               goto out_bclink;
+
        return 0;
 
+out_bclink:
+       tipc_bcast_stop(net);
 out_subscr:
        tipc_nametbl_stop(net);
 out_nametbl:
@@ -85,6 +93,7 @@ static void __net_exit tipc_exit_net(struct net *net)
 {
        tipc_topsrv_stop(net);
        tipc_net_stop(net);
+       tipc_bcast_stop(net);
        tipc_nametbl_stop(net);
        tipc_sk_rht_destroy(net);
 }
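
tipc_init_net() above follows the usual goto-unwind idiom: each subsystem that initializes successfully gains a label that later failures jump to, tearing down in reverse order. Note that the label chosen for a tipc_bcast_init() failure also runs tipc_bcast_stop() for the failing subsystem itself, so the stop path must tolerate a partly initialized state. A minimal sketch of the canonical shape, with hypothetical subsystem names:

static int example_init(struct net *net)
{
	int err;

	err = subsys_a_init(net);	/* hypothetical */
	if (err)
		return err;

	err = subsys_b_init(net);	/* hypothetical */
	if (err)
		goto out_a;
	return 0;

out_a:
	subsys_a_stop(net);
	return err;
}
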
index b96b41eabf121cc8577b65d0ad5bb0727ca5d3f7..18e95a8020cd48b4594fb4d00f1fc47e2ed6f6e2 100644 (file)
@@ -62,8 +62,7 @@
 
 struct tipc_node;
 struct tipc_bearer;
-struct tipc_bcbearer;
-struct tipc_bclink;
+struct tipc_bc_base;
 struct tipc_link;
 struct tipc_name_table;
 struct tipc_server;
@@ -93,8 +92,8 @@ struct tipc_net {
        struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
 
        /* Broadcast link */
-       struct tipc_bcbearer *bcbearer;
-       struct tipc_bclink *bclink;
+       spinlock_t bclock;
+       struct tipc_bc_base *bcbase;
        struct tipc_link *bcl;
 
        /* Socket hash table */
@@ -114,6 +113,11 @@ static inline struct tipc_net *tipc_net(struct net *net)
        return net_generic(net, tipc_net_id);
 }
 
+static inline int tipc_netid(struct net *net)
+{
+       return tipc_net(net)->net_id;
+}
+
 static inline u16 mod(u16 x)
 {
        return x & 0xffffu;
index d14e0a4aa9af900a7ace6855ab91eb2bcc901641..afe8c47c4085c7e01e6e8bfb3d9ed1e4f13eaa20 100644 (file)
@@ -89,7 +89,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
                      MAX_H_SIZE, dest_domain);
        msg_set_non_seq(msg, 1);
        msg_set_node_sig(msg, tn->random);
-       msg_set_node_capabilities(msg, 0);
+       msg_set_node_capabilities(msg, TIPC_NODE_CAPABILITIES);
        msg_set_dest_domain(msg, dest_domain);
        msg_set_bc_netid(msg, tn->net_id);
        b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
@@ -167,11 +167,10 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
        /* Send response, if necessary */
        if (respond && (mtyp == DSC_REQ_MSG)) {
                rskb = tipc_buf_acquire(MAX_H_SIZE);
-               if (rskb) {
-                       tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
-                       tipc_bearer_send(net, bearer->identity, rskb, &maddr);
-                       kfree_skb(rskb);
-               }
+               if (!rskb)
+                       return;
+               tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
+               tipc_bearer_xmit_skb(net, bearer->identity, rskb, &maddr);
        }
 }
 
@@ -225,6 +224,7 @@ void tipc_disc_remove_dest(struct tipc_link_req *req)
 static void disc_timeout(unsigned long data)
 {
        struct tipc_link_req *req = (struct tipc_link_req *)data;
+       struct sk_buff *skb;
        int max_delay;
 
        spin_lock_bh(&req->lock);
@@ -242,9 +242,9 @@ static void disc_timeout(unsigned long data)
         * hold at fast polling rate if don't have any associated nodes,
         * otherwise hold at slow polling rate
         */
-       tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest);
-
-
+       skb = skb_clone(req->buf, GFP_ATOMIC);
+       if (skb)
+               tipc_bearer_xmit_skb(req->net, req->bearer_id, skb, &req->dest);
        req->timer_intv *= 2;
        if (req->num_nodes)
                max_delay = TIPC_LINK_REQ_SLOW;
@@ -271,6 +271,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
                     struct tipc_media_addr *dest)
 {
        struct tipc_link_req *req;
+       struct sk_buff *skb;
 
        req = kmalloc(sizeof(*req), GFP_ATOMIC);
        if (!req)
@@ -292,7 +293,9 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
        setup_timer(&req->timer, disc_timeout, (unsigned long)req);
        mod_timer(&req->timer, jiffies + req->timer_intv);
        b_ptr->link_req = req;
-       tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
+       skb = skb_clone(req->buf, GFP_ATOMIC);
+       if (skb)
+               tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
        return 0;
 }
 
@@ -316,6 +319,7 @@ void tipc_disc_delete(struct tipc_link_req *req)
 void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr)
 {
        struct tipc_link_req *req = b_ptr->link_req;
+       struct sk_buff *skb;
 
        spin_lock_bh(&req->lock);
        tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
@@ -325,6 +329,8 @@ void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr)
        req->num_nodes = 0;
        req->timer_intv = TIPC_LINK_REQ_INIT;
        mod_timer(&req->timer, jiffies + req->timer_intv);
-       tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
+       skb = skb_clone(req->buf, GFP_ATOMIC);
+       if (skb)
+               tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
        spin_unlock_bh(&req->lock);
 }
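
The request timer above backs off exponentially: timer_intv doubles on every expiry and is clamped to a slow polling rate once nodes are attached, a fast rate otherwise. A condensed sketch of the clamp, assuming TIPC_LINK_REQ_FAST is the existing fast-poll constant alongside TIPC_LINK_REQ_SLOW:

static unsigned long next_disc_interval(unsigned long intv, int num_nodes)
{
	int max_delay = num_nodes ? TIPC_LINK_REQ_SLOW : TIPC_LINK_REQ_FAST;

	intv *= 2;
	return min_t(unsigned long, intv, max_delay);
}
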
index 75db07c78a6900157c0b568491a5d4e8e694e274..9efbdbde2b0863542a08c91c136fadc9aa0cc6d8 100644 (file)
@@ -50,6 +50,7 @@
  */
 static const char *link_co_err = "Link tunneling error, ";
 static const char *link_rst_msg = "Resetting link ";
+static const char tipc_bclink_name[] = "broadcast-link";
 
 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
        [TIPC_NLA_LINK_UNSPEC]          = { .type = NLA_UNSPEC },
@@ -75,6 +76,14 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
        [TIPC_NLA_PROP_WIN]             = { .type = NLA_U32 }
 };
 
+/* Send states for broadcast NACKs
+ */
+enum {
+       BC_NACK_SND_CONDITIONAL,
+       BC_NACK_SND_UNCONDITIONAL,
+       BC_NACK_SND_SUPPRESS,
+};
+
 /*
  * Interval between NACKs when packets arrive out of order
  */
@@ -110,7 +119,11 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      struct sk_buff_head *xmitq);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
-static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
+static void tipc_link_build_nack_msg(struct tipc_link *l,
+                                    struct sk_buff_head *xmitq);
+static void tipc_link_build_bc_init_msg(struct tipc_link *l,
+                                       struct sk_buff_head *xmitq);
+static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
 
 /*
  *  Simple non-static link routines (i.e. referenced outside this file)
@@ -120,11 +133,21 @@ bool tipc_link_is_up(struct tipc_link *l)
        return link_is_up(l);
 }
 
+bool tipc_link_peer_is_down(struct tipc_link *l)
+{
+       return l->state == LINK_PEER_RESET;
+}
+
 bool tipc_link_is_reset(struct tipc_link *l)
 {
        return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 }
 
+bool tipc_link_is_establishing(struct tipc_link *l)
+{
+       return l->state == LINK_ESTABLISHING;
+}
+
 bool tipc_link_is_synching(struct tipc_link *l)
 {
        return l->state == LINK_SYNCHING;
@@ -140,11 +163,66 @@ bool tipc_link_is_blocked(struct tipc_link *l)
        return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
 }
 
+static bool link_is_bc_sndlink(struct tipc_link *l)
+{
+       return !l->bc_sndlink;
+}
+
+static bool link_is_bc_rcvlink(struct tipc_link *l)
+{
+       return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
+}
+
 int tipc_link_is_active(struct tipc_link *l)
 {
-       struct tipc_node *n = l->owner;
+       return l->active;
+}
+
+void tipc_link_set_active(struct tipc_link *l, bool active)
+{
+       l->active = active;
+}
+
+void tipc_link_add_bc_peer(struct tipc_link *snd_l,
+                          struct tipc_link *uc_l,
+                          struct sk_buff_head *xmitq)
+{
+       struct tipc_link *rcv_l = uc_l->bc_rcvlink;
+
+       snd_l->ackers++;
+       rcv_l->acked = snd_l->snd_nxt - 1;
+       tipc_link_build_bc_init_msg(uc_l, xmitq);
+}
+
+void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
+                             struct tipc_link *rcv_l,
+                             struct sk_buff_head *xmitq)
+{
+       u16 ack = snd_l->snd_nxt - 1;
+
+       snd_l->ackers--;
+       tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
+       tipc_link_reset(rcv_l);
+       rcv_l->state = LINK_RESET;
+       if (!snd_l->ackers) {
+               tipc_link_reset(snd_l);
+               __skb_queue_purge(xmitq);
+       }
+}
+
+int tipc_link_bc_peers(struct tipc_link *l)
+{
+       return l->ackers;
+}
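
The ackers counter above is the core of the new broadcast reliability scheme: the send link counts its peers, tipc_link_xmit() stamps that count into each transmitted packet (TIPC_SKB_CB(skb)->ackers, set further down), and a packet can be released once enough acknowledgments have consumed the count. A condensed, illustrative sketch of the release test:

static bool example_bc_ack_one(struct sk_buff *skb)
{
	/* one more peer has acked; true when no acks remain outstanding */
	return --TIPC_SKB_CB(skb)->ackers == 0;
}
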
+
+void tipc_link_set_mtu(struct tipc_link *l, int mtu)
+{
+       l->mtu = mtu;
+}
 
-       return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
+int tipc_link_mtu(struct tipc_link *l)
+{
+       return l->mtu;
 }
 
 static u32 link_own_addr(struct tipc_link *l)
@@ -155,57 +233,72 @@ static u32 link_own_addr(struct tipc_link *l)
 /**
  * tipc_link_create - create a new link
- * @n: pointer to associated node
+ * @net: the applicable net namespace
- * @b: pointer to associated bearer
+ * @if_name: associated interface name
+ * @bearer_id: id (index) of associated bearer
+ * @tolerance: link tolerance to be used by link
+ * @net_plane: network plane (A,B,C..) this link belongs to
+ * @mtu: mtu to be advertised by link
+ * @priority: priority to be used by link
+ * @window: send window to be used by link
+ * @session: session to be used by link
  * @ownnode: identity of own node
- * @peer: identity of peer node
- * @maddr: media address to be used
+ * @peer: node id of peer node
+ * @peer_caps: bitmap describing peer node capabilities
+ * @bc_sndlink: the namespace global link used for broadcast sending
+ * @bc_rcvlink: the peer specific link used for broadcast reception
  * @inputq: queue to put messages ready for delivery
  * @namedq: queue to put binding table update messages ready for delivery
  * @link: return value, pointer to put the created link
  *
  * Returns true if link was created, otherwise false
  */
-bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
-                     u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
-                     struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
+                     int tolerance, char net_plane, u32 mtu, int priority,
+                     int window, u32 session, u32 ownnode, u32 peer,
+                     u16 peer_caps,
+                     struct tipc_link *bc_sndlink,
+                     struct tipc_link *bc_rcvlink,
+                     struct sk_buff_head *inputq,
+                     struct sk_buff_head *namedq,
                      struct tipc_link **link)
 {
        struct tipc_link *l;
        struct tipc_msg *hdr;
-       char *if_name;
 
        l = kzalloc(sizeof(*l), GFP_ATOMIC);
        if (!l)
                return false;
        *link = l;
+       l->pmsg = (struct tipc_msg *)&l->proto_msg;
+       hdr = l->pmsg;
+       tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
+       msg_set_size(hdr, sizeof(l->proto_msg));
+       msg_set_session(hdr, session);
+       msg_set_bearer_id(hdr, l->bearer_id);
 
        /* Note: peer i/f name is completed by reset/activate message */
-       if_name = strchr(b->name, ':') + 1;
        sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
                tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
                if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+       strcpy((char *)msg_data(hdr), if_name);
 
        l->addr = peer;
-       l->media_addr = maddr;
-       l->owner = n;
+       l->peer_caps = peer_caps;
+       l->net = net;
        l->peer_session = WILDCARD_SESSION;
-       l->bearer_id = b->identity;
-       l->tolerance = b->tolerance;
-       l->net_plane = b->net_plane;
-       l->advertised_mtu = b->mtu;
-       l->mtu = b->mtu;
-       l->priority = b->priority;
-       tipc_link_set_queue_limits(l, b->window);
+       l->bearer_id = bearer_id;
+       l->tolerance = tolerance;
+       l->net_plane = net_plane;
+       l->advertised_mtu = mtu;
+       l->mtu = mtu;
+       l->priority = priority;
+       tipc_link_set_queue_limits(l, window);
+       l->ackers = 1;
+       l->bc_sndlink = bc_sndlink;
+       l->bc_rcvlink = bc_rcvlink;
        l->inputq = inputq;
        l->namedq = namedq;
        l->state = LINK_RESETTING;
-       l->pmsg = (struct tipc_msg *)&l->proto_msg;
-       hdr = l->pmsg;
-       tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
-       msg_set_size(hdr, sizeof(l->proto_msg));
-       msg_set_session(hdr, session);
-       msg_set_bearer_id(hdr, l->bearer_id);
-       strcpy((char *)msg_data(hdr), if_name);
        __skb_queue_head_init(&l->transmq);
        __skb_queue_head_init(&l->backlogq);
        __skb_queue_head_init(&l->deferdq);
@@ -214,27 +307,43 @@ bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
        return true;
 }
 
-/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
+/**
+ * tipc_link_bc_create - create new link to be used for broadcast
+ * @net: the applicable net namespace
+ * @ownnode: identity of own node
+ * @peer: node id of peer node
+ * @mtu: mtu to be used
+ * @window: send window to be used
+ * @peer_caps: bitmap describing peer node capabilities
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @bc_sndlink: the namespace global link used for broadcast sending
+ * @link: return value, pointer to put the created link
  *
- * Give a newly added peer node the sequence number where it should
- * start receiving and acking broadcast packets.
+ * Returns true if link was created, otherwise false
  */
-void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
-                                   struct sk_buff_head *xmitq)
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
+                        int mtu, int window, u16 peer_caps,
+                        struct sk_buff_head *inputq,
+                        struct sk_buff_head *namedq,
+                        struct tipc_link *bc_sndlink,
+                        struct tipc_link **link)
 {
-       struct sk_buff *skb;
-       struct sk_buff_head list;
-       u16 last_sent;
+       struct tipc_link *l;
 
-       skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
-                             0, l->addr, link_own_addr(l), 0, 0, 0);
-       if (!skb)
-               return;
-       last_sent = tipc_bclink_get_last_sent(l->owner->net);
-       msg_set_last_bcast(buf_msg(skb), last_sent);
-       __skb_queue_head_init(&list);
-       __skb_queue_tail(&list, skb);
-       tipc_link_xmit(l, &list, xmitq);
+       if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
+                             0, ownnode, peer, peer_caps, bc_sndlink,
+                             NULL, inputq, namedq, link))
+               return false;
+
+       l = *link;
+       strcpy(l->name, tipc_bclink_name);
+       tipc_link_reset(l);
+       l->state = LINK_RESET;
+       l->ackers = 0;
+       l->bc_rcvlink = l;
+
+       /* Broadcast send link is always up */
+       if (link_is_bc_sndlink(l))
+               l->state = LINK_ESTABLISHED;
+
+       return true;
 }
 
 /**
@@ -321,14 +430,15 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt)
                switch (evt) {
                case LINK_ESTABLISH_EVT:
                        l->state = LINK_ESTABLISHED;
-                       rc |= TIPC_LINK_UP_EVT;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                        break;
-               case LINK_PEER_RESET_EVT:
                case LINK_RESET_EVT:
+                       l->state = LINK_RESET;
+                       break;
                case LINK_FAILURE_EVT:
+               case LINK_PEER_RESET_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
@@ -438,6 +548,8 @@ static void link_profile_stats(struct tipc_link *l)
                l->stats.msg_length_profile[6]++;
 }
 
 /* tipc_link_timeout - perform periodic task as instructed from node timeout
  */
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
@@ -446,6 +558,9 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
        int mtyp = STATE_MSG;
        bool xmit = false;
        bool prb = false;
+       u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
+       u16 bc_acked = l->bc_rcvlink->acked;
+       bool bc_up = link_is_up(l->bc_rcvlink);
 
        link_profile_stats(l);
 
@@ -453,7 +568,7 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
        case LINK_ESTABLISHED:
        case LINK_SYNCHING:
                if (!l->silent_intv_cnt) {
-                       if (tipc_bclink_acks_missing(l->owner))
+                       if (bc_up && (bc_acked != bc_snt))
                                xmit = true;
                } else if (l->silent_intv_cnt <= l->abort_limit) {
                        xmit = true;
@@ -544,42 +659,8 @@ void link_prepare_wakeup(struct tipc_link *l)
        }
 }
 
-/**
- * tipc_link_reset_fragments - purge link's inbound message fragments queue
- * @l_ptr: pointer to link
- */
-void tipc_link_reset_fragments(struct tipc_link *l_ptr)
-{
-       kfree_skb(l_ptr->reasm_buf);
-       l_ptr->reasm_buf = NULL;
-}
-
-void tipc_link_purge_backlog(struct tipc_link *l)
-{
-       __skb_queue_purge(&l->backlogq);
-       l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
-       l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
-       l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
-       l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
-       l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
-}
-
-/**
- * tipc_link_purge_queues - purge all pkt queues associated with link
- * @l_ptr: pointer to link
- */
-void tipc_link_purge_queues(struct tipc_link *l_ptr)
-{
-       __skb_queue_purge(&l_ptr->deferdq);
-       __skb_queue_purge(&l_ptr->transmq);
-       tipc_link_purge_backlog(l_ptr);
-       tipc_link_reset_fragments(l_ptr);
-}
-
 void tipc_link_reset(struct tipc_link *l)
 {
-       tipc_link_fsm_evt(l, LINK_RESET_EVT);
-
        /* Link is down, accept any session */
        l->peer_session = WILDCARD_SESSION;
 
@@ -589,12 +670,16 @@ void tipc_link_reset(struct tipc_link *l)
        /* Prepare for renewed mtu size negotiation */
        l->mtu = l->advertised_mtu;
 
-       /* Clean up all queues: */
+       /* Clean up all queues and counters: */
        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
        skb_queue_splice_init(&l->wakeupq, l->inputq);
-
-       tipc_link_purge_backlog(l);
+       __skb_queue_purge(&l->backlogq);
+       l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+       l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+       l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+       l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+       l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
        kfree_skb(l->reasm_buf);
        kfree_skb(l->failover_reasm_skb);
        l->reasm_buf = NULL;
@@ -602,80 +687,14 @@ void tipc_link_reset(struct tipc_link *l)
        l->rcv_unacked = 0;
        l->snd_nxt = 1;
        l->rcv_nxt = 1;
+       l->acked = 0;
        l->silent_intv_cnt = 0;
        l->stats.recv_info = 0;
        l->stale_count = 0;
+       l->bc_peer_is_up = false;
        link_reset_statistics(l);
 }
 
-/**
- * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
- * @link: link to use
- * @list: chain of buffers containing message
- *
- * Consumes the buffer chain, except when returning an error code,
- * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
- * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
- */
-int __tipc_link_xmit(struct net *net, struct tipc_link *link,
-                    struct sk_buff_head *list)
-{
-       struct tipc_msg *msg = buf_msg(skb_peek(list));
-       unsigned int maxwin = link->window;
-       unsigned int i, imp = msg_importance(msg);
-       uint mtu = link->mtu;
-       u16 ack = mod(link->rcv_nxt - 1);
-       u16 seqno = link->snd_nxt;
-       u16 bc_last_in = link->owner->bclink.last_in;
-       struct tipc_media_addr *addr = link->media_addr;
-       struct sk_buff_head *transmq = &link->transmq;
-       struct sk_buff_head *backlogq = &link->backlogq;
-       struct sk_buff *skb, *bskb;
-
-       /* Match msg importance against this and all higher backlog limits: */
-       for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
-               if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
-                       return link_schedule_user(link, list);
-       }
-       if (unlikely(msg_size(msg) > mtu))
-               return -EMSGSIZE;
-
-       /* Prepare each packet for sending, and add to relevant queue: */
-       while (skb_queue_len(list)) {
-               skb = skb_peek(list);
-               msg = buf_msg(skb);
-               msg_set_seqno(msg, seqno);
-               msg_set_ack(msg, ack);
-               msg_set_bcast_ack(msg, bc_last_in);
-
-               if (likely(skb_queue_len(transmq) < maxwin)) {
-                       __skb_dequeue(list);
-                       __skb_queue_tail(transmq, skb);
-                       tipc_bearer_send(net, link->bearer_id, skb, addr);
-                       link->rcv_unacked = 0;
-                       seqno++;
-                       continue;
-               }
-               if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
-                       kfree_skb(__skb_dequeue(list));
-                       link->stats.sent_bundled++;
-                       continue;
-               }
-               if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
-                       kfree_skb(__skb_dequeue(list));
-                       __skb_queue_tail(backlogq, bskb);
-                       link->backlog[msg_importance(buf_msg(bskb))].len++;
-                       link->stats.sent_bundled++;
-                       link->stats.sent_bundles++;
-                       continue;
-               }
-               link->backlog[imp].len += skb_queue_len(list);
-               skb_queue_splice_tail_init(list, backlogq);
-       }
-       link->snd_nxt = seqno;
-       return 0;
-}
-
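
The admission check at the top of the removed __tipc_link_xmit() is kept, outside this hunk's context, in the reworked tipc_link_xmit(): a message is admitted only while the backlog level for its own importance and every higher importance is under its limit. A minimal standalone sketch of that policy, with invented limits (not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum { TIPC_LOW, TIPC_MEDIUM, TIPC_HIGH, TIPC_CRITICAL, TIPC_SYSTEM,
       IMP_LEVELS };

struct backlog { int len; int limit; };

/* Admit a message of importance 'imp' only while its own level and every
 * higher level are under their limits; the kernel instead parks the
 * sender via link_schedule_user() when this fails. */
static bool backlog_admits(const struct backlog *bl, int imp)
{
	for (int i = imp; i < IMP_LEVELS; i++)
		if (bl[i].len >= bl[i].limit)
			return false;
	return true;
}

int main(void)
{
	struct backlog bl[IMP_LEVELS] = {
		{ 40, 50 }, { 20, 100 }, { 150, 150 }, { 10, 300 }, { 0, 600 },
	};

	/* HIGH is full, so a LOW message is refused, but CRITICAL passes: */
	printf("low: %d\n", backlog_admits(bl, TIPC_LOW));           /* 0 */
	printf("critical: %d\n", backlog_admits(bl, TIPC_CRITICAL)); /* 1 */
	return 0;
}
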
 /**
  * tipc_link_xmit(): enqueue buffer list according to queue situation
  * @link: link to use
@@ -696,7 +715,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
        unsigned int mtu = l->mtu;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
-       u16 bc_last_in = l->owner->bclink.last_in;
+       u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff *skb, *_skb, *bskb;
@@ -715,7 +734,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                hdr = buf_msg(skb);
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
-               msg_set_bcast_ack(hdr, bc_last_in);
+               msg_set_bcast_ack(hdr, bc_ack);
 
                if (likely(skb_queue_len(transmq) < maxwin)) {
                        _skb = skb_clone(skb, GFP_ATOMIC);
@@ -724,6 +743,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                        __skb_dequeue(list);
                        __skb_queue_tail(transmq, skb);
                        __skb_queue_tail(xmitq, _skb);
+                       TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
                        seqno++;
                        continue;
@@ -748,62 +768,13 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
        return 0;
 }
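
tipc_link_xmit() above enforces the send window: while transmq holds fewer than maxwin packets, each buffer is cloned and queued for immediate transmission; everything beyond that accumulates on backlogq until acks free slots and tipc_link_advance_backlog() below moves packets across. A toy, queue-lengths-only model of that flow control (not kernel code):

#include <stdio.h>

/* Counters stand in for the real transmq/backlogq skb queues. */
struct link_model {
	int window, transmq, backlogq;
};

static void xmit(struct link_model *l, int pkts)
{
	while (pkts--) {
		if (l->transmq < l->window)
			l->transmq++;           /* sent immediately */
		else
			l->backlogq++;          /* deferred to backlog */
	}
}

static void acked(struct link_model *l, int pkts)
{
	l->transmq -= pkts;                     /* acks release slots... */
	while (l->transmq < l->window && l->backlogq) {
		l->backlogq--;                  /* ...and backlog advances */
		l->transmq++;
	}
}

int main(void)
{
	struct link_model l = { .window = 50, .transmq = 0, .backlogq = 0 };

	xmit(&l, 60);
	printf("transmq %d backlogq %d\n", l.transmq, l.backlogq); /* 50 10 */
	acked(&l, 20);
	printf("transmq %d backlogq %d\n", l.transmq, l.backlogq); /* 40 0 */
	return 0;
}
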
 
-/*
- * tipc_link_sync_rcv - synchronize broadcast link endpoints.
- * Receive the sequence number where we should start receiving and
- * acking broadcast packets from a newly added peer node, and open
- * up for reception of such packets.
- *
- * Called with node locked
- */
-static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-
-       n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
-       n->bclink.recv_permitted = true;
-       kfree_skb(buf);
-}
-
-/*
- * tipc_link_push_packets - push unsent packets to bearer
- *
- * Push out the unsent messages of a link where congestion
- * has abated. Node is locked.
- *
- * Called with node locked
- */
-void tipc_link_push_packets(struct tipc_link *link)
-{
-       struct sk_buff *skb;
-       struct tipc_msg *msg;
-       u16 seqno = link->snd_nxt;
-       u16 ack = mod(link->rcv_nxt - 1);
-
-       while (skb_queue_len(&link->transmq) < link->window) {
-               skb = __skb_dequeue(&link->backlogq);
-               if (!skb)
-                       break;
-               msg = buf_msg(skb);
-               link->backlog[msg_importance(msg)].len--;
-               msg_set_ack(msg, ack);
-               msg_set_seqno(msg, seqno);
-               seqno = mod(seqno + 1);
-               msg_set_bcast_ack(msg, link->owner->bclink.last_in);
-               link->rcv_unacked = 0;
-               __skb_queue_tail(&link->transmq, skb);
-               tipc_bearer_send(link->owner->net, link->bearer_id,
-                                skb, link->media_addr);
-       }
-       link->snd_nxt = seqno;
-}
-
 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
        struct sk_buff *skb, *_skb;
        struct tipc_msg *hdr;
        u16 seqno = l->snd_nxt;
        u16 ack = l->rcv_nxt - 1;
+       u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 
        while (skb_queue_len(&l->transmq) < l->window) {
                skb = skb_peek(&l->backlogq);
@@ -817,96 +788,35 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
                l->backlog[msg_importance(hdr)].len--;
                __skb_queue_tail(&l->transmq, skb);
                __skb_queue_tail(xmitq, _skb);
-               msg_set_ack(hdr, ack);
+               TIPC_SKB_CB(skb)->ackers = l->ackers;
                msg_set_seqno(hdr, seqno);
-               msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+               msg_set_ack(hdr, ack);
+               msg_set_bcast_ack(hdr, bc_ack);
                l->rcv_unacked = 0;
                seqno++;
        }
        l->snd_nxt = seqno;
 }
 
-static void link_retransmit_failure(struct tipc_link *l_ptr,
-                                   struct sk_buff *buf)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-       struct net *net = l_ptr->owner->net;
-
-       pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
-
-       if (l_ptr->addr) {
-               /* Handle failure on standard link */
-               link_print(l_ptr, "Resetting link ");
-               pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
-                       msg_user(msg), msg_type(msg), msg_size(msg),
-                       msg_errcode(msg));
-               pr_info("sqno %u, prev: %x, src: %x\n",
-                       msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
-       } else {
-               /* Handle failure on broadcast link */
-               struct tipc_node *n_ptr;
-               char addr_string[16];
-
-               pr_info("Msg seq number: %u,  ", msg_seqno(msg));
-               pr_cont("Outstanding acks: %lu\n",
-                       (unsigned long) TIPC_SKB_CB(buf)->handle);
-
-               n_ptr = tipc_bclink_retransmit_to(net);
-
-               tipc_addr_string_fill(addr_string, n_ptr->addr);
-               pr_info("Broadcast link info for %s\n", addr_string);
-               pr_info("Reception permitted: %d,  Acked: %u\n",
-                       n_ptr->bclink.recv_permitted,
-                       n_ptr->bclink.acked);
-               pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
-                       n_ptr->bclink.last_in,
-                       n_ptr->bclink.oos_state,
-                       n_ptr->bclink.last_sent);
-
-               n_ptr->action_flags |= TIPC_BCAST_RESET;
-               l_ptr->stale_count = 0;
-       }
-}
-
-void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
-                         u32 retransmits)
+static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
 {
-       struct tipc_msg *msg;
-
-       if (!skb)
-               return;
-
-       msg = buf_msg(skb);
-
-       /* Detect repeated retransmit failures */
-       if (l_ptr->last_retransm == msg_seqno(msg)) {
-               if (++l_ptr->stale_count > 100) {
-                       link_retransmit_failure(l_ptr, skb);
-                       return;
-               }
-       } else {
-               l_ptr->last_retransm = msg_seqno(msg);
-               l_ptr->stale_count = 1;
-       }
+       struct tipc_msg *hdr = buf_msg(skb);
 
-       skb_queue_walk_from(&l_ptr->transmq, skb) {
-               if (!retransmits)
-                       break;
-               msg = buf_msg(skb);
-               msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
-               msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-               tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
-                                l_ptr->media_addr);
-               retransmits--;
-               l_ptr->stats.retransmitted++;
-       }
+       pr_warn("Retransmission failure on link <%s>\n", l->name);
+       link_print(l, "Resetting link ");
+       pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+               msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
+       pr_info("sqno %u, prev: %x, src: %x\n",
+               msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
 }
 
-static int tipc_link_retransm(struct tipc_link *l, int retransm,
-                             struct sk_buff_head *xmitq)
+int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
+                     struct sk_buff_head *xmitq)
 {
        struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
        struct tipc_msg *hdr;
+       u16 ack = l->rcv_nxt - 1;
+       u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 
        if (!skb)
                return 0;
@@ -919,19 +829,25 @@ static int tipc_link_retransm(struct tipc_link *l, int retransm,
                link_retransmit_failure(l, skb);
                return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        }
+
+       /* Move forward to where retransmission should start */
        skb_queue_walk(&l->transmq, skb) {
-               if (!retransm)
-                       return 0;
+               if (!less(buf_seqno(skb), from))
+                       break;
+       }
+
+       skb_queue_walk_from(&l->transmq, skb) {
+               if (more(buf_seqno(skb), to))
+                       break;
                hdr = buf_msg(skb);
                _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
                if (!_skb)
                        return 0;
                hdr = buf_msg(_skb);
-               msg_set_ack(hdr, l->rcv_nxt - 1);
-               msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+               msg_set_ack(hdr, ack);
+               msg_set_bcast_ack(hdr, bc_ack);
                _skb->priority = TC_PRIO_CONTROL;
                __skb_queue_tail(xmitq, _skb);
-               retransm--;
                l->stats.retransmitted++;
        }
        return 0;
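
tipc_link_retrans() now takes an explicit [from, to] sequence range and positions itself with the wraparound-safe helpers less() and more(). A self-contained sketch of that comparison style, assuming the helpers reduce to a signed 16-bit difference (which matches their semantics):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 'a' precedes 'b' if the signed 16-bit difference is negative, so the
 * ordering survives a wrap past 65535. */
static bool less(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

static bool more(uint16_t a, uint16_t b)
{
	return less(b, a);
}

int main(void)
{
	/* A retransmit range from=65534, to=2 walks the wrap cleanly: */
	for (uint16_t s = 65534; !more(s, 2); s++)
		printf("retransmit %u\n", s);   /* 65534 65535 0 1 2 */
	return 0;
}

The same compares guard the receive window added to tipc_link_rcv() further down.
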
@@ -942,22 +858,20 @@ static int tipc_link_retransm(struct tipc_link *l, int retransm,
  * Consumes buffer if message is of right type
  * Node lock must be held
  */
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                            struct sk_buff_head *inputq)
 {
-       struct tipc_node *node = link->owner;
-
        switch (msg_user(buf_msg(skb))) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
        case CONN_MANAGER:
-               __skb_queue_tail(inputq, skb);
+               skb_queue_tail(inputq, skb);
                return true;
        case NAME_DISTRIBUTOR:
-               node->bclink.recv_permitted = true;
-               skb_queue_tail(link->namedq, skb);
+               l->bc_rcvlink->state = LINK_ESTABLISHED;
+               skb_queue_tail(l->namedq, skb);
                return true;
        case MSG_BUNDLER:
        case TUNNEL_PROTOCOL:
@@ -978,10 +892,10 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
                           struct sk_buff_head *inputq)
 {
-       struct tipc_node *node = l->owner;
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff **reasm_skb = &l->reasm_buf;
        struct sk_buff *iskb;
+       struct sk_buff_head tmpq;
        int usr = msg_user(hdr);
        int rc = 0;
        int pos = 0;
@@ -1006,23 +920,27 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
        }
 
        if (usr == MSG_BUNDLER) {
+               skb_queue_head_init(&tmpq);
                l->stats.recv_bundles++;
                l->stats.recv_bundled += msg_msgcnt(hdr);
                while (tipc_msg_extract(skb, &iskb, &pos))
-                       tipc_data_input(l, iskb, inputq);
+                       tipc_data_input(l, iskb, &tmpq);
+               tipc_skb_queue_splice_tail(&tmpq, inputq);
                return 0;
        } else if (usr == MSG_FRAGMENTER) {
                l->stats.recv_fragments++;
                if (tipc_buf_append(reasm_skb, &skb)) {
                        l->stats.recv_fragmented++;
                        tipc_data_input(l, skb, inputq);
-               } else if (!*reasm_skb) {
+               } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
+                       pr_warn_ratelimited("Unable to build fragment list\n");
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                return 0;
        } else if (usr == BCAST_PROTOCOL) {
-               tipc_link_sync_rcv(node, skb);
-               return 0;
+               tipc_bcast_lock(l->net);
+               tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
+               tipc_bcast_unlock(l->net);
        }
 drop:
        kfree_skb(skb);
@@ -1044,49 +962,95 @@ static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
        return released;
 }
 
+/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
+ *
+ * Note that sending of broadcast acks is coordinated among nodes, to reduce
+ * the risk of ack storms towards the sender
+ */
+int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
+{
+       if (!l)
+               return 0;
+
+       /* Broadcast ACK must be sent via a unicast link => defer to caller */
+       if (link_is_bc_rcvlink(l)) {
+               if (((l->rcv_nxt ^ link_own_addr(l)) & 0xf) != 0xf)
+                       return 0;
+               l->rcv_unacked = 0;
+               return TIPC_LINK_SND_BC_ACK;
+       }
+
+       /* Unicast ACK */
+       l->rcv_unacked = 0;
+       l->stats.sent_acks++;
+       tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+       return 0;
+}
+
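
The broadcast branch above is the coordination the comment describes: a node acks only when ((rcv_nxt ^ own_addr) & 0xf) equals 0xf, i.e. once per 16 packets, in a slot derived from its own address, so peers whose addresses differ in the low bits ack different packets rather than all acking the same one. A toy demonstration with invented node addresses:

#include <stdint.h>
#include <stdio.h>

/* Ack a broadcast packet only when the low 4 bits of (rcv_nxt XOR own
 * address) are all ones: once per 16 packets, at an address-dependent
 * slot. The node addresses below are made up. */
static int should_send_bc_ack(uint16_t rcv_nxt, uint32_t own_addr)
{
	return ((rcv_nxt ^ own_addr) & 0xf) == 0xf;
}

int main(void)
{
	uint32_t node[] = { 0x1001001, 0x1001002, 0x1001003 };

	for (uint16_t seq = 0; seq < 16; seq++)
		for (int n = 0; n < 3; n++)
			if (should_send_bc_ack(seq, node[n]))
				printf("seq %2u acked by node %d\n", seq, n);
	return 0;
}

Each of the three nodes picks a different slot (seq 12, 13, 14 here), so no packet is acked by everyone at once.
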
+/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
+ */
+void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
+{
+       int mtyp = RESET_MSG;
+
+       if (l->state == LINK_ESTABLISHING)
+               mtyp = ACTIVATE_MSG;
+
+       tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
+}
+
+/* tipc_link_build_nack_msg: prepare link nack message for transmission
+ */
+static void tipc_link_build_nack_msg(struct tipc_link *l,
+                                    struct sk_buff_head *xmitq)
+{
+       u32 def_cnt = ++l->stats.deferred_recv;
+
+       if (link_is_bc_rcvlink(l))
+               return;
+
+       if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
+               tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+}
+
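
tipc_link_build_nack_msg() rate-limits unicast NACKs: one is sent when the first out-of-sequence packet lands on the deferred queue, and after that only every TIPC_NACK_INTV-th deferral. A small sketch of the condition (the interval value below is invented, not the kernel's):

#include <stdio.h>

#define NACK_INTV 16	/* stands in for TIPC_NACK_INTV */

/* NACK on the first deferral, then every NACK_INTV-th one. */
static int want_nack(unsigned int def_cnt, int defq_len)
{
	return (defq_len == 1) || !(def_cnt % NACK_INTV);
}

int main(void)
{
	unsigned int def_cnt = 0;

	for (int len = 1; len <= 40; len++)
		if (want_nack(++def_cnt, len))
			printf("deferral %d -> NACK\n", len); /* 1, 16, 32 */
	return 0;
}
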
 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
- * @link: the link that should handle the message
+ * @l: the link that should handle the message
  * @skb: TIPC packet
  * @xmitq: queue to place packets to be sent after this call
  */
 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                  struct sk_buff_head *xmitq)
 {
-       struct sk_buff_head *arrvq = &l->deferdq;
-       struct sk_buff_head tmpq;
+       struct sk_buff_head *defq = &l->deferdq;
        struct tipc_msg *hdr;
-       u16 seqno, rcv_nxt;
+       u16 seqno, rcv_nxt, win_lim;
        int rc = 0;
 
-       __skb_queue_head_init(&tmpq);
-
-       if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
-               if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
-                       tipc_link_build_proto_msg(l, STATE_MSG, 0,
-                                                 0, 0, 0, xmitq);
-               return rc;
-       }
-
-       while ((skb = skb_peek(arrvq))) {
+       do {
                hdr = buf_msg(skb);
+               seqno = msg_seqno(hdr);
+               rcv_nxt = l->rcv_nxt;
+               win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
 
                /* Verify and update link state */
-               if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
-                       __skb_dequeue(arrvq);
-                       rc = tipc_link_proto_rcv(l, skb, xmitq);
-                       continue;
-               }
+               if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+                       return tipc_link_proto_rcv(l, skb, xmitq);
 
                if (unlikely(!link_is_up(l))) {
-                       rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
-                       if (!link_is_up(l)) {
-                               kfree_skb(__skb_dequeue(arrvq));
-                               goto exit;
-                       }
+                       if (l->state == LINK_ESTABLISHING)
+                               rc = TIPC_LINK_UP_EVT;
+                       goto drop;
                }
 
+               /* Don't send probe at next timeout expiration */
                l->silent_intv_cnt = 0;
 
+               /* Drop if outside receive window */
+               if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
+                       l->stats.duplicates++;
+                       goto drop;
+               }
+
                /* Forward queues and wake up waiting users */
                if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
                        tipc_link_advance_backlog(l, xmitq);
@@ -1094,79 +1058,28 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                                link_prepare_wakeup(l);
                }
 
-               /* Defer reception if there is a gap in the sequence */
-               seqno = msg_seqno(hdr);
-               rcv_nxt = l->rcv_nxt;
-               if (unlikely(less(rcv_nxt, seqno))) {
-                       l->stats.deferred_recv++;
-                       goto exit;
-               }
-
-               __skb_dequeue(arrvq);
-
-               /* Drop if packet already received */
-               if (unlikely(more(rcv_nxt, seqno))) {
-                       l->stats.duplicates++;
-                       kfree_skb(skb);
-                       goto exit;
+               /* Defer delivery if sequence gap */
+               if (unlikely(seqno != rcv_nxt)) {
+                       __tipc_skb_queue_sorted(defq, seqno, skb);
+                       tipc_link_build_nack_msg(l, xmitq);
+                       break;
                }
 
-               /* Packet can be delivered */
+               /* Deliver packet */
                l->rcv_nxt++;
                l->stats.recv_info++;
-               if (unlikely(!tipc_data_input(l, skb, &tmpq)))
-                       rc = tipc_link_input(l, skb, &tmpq);
-
-               /* Ack at regular intervals */
-               if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
-                       l->rcv_unacked = 0;
-                       l->stats.sent_acks++;
-                       tipc_link_build_proto_msg(l, STATE_MSG,
-                                                 0, 0, 0, 0, xmitq);
-               }
-       }
-exit:
-       tipc_skb_queue_splice_tail(&tmpq, l->inputq);
-       return rc;
-}
-
-/**
- * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
- *
- * Returns increase in queue length (i.e. 0 or 1)
- */
-u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
-{
-       struct sk_buff *skb1;
-       u16 seq_no = buf_seqno(skb);
-
-       /* Empty queue ? */
-       if (skb_queue_empty(list)) {
-               __skb_queue_tail(list, skb);
-               return 1;
-       }
-
-       /* Last ? */
-       if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
-               __skb_queue_tail(list, skb);
-               return 1;
-       }
-
-       /* Locate insertion point in queue, then insert; discard if duplicate */
-       skb_queue_walk(list, skb1) {
-               u16 curr_seqno = buf_seqno(skb1);
-
-               if (seq_no == curr_seqno) {
-                       kfree_skb(skb);
-                       return 0;
-               }
-
-               if (less(seq_no, curr_seqno))
+               if (!tipc_data_input(l, skb, l->inputq))
+                       rc |= tipc_link_input(l, skb, l->inputq);
+               if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
+                       rc |= tipc_link_build_ack_msg(l, xmitq);
+               if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
                        break;
-       }
+       } while ((skb = __skb_dequeue(defq)));
 
-       __skb_queue_before(list, skb1, skb);
-       return 1;
+       return rc;
+drop:
+       kfree_skb(skb);
+       return rc;
 }
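
The reworked tipc_link_rcv() above adds an explicit receive window: a seqno below rcv_nxt is a duplicate and one beyond rcv_nxt + TIPC_MAX_LINK_WIN is out of range; both are dropped and counted under stats.duplicates. A standalone sketch of the acceptance test (signed-difference compares as in less()/more(); the window constant below stands in for TIPC_MAX_LINK_WIN):

#include <stdint.h>
#include <stdio.h>

#define MAX_LINK_WIN 8191	/* assumed window size, illustrative only */

/* Accept seqno iff rcv_nxt <= seqno <= rcv_nxt + MAX_LINK_WIN, with
 * wraparound-safe 16-bit arithmetic. */
static int in_rcv_window(uint16_t seqno, uint16_t rcv_nxt)
{
	uint16_t win_lim = rcv_nxt + MAX_LINK_WIN;

	return (int16_t)(seqno - rcv_nxt) >= 0 &&
	       (int16_t)(win_lim - seqno) >= 0;
}

int main(void)
{
	printf("%d\n", in_rcv_window(65000, 65500)); /* 0: old duplicate */
	printf("%d\n", in_rcv_window(100, 65500));   /* 1: wrapped, valid */
	printf("%d\n", in_rcv_window(20000, 65500)); /* 0: beyond window */
	return 0;
}
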
 
 /*
@@ -1184,23 +1097,17 @@ void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
        skb = __skb_dequeue(&xmitq);
        if (!skb)
                return;
-       tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
+       tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
        l->rcv_unacked = 0;
-       kfree_skb(skb);
 }
 
-/* tipc_link_build_proto_msg: prepare link protocol message for transmission
- */
 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      u16 rcvgap, int tolerance, int priority,
                                      struct sk_buff_head *xmitq)
 {
        struct sk_buff *skb = NULL;
        struct tipc_msg *hdr = l->pmsg;
-       u16 snd_nxt = l->snd_nxt;
-       u16 rcv_nxt = l->rcv_nxt;
-       u16 rcv_last = rcv_nxt - 1;
-       int node_up = l->owner->bclink.recv_permitted;
+       bool node_up = link_is_up(l->bc_rcvlink);
 
        /* Don't send protocol message during reset or link failover */
        if (tipc_link_is_blocked(l))
@@ -1208,33 +1115,34 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 
        msg_set_type(hdr, mtyp);
        msg_set_net_plane(hdr, l->net_plane);
-       msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
-       msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+       msg_set_next_sent(hdr, l->snd_nxt);
+       msg_set_ack(hdr, l->rcv_nxt - 1);
+       msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
+       msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
        msg_set_link_tolerance(hdr, tolerance);
        msg_set_linkprio(hdr, priority);
        msg_set_redundant_link(hdr, node_up);
        msg_set_seq_gap(hdr, 0);
 
        /* Compatibility: created msg must not be in sequence with pkt flow */
-       msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+       msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
 
        if (mtyp == STATE_MSG) {
                if (!tipc_link_is_up(l))
                        return;
-               msg_set_next_sent(hdr, snd_nxt);
 
                /* Override rcvgap if there are packets in deferred queue */
                if (!skb_queue_empty(&l->deferdq))
-                       rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+                       rcvgap = buf_seqno(skb_peek(&l->deferdq)) - l->rcv_nxt;
                if (rcvgap) {
                        msg_set_seq_gap(hdr, rcvgap);
                        l->stats.sent_nacks++;
                }
-               msg_set_ack(hdr, rcv_last);
                msg_set_probe(hdr, probe);
                if (probe)
                        l->stats.sent_probes++;
                l->stats.sent_states++;
+               l->rcv_unacked = 0;
        } else {
                /* RESET_MSG or ACTIVATE_MSG */
                msg_set_max_pkt(hdr, l->advertised_mtu);
@@ -1250,7 +1158,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 }
 
 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
- * with contents of the link's tranmsit and backlog queues.
+ * with contents of the link's transmit and backlog queues.
  */
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq)
@@ -1326,21 +1234,23 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 {
        struct tipc_msg *hdr = buf_msg(skb);
        u16 rcvgap = 0;
-       u16 nacked_gap = msg_seq_gap(hdr);
+       u16 ack = msg_ack(hdr);
+       u16 gap = msg_seq_gap(hdr);
        u16 peers_snd_nxt =  msg_next_sent(hdr);
        u16 peers_tol = msg_link_tolerance(hdr);
        u16 peers_prio = msg_linkprio(hdr);
        u16 rcv_nxt = l->rcv_nxt;
+       int mtyp = msg_type(hdr);
        char *if_name;
        int rc = 0;
 
-       if (tipc_link_is_blocked(l))
+       if (tipc_link_is_blocked(l) || !xmitq)
                goto exit;
 
        if (link_own_addr(l) > msg_prevnode(hdr))
                l->net_plane = msg_net_plane(hdr);
 
-       switch (msg_type(hdr)) {
+       switch (mtyp) {
        case RESET_MSG:
 
                /* Ignore duplicate RESET with old session number */
@@ -1367,12 +1277,14 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
                        l->priority = peers_prio;
 
-               if (msg_type(hdr) == RESET_MSG) {
-                       rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
-               } else if (!link_is_up(l)) {
-                       tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
-                       rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
-               }
+               /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+               if ((mtyp == RESET_MSG) || !link_is_up(l))
+                       rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+
+               /* ACTIVATE_MSG takes up link if it was already locally reset */
+               if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
+                       rc = TIPC_LINK_UP_EVT;
+
                l->peer_session = msg_session(hdr);
                l->peer_bearer_id = msg_bearer_id(hdr);
                if (l->mtu > msg_max_pkt(hdr))
@@ -1389,9 +1301,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                l->stats.recv_states++;
                if (msg_probe(hdr))
                        l->stats.recv_probes++;
-               rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
-               if (!link_is_up(l))
+
+               if (!link_is_up(l)) {
+                       if (l->state == LINK_ESTABLISHING)
+                               rc = TIPC_LINK_UP_EVT;
                        break;
+               }
 
                /* Send NACK if peer has sent pkts we haven't received yet */
                if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
@@ -1399,11 +1314,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                if (rcvgap || (msg_probe(hdr)))
                        tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
                                                  0, 0, xmitq);
-               tipc_link_release_pkts(l, msg_ack(hdr));
+               tipc_link_release_pkts(l, ack);
 
                /* If NACK, retransmit will now start at right position */
-               if (nacked_gap) {
-                       rc = tipc_link_retransm(l, nacked_gap, xmitq);
+               if (gap) {
+                       rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
                        l->stats.recv_nacks++;
                }
 
@@ -1416,6 +1331,188 @@ exit:
        return rc;
 }
 
+/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
+ */
+static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
+                                        u16 peers_snd_nxt,
+                                        struct sk_buff_head *xmitq)
+{
+       struct sk_buff *skb;
+       struct tipc_msg *hdr;
+       struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
+       u16 ack = l->rcv_nxt - 1;
+       u16 gap_to = peers_snd_nxt - 1;
+
+       skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+                             0, l->addr, link_own_addr(l), 0, 0, 0);
+       if (!skb)
+               return false;
+       hdr = buf_msg(skb);
+       msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
+       msg_set_bcast_ack(hdr, ack);
+       msg_set_bcgap_after(hdr, ack);
+       if (dfrd_skb)
+               gap_to = buf_seqno(dfrd_skb) - 1;
+       msg_set_bcgap_to(hdr, gap_to);
+       msg_set_non_seq(hdr, bcast);
+       __skb_queue_tail(xmitq, skb);
+       return true;
+}
+
+/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+static void tipc_link_build_bc_init_msg(struct tipc_link *l,
+                                       struct sk_buff_head *xmitq)
+{
+       struct sk_buff_head list;
+
+       __skb_queue_head_init(&list);
+       if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
+               return;
+       tipc_link_xmit(l, &list, xmitq);
+}
+
+/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
+ */
+void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
+{
+       int mtyp = msg_type(hdr);
+       u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
+
+       if (link_is_up(l))
+               return;
+
+       if (msg_user(hdr) == BCAST_PROTOCOL) {
+               l->rcv_nxt = peers_snd_nxt;
+               l->state = LINK_ESTABLISHED;
+               return;
+       }
+
+       if (l->peer_caps & TIPC_BCAST_SYNCH)
+               return;
+
+       if (msg_peer_node_is_up(hdr))
+               return;
+
+       /* Compatibility: accept older, less safe initial synch data */
+       if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
+               l->rcv_nxt = peers_snd_nxt;
+}
+
+/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
+ */
+void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+                          struct sk_buff_head *xmitq)
+{
+       u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
+
+       if (!link_is_up(l))
+               return;
+
+       if (!msg_peer_node_is_up(hdr))
+               return;
+
+       l->bc_peer_is_up = true;
+
+       /* Ignore if peers_snd_nxt goes beyond receive window */
+       if (more(peers_snd_nxt, l->rcv_nxt + l->window))
+               return;
+
+       if (!more(peers_snd_nxt, l->rcv_nxt)) {
+               l->nack_state = BC_NACK_SND_CONDITIONAL;
+               return;
+       }
+
+       /* Don't NACK if one was recently sent or peeked */
+       if (l->nack_state == BC_NACK_SND_SUPPRESS) {
+               l->nack_state = BC_NACK_SND_UNCONDITIONAL;
+               return;
+       }
+
+       /* Conditionally delay NACK sending until next synch rcv */
+       if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
+               l->nack_state = BC_NACK_SND_UNCONDITIONAL;
+               if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
+                       return;
+       }
+
+       /* Send NACK now but suppress next one */
+       tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
+       l->nack_state = BC_NACK_SND_SUPPRESS;
+}
+
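
tipc_link_bc_sync_rcv() above throttles broadcast NACKs with a three-state policy: a caught-up receiver rearms the conditional state, a node that just sent a NACK suppresses its next one, and a conditional node lets small gaps ride for one more sync round. A toy mirror of that policy (state names follow the patch; the gap threshold stands in for TIPC_MIN_LINK_WIN):

#include <stdio.h>

enum nack_state { SND_CONDITIONAL, SND_UNCONDITIONAL, SND_SUPPRESS };

/* 'gap' is peers_snd_nxt - rcv_nxt; returns 1 when a NACK goes out. */
static int maybe_nack(enum nack_state *st, unsigned int gap)
{
	if (gap == 0) {                 /* caught up: arm conditional mode */
		*st = SND_CONDITIONAL;
		return 0;
	}
	if (*st == SND_SUPPRESS) {      /* just NACKed: skip this round */
		*st = SND_UNCONDITIONAL;
		return 0;
	}
	if (*st == SND_CONDITIONAL) {   /* small gaps wait one more round */
		*st = SND_UNCONDITIONAL;
		if (gap < 4)
			return 0;
	}
	*st = SND_SUPPRESS;             /* NACK now, suppress the next one */
	return 1;
}

int main(void)
{
	enum nack_state st = SND_CONDITIONAL;
	unsigned int gaps[] = { 0, 2, 2, 2, 8, 8 };

	for (int i = 0; i < 6; i++)
		printf("gap %u -> nack %d\n", gaps[i], maybe_nack(&st, gaps[i]));
	return 0;
}
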
+void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
+                         struct sk_buff_head *xmitq)
+{
+       struct sk_buff *skb, *tmp;
+       struct tipc_link *snd_l = l->bc_sndlink;
+
+       if (!link_is_up(l) || !l->bc_peer_is_up)
+               return;
+
+       if (!more(acked, l->acked))
+               return;
+
+       /* Skip over packets peer has already acked */
+       skb_queue_walk(&snd_l->transmq, skb) {
+               if (more(buf_seqno(skb), l->acked))
+                       break;
+       }
+
+       /* Update/release the packets peer is acking now */
+       skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
+               if (more(buf_seqno(skb), acked))
+                       break;
+               if (!--TIPC_SKB_CB(skb)->ackers) {
+                       __skb_unlink(skb, &snd_l->transmq);
+                       kfree_skb(skb);
+               }
+       }
+       l->acked = acked;
+       tipc_link_advance_backlog(snd_l, xmitq);
+       if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
+               link_prepare_wakeup(snd_l);
+}
+
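
tipc_link_bc_ack_rcv() above exercises the new per-packet 'ackers' count: each broadcast packet starts with one reference per peer (TIPC_SKB_CB(skb)->ackers = l->ackers at send time) and is freed only when the last peer acks past it. A simplified standalone model; the kernel's per-peer l->acked bookkeeping, which prevents one peer from decrementing the same packet twice, is omitted here:

#include <stdio.h>

struct bc_pkt {
	unsigned short seqno;
	unsigned short ackers;	/* peers still expected to ack */
};

/* One peer acks everything up to and including 'acked'. */
static void peer_acked(struct bc_pkt *q, int n, unsigned short acked)
{
	for (int i = 0; i < n; i++) {
		if ((short)(q[i].seqno - acked) > 0)
			break;                  /* beyond this peer's ack */
		if (q[i].ackers && !--q[i].ackers)
			printf("seqno %u released\n", q[i].seqno);
	}
}

int main(void)
{
	struct bc_pkt q[] = { { 1, 2 }, { 2, 2 }, { 3, 2 } }; /* 2 peers */

	peer_acked(q, 3, 2);    /* peer A acks 1..2: nothing freed yet */
	peer_acked(q, 3, 3);    /* peer B acks 1..3: seqnos 1 and 2 freed */
	return 0;
}
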
+/* tipc_link_bc_nack_rcv(): receive broadcast nack message
+ */
+int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
+                         struct sk_buff_head *xmitq)
+{
+       struct tipc_msg *hdr = buf_msg(skb);
+       u32 dnode = msg_destnode(hdr);
+       int mtyp = msg_type(hdr);
+       u16 acked = msg_bcast_ack(hdr);
+       u16 from = acked + 1;
+       u16 to = msg_bcgap_to(hdr);
+       u16 peers_snd_nxt = to + 1;
+       int rc = 0;
+
+       kfree_skb(skb);
+
+       if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
+               return 0;
+
+       if (mtyp != STATE_MSG)
+               return 0;
+
+       if (dnode == link_own_addr(l)) {
+               tipc_link_bc_ack_rcv(l, acked, xmitq);
+               rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
+               l->stats.recv_nacks++;
+               return rc;
+       }
+
+       /* Msg for other node => suppress own NACK at next sync if applicable */
+       if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
+               l->nack_state = BC_NACK_SND_SUPPRESS;
+
+       return 0;
+}
+
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
 {
        int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
@@ -1480,7 +1577,7 @@ static void link_reset_statistics(struct tipc_link *l_ptr)
 static void link_print(struct tipc_link *l, const char *str)
 {
        struct sk_buff *hskb = skb_peek(&l->transmq);
-       u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
+       u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
        u16 tail = l->snd_nxt - 1;
 
        pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
@@ -1704,7 +1801,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
        if (tipc_link_is_up(link))
                if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
                        goto attr_msg_full;
-       if (tipc_link_is_active(link))
+       if (link->active)
                if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
                        goto attr_msg_full;
 
index 39ff8b6919a4271d31892992098be0fcf98acd0f..66d859b66c84f3a21f7b9115f75a9114fd198744 100644 (file)
@@ -66,7 +66,8 @@ enum {
  */
 enum {
        TIPC_LINK_UP_EVT       = 1,
-       TIPC_LINK_DOWN_EVT     = (1 << 1)
+       TIPC_LINK_DOWN_EVT     = (1 << 1),
+       TIPC_LINK_SND_BC_ACK   = (1 << 2)
 };
 
 /* Starting value for maximum packet size negotiation on unicast links
@@ -110,7 +111,7 @@ struct tipc_stats {
  * @name: link name character string
  * @media_addr: media address to use when sending messages over link
  * @timer: link timer
- * @owner: pointer to peer node
+ * @net: pointer to namespace struct
  * @refcnt: reference counter for permanent references (owner node & timer)
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
@@ -119,6 +120,7 @@ struct tipc_stats {
  * @keepalive_intv: link keepalive timer interval
  * @abort_limit: # of unacknowledged continuity probes needed to reset link
  * @state: current state of link FSM
+ * @peer_caps: bitmap describing capabilities of peer node
  * @silent_intv_cnt: # of timer intervals without any reception from peer
  * @proto_msg: template for control messages generated by link
  * @pmsg: convenience pointer to "proto_msg" field
@@ -134,6 +136,8 @@ struct tipc_stats {
  * @snd_nxt: next sequence number to use for outbound messages
  * @last_retransmitted: sequence number of most recently retransmitted message
  * @stale_count: # of identical retransmit requests made by peer
+ * @ackers: # of peers that need to ack each packet before it can be released
+ * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
  * @rcv_nxt: next sequence number to expect for inbound messages
  * @deferred_queue: deferred queue of saved OOS b'cast messages received from node
  * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
@@ -143,13 +147,14 @@ struct tipc_stats {
  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
  * @reasm_buf: head of partially reassembled inbound message fragments
+ * @bc_rcvr: marks that this is a broadcast receiver link
  * @stats: collects statistics regarding link activity
  */
 struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
        struct tipc_media_addr *media_addr;
-       struct tipc_node *owner;
+       struct net *net;
 
        /* Management and link supervision data */
        u32 peer_session;
@@ -159,6 +164,8 @@ struct tipc_link {
        unsigned long keepalive_intv;
        u32 abort_limit;
        u32 state;
+       u16 peer_caps;
+       bool active;
        u32 silent_intv_cnt;
        struct {
                unchar hdr[INT_H_SIZE];
@@ -185,7 +192,7 @@ struct tipc_link {
        } backlog[5];
        u16 snd_nxt;
        u16 last_retransm;
-       u32 window;
+       u16 window;
        u32 stale_count;
 
        /* Reception */
@@ -201,42 +208,50 @@ struct tipc_link {
        /* Fragmentation/reassembly */
        struct sk_buff *reasm_buf;
 
+       /* Broadcast */
+       u16 ackers;
+       u16 acked;
+       struct tipc_link *bc_rcvlink;
+       struct tipc_link *bc_sndlink;
+       int nack_state;
+       bool bc_peer_is_up;
+
        /* Statistics */
        struct tipc_stats stats;
 };
 
-bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
-                     u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
-                     struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
+                     int tolerance, char net_plane, u32 mtu, int priority,
+                     int window, u32 session, u32 ownnode, u32 peer,
+                     u16 peer_caps,
+                     struct tipc_link *bc_sndlink,
+                     struct tipc_link *bc_rcvlink,
+                     struct sk_buff_head *inputq,
+                     struct sk_buff_head *namedq,
                      struct tipc_link **link);
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
+                        int mtu, int window, u16 peer_caps,
+                        struct sk_buff_head *inputq,
+                        struct sk_buff_head *namedq,
+                        struct tipc_link *bc_sndlink,
+                        struct tipc_link **link);
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq);
-void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
-                                   struct sk_buff_head *xmitq);
+void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
 bool tipc_link_is_up(struct tipc_link *l);
+bool tipc_link_peer_is_down(struct tipc_link *l);
 bool tipc_link_is_reset(struct tipc_link *l);
+bool tipc_link_is_establishing(struct tipc_link *l);
 bool tipc_link_is_synching(struct tipc_link *l);
 bool tipc_link_is_failingover(struct tipc_link *l);
 bool tipc_link_is_blocked(struct tipc_link *l);
-int tipc_link_is_active(struct tipc_link *l_ptr);
-void tipc_link_purge_queues(struct tipc_link *l_ptr);
-void tipc_link_purge_backlog(struct tipc_link *l);
+void tipc_link_set_active(struct tipc_link *l, bool active);
 void tipc_link_reset(struct tipc_link *l_ptr);
-int __tipc_link_xmit(struct net *net, struct tipc_link *link,
-                    struct sk_buff_head *list);
 int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq);
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
-                         u32 gap, u32 tolerance, u32 priority);
-void tipc_link_push_packets(struct tipc_link *l_ptr);
-u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
-void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
-void tipc_link_retransmit(struct tipc_link *l_ptr,
-                         struct sk_buff *start, u32 retransmits);
-struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
-                                   const struct sk_buff *skb);
+void tipc_link_set_queue_limits(struct tipc_link *l, u32 window);
 
 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
@@ -246,5 +261,23 @@ int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                  struct sk_buff_head *xmitq);
-
+int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
+void tipc_link_add_bc_peer(struct tipc_link *snd_l,
+                          struct tipc_link *uc_l,
+                          struct sk_buff_head *xmitq);
+void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
+                             struct tipc_link *rcv_l,
+                             struct sk_buff_head *xmitq);
+int tipc_link_bc_peers(struct tipc_link *l);
+void tipc_link_set_mtu(struct tipc_link *l, int mtu);
+int tipc_link_mtu(struct tipc_link *l);
+void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
+                         struct sk_buff_head *xmitq);
+void tipc_link_build_bc_sync_msg(struct tipc_link *l,
+                                struct sk_buff_head *xmitq);
+void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr);
+void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+                          struct sk_buff_head *xmitq);
+int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
+                         struct sk_buff_head *xmitq);
 #endif
index c5ac436235e0823c016123394fef6a0cf321092c..8740930f07872ff1ec8ff9d6b712b2772ba48336 100644 (file)
@@ -121,7 +121,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 {
        struct sk_buff *head = *headbuf;
        struct sk_buff *frag = *buf;
-       struct sk_buff *tail;
+       struct sk_buff *tail = NULL;
        struct tipc_msg *msg;
        u32 fragid;
        int delta;
@@ -141,9 +141,15 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
                if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
                        goto err;
                head = *headbuf = frag;
-               skb_frag_list_init(head);
-               TIPC_SKB_CB(head)->tail = NULL;
                *buf = NULL;
+               TIPC_SKB_CB(head)->tail = NULL;
+               if (skb_is_nonlinear(head)) {
+                       skb_walk_frags(head, tail) {
+                               TIPC_SKB_CB(head)->tail = tail;
+                       }
+               } else {
+                       skb_frag_list_init(head);
+               }
                return 0;
        }
 
@@ -176,7 +182,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
        *buf = NULL;
        return 0;
 err:
-       pr_warn_ratelimited("Unable to build fragment list\n");
        kfree_skb(*buf);
        kfree_skb(*headbuf);
        *buf = *headbuf = NULL;
@@ -559,18 +564,22 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
 /* tipc_msg_reassemble() - clone a buffer chain of fragments and
  *                         reassemble the clones into one message
  */
-struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
+bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
 {
-       struct sk_buff *skb;
+       struct sk_buff *skb, *_skb;
        struct sk_buff *frag = NULL;
        struct sk_buff *head = NULL;
-       int hdr_sz;
+       int hdr_len;
 
        /* Copy header if single buffer */
        if (skb_queue_len(list) == 1) {
                skb = skb_peek(list);
-               hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
-               return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
+               hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
+               _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
+               if (!_skb)
+                       return false;
+               __skb_queue_tail(rcvq, _skb);
+               return true;
        }
 
        /* Clone all fragments and reassemble */
@@ -584,9 +593,41 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
                if (!head)
                        goto error;
        }
-       return frag;
+       __skb_queue_tail(rcvq, frag);
+       return true;
 error:
        pr_warn("Failed do clone local mcast rcv buffer\n");
        kfree_skb(head);
-       return NULL;
+       return false;
+}
+
+/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
+ * @list: list to be appended to
+ * @seqno: sequence number of buffer to add
+ * @skb: buffer to add
+ */
+void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
+                            struct sk_buff *skb)
+{
+       struct sk_buff *_skb, *tmp;
+
+       if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
+               __skb_queue_head(list, skb);
+               return;
+       }
+
+       if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
+               __skb_queue_tail(list, skb);
+               return;
+       }
+
+       skb_queue_walk_safe(list, _skb, tmp) {
+               if (more(seqno, buf_seqno(_skb)))
+                       continue;
+               if (seqno == buf_seqno(_skb))
+                       break;
+               __skb_queue_before(list, _skb, skb);
+               return;
+       }
+       kfree_skb(skb);
 }
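
__tipc_skb_queue_sorted() above keeps the deferred queue ordered by sequence number and silently drops duplicates. The same policy on a bare singly linked list of seqnos, as a sketch (not kernel code; frees omitted):

#include <stdio.h>
#include <stdlib.h>

struct node { unsigned short seqno; struct node *next; };

/* Insert in ascending (wraparound-safe) seqno order; drop duplicates. */
static void queue_sorted(struct node **head, unsigned short seqno)
{
	struct node **pp = head;

	while (*pp && (short)((*pp)->seqno - seqno) < 0)
		pp = &(*pp)->next;
	if (*pp && (*pp)->seqno == seqno)
		return;                         /* duplicate: drop */

	struct node *n = malloc(sizeof(*n));
	n->seqno = seqno;
	n->next = *pp;
	*pp = n;
}

int main(void)
{
	struct node *q = NULL;
	unsigned short in[] = { 5, 3, 9, 3, 7 };

	for (int i = 0; i < 5; i++)
		queue_sorted(&q, in[i]);
	for (struct node *n = q; n; n = n->next)
		printf("%u ", n->seqno);        /* 3 5 7 9 */
	printf("\n");
	return 0;
}
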
index a82c5848d4bc22129bd1e6ba7f677795febdc9e9..55778a0aebf3706f2c559528f6a482c4077197bb 100644 (file)
@@ -112,6 +112,7 @@ struct tipc_skb_cb {
        bool wakeup_pending;
        u16 chain_sz;
        u16 chain_imp;
+       u16 ackers;
 };
 
 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
@@ -357,7 +358,7 @@ static inline u32 msg_importance(struct tipc_msg *m)
        if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
                return usr;
        if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
-               return msg_bits(m, 5, 13, 0x7);
+               return msg_bits(m, 9, 0, 0x7);
        return TIPC_SYSTEM_IMPORTANCE;
 }
 
@@ -366,7 +367,7 @@ static inline void msg_set_importance(struct tipc_msg *m, u32 i)
        int usr = msg_user(m);
 
        if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
-               msg_set_bits(m, 5, 13, 0x7, i);
+               msg_set_bits(m, 9, 0, 0x7, i);
        else if (i < TIPC_SYSTEM_IMPORTANCE)
                msg_set_user(m, i);
        else
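
The two msg_importance()/msg_set_importance() hunks above move the importance field for bundled and fragmented messages from header word 5, bits 13..15 to word 9, bits 0..2; only the (w, pos, mask) triple changes. A minimal sketch of the msg_bits()/msg_set_bits() convention involved (host byte order only; the kernel versions also convert with htonl/ntohl):

#include <stdint.h>
#include <stdio.h>

/* Read a field from 32-bit header word 'w' at bit 'pos' under 'mask'. */
static uint32_t msg_bits(const uint32_t *hdr, int w, int pos, uint32_t mask)
{
	return (hdr[w] >> pos) & mask;
}

static void msg_set_bits(uint32_t *hdr, int w, int pos, uint32_t mask,
			 uint32_t val)
{
	hdr[w] &= ~(mask << pos);
	hdr[w] |= (val & mask) << pos;
}

int main(void)
{
	uint32_t hdr[10] = { 0 };

	msg_set_bits(hdr, 9, 0, 0x7, 3);        /* new field location */
	printf("imp %u\n", msg_bits(hdr, 9, 0, 0x7));
	return 0;
}
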
@@ -600,6 +601,11 @@ static inline u32 msg_last_bcast(struct tipc_msg *m)
        return msg_bits(m, 4, 16, 0xffff);
 }
 
+static inline u32 msg_bc_snd_nxt(struct tipc_msg *m)
+{
+       return msg_last_bcast(m) + 1;
+}
+
 static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
 {
        msg_set_bits(m, 4, 16, 0xffff, n);
@@ -789,7 +795,9 @@ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                   int offset, int dsz, int mtu, struct sk_buff_head *list);
 bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
-struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
+bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
+void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
+                            struct sk_buff *skb);
 
 static inline u16 buf_seqno(struct sk_buff *skb)
 {
@@ -862,38 +870,6 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
        return skb;
 }
 
-/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
- * @list: list to be appended to
- * @skb: buffer to add
- * Returns true if queue should treated further, otherwise false
- */
-static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list,
-                                          struct sk_buff *skb)
-{
-       struct sk_buff *_skb, *tmp;
-       struct tipc_msg *hdr = buf_msg(skb);
-       u16 seqno = msg_seqno(hdr);
-
-       if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) {
-               __skb_queue_head(list, skb);
-               return true;
-       }
-       if (likely(less(seqno, buf_seqno(skb_peek(list))))) {
-               __skb_queue_head(list, skb);
-               return true;
-       }
-       if (!more(seqno, buf_seqno(skb_peek_tail(list)))) {
-               skb_queue_walk_safe(list, _skb, tmp) {
-                       if (likely(less(seqno, buf_seqno(_skb)))) {
-                               __skb_queue_before(list, _skb, skb);
-                               return true;
-                       }
-               }
-       }
-       __skb_queue_tail(list, skb);
-       return false;
-}
-
 /* tipc_skb_queue_splice_tail - append an skb list to lock protected list
  * @list: the new list to append. Not lock protected
  * @head: target list. Lock protected.
index e6018b7eb1970dfc85bc7e0dc8945ccf45a72180..c07612bab95c0957b2e34434aa73b6ef02708fff 100644 (file)
@@ -102,7 +102,7 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb)
                if (!oskb)
                        break;
                msg_set_destnode(buf_msg(oskb), dnode);
-               tipc_node_xmit_skb(net, oskb, dnode, dnode);
+               tipc_node_xmit_skb(net, oskb, dnode, 0);
        }
        rcu_read_unlock();
 
@@ -223,7 +223,7 @@ void tipc_named_node_up(struct net *net, u32 dnode)
                         &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
        rcu_read_unlock();
 
-       tipc_node_xmit(net, &head, dnode, dnode);
+       tipc_node_xmit(net, &head, dnode, 0);
 }
 
 static void tipc_publ_subscribe(struct net *net, struct publication *publ,
index d6d1399ae22922754ba24364e21e89a6a3497d22..77bf9113c7a76be7cfc5173bc41b84664eb658e6 100644 (file)
@@ -112,14 +112,11 @@ int tipc_net_start(struct net *net, u32 addr)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        char addr_string[16];
-       int res;
 
        tn->own_addr = addr;
        tipc_named_reinit(net);
        tipc_sk_reinit(net);
-       res = tipc_bclink_init(net);
-       if (res)
-               return res;
+       tipc_bcast_reinit(net);
 
        tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
                             TIPC_ZONE_SCOPE, 0, tn->own_addr);
@@ -142,7 +139,6 @@ void tipc_net_stop(struct net *net)
                              tn->own_addr);
        rtnl_lock();
        tipc_bearer_stop(net);
-       tipc_bclink_stop(net);
        tipc_node_stop(net);
        rtnl_unlock();
 
index 703875fd6cde204ddeaf630b9a6bd11daec6dbfa..20cddec0a43c7227a6e3aff860286ec04f9cd423 100644 (file)
@@ -72,7 +72,6 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
                                bool delete);
 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
-static void node_established_contact(struct tipc_node *n_ptr);
 static void tipc_node_delete(struct tipc_node *node);
 static void tipc_node_timeout(unsigned long data);
 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
@@ -165,8 +164,10 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
        INIT_LIST_HEAD(&n_ptr->list);
        INIT_LIST_HEAD(&n_ptr->publ_list);
        INIT_LIST_HEAD(&n_ptr->conn_sks);
-       skb_queue_head_init(&n_ptr->bclink.namedq);
-       __skb_queue_head_init(&n_ptr->bclink.deferdq);
+       skb_queue_head_init(&n_ptr->bc_entry.namedq);
+       skb_queue_head_init(&n_ptr->bc_entry.inputq1);
+       __skb_queue_head_init(&n_ptr->bc_entry.arrvq);
+       skb_queue_head_init(&n_ptr->bc_entry.inputq2);
        hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
        list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                if (n_ptr->addr < temp_node->addr)
@@ -177,6 +178,18 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
        n_ptr->signature = INVALID_NODE_SIG;
        n_ptr->active_links[0] = INVALID_BEARER_ID;
        n_ptr->active_links[1] = INVALID_BEARER_ID;
+       if (!tipc_link_bc_create(net, tipc_own_addr(net), n_ptr->addr,
+                                U16_MAX, tipc_bc_sndlink(net)->window,
+                                n_ptr->capabilities,
+                                &n_ptr->bc_entry.inputq1,
+                                &n_ptr->bc_entry.namedq,
+                                tipc_bc_sndlink(net),
+                                &n_ptr->bc_entry.link)) {
+               pr_warn("Broadcast rcv link creation failed, no memory\n");
+               kfree(n_ptr);
+               n_ptr = NULL;
+               goto exit;
+       }
        tipc_node_get(n_ptr);
        setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
        n_ptr->keepalive_intv = U32_MAX;
@@ -203,6 +216,7 @@ static void tipc_node_delete(struct tipc_node *node)
 {
        list_del_rcu(&node->list);
        hlist_del_rcu(&node->hash);
+       kfree(node->bc_entry.link);
        kfree_rcu(node, rcu);
 }
 
@@ -317,7 +331,11 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
        struct tipc_link *ol = node_active_link(n, 0);
        struct tipc_link *nl = n->links[bearer_id].link;
 
-       if (!nl || !tipc_link_is_up(nl))
+       if (!nl)
+               return;
+
+       tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
+       if (!tipc_link_is_up(nl))
                return;
 
        n->working_links++;
@@ -328,6 +346,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
        n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
 
        tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+       tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
 
        pr_debug("Established link <%s> on network plane %c\n",
                 nl->name, nl->net_plane);
@@ -336,8 +355,9 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
        if (!ol) {
                *slot0 = bearer_id;
                *slot1 = bearer_id;
-               tipc_link_build_bcast_sync_msg(nl, xmitq);
-               node_established_contact(n);
+               tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+               n->action_flags |= TIPC_NOTIFY_NODE_UP;
+               tipc_bcast_add_peer(n->net, nl, xmitq);
                return;
        }
 
@@ -346,8 +366,11 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
                pr_debug("Old link <%s> becomes standby\n", ol->name);
                *slot0 = bearer_id;
                *slot1 = bearer_id;
+               tipc_link_set_active(nl, true);
+               tipc_link_set_active(ol, false);
        } else if (nl->priority == ol->priority) {
-               *slot0 = bearer_id;
+               tipc_link_set_active(nl, true);
+               *slot1 = bearer_id;
        } else {
                pr_debug("New link <%s> is standby\n", nl->name);
        }
@@ -416,10 +439,18 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
        }
 
        if (!tipc_node_is_up(n)) {
+               if (tipc_link_peer_is_down(l))
+                       tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+               tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
+               tipc_link_fsm_evt(l, LINK_RESET_EVT);
                tipc_link_reset(l);
+               tipc_link_build_reset_msg(l, xmitq);
+               *maddr = &n->links[*bearer_id].maddr;
                node_lost_contact(n, &le->inputq);
+               tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
                return;
        }
+       tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
 
        /* There is still a working link => initiate failover */
        tnl = node_active_link(n, 0);
@@ -428,6 +459,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
        n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
        tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
        tipc_link_reset(l);
+       tipc_link_fsm_evt(l, LINK_RESET_EVT);
        tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
        tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
        *maddr = &n->links[tnl->bearer_id].maddr;
@@ -437,20 +469,28 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
 {
        struct tipc_link_entry *le = &n->links[bearer_id];
+       struct tipc_link *l = le->link;
        struct tipc_media_addr *maddr;
        struct sk_buff_head xmitq;
 
+       if (!l)
+               return;
+
        __skb_queue_head_init(&xmitq);
 
        tipc_node_lock(n);
-       __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
-       if (delete && le->link) {
-               kfree(le->link);
-               le->link = NULL;
-               n->link_cnt--;
+       if (!tipc_link_is_establishing(l)) {
+               __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
+               if (delete) {
+                       kfree(l);
+                       le->link = NULL;
+                       n->link_cnt--;
+               }
+       } else {
+               /* Defuse pending tipc_node_link_up() */
+               tipc_link_fsm_evt(l, LINK_RESET_EVT);
        }
        tipc_node_unlock(n);
-
        tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
        tipc_sk_rcv(n->net, &le->inputq);
 }
@@ -474,6 +514,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
        bool link_up = false;
        bool accept_addr = false;
        bool reset = true;
+       char *if_name;
 
        *dupl_addr = false;
        *respond = false;
@@ -560,13 +601,20 @@ void tipc_node_check_dest(struct net *net, u32 onode,
                        pr_warn("Cannot establish 3rd link to %x\n", n->addr);
                        goto exit;
                }
-               if (!tipc_link_create(n, b, mod(tipc_net(net)->random),
-                                     tipc_own_addr(net), onode, &le->maddr,
-                                     &le->inputq, &n->bclink.namedq, &l)) {
+               if_name = strchr(b->name, ':') + 1;
+               if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
+                                     b->net_plane, b->mtu, b->priority,
+                                     b->window, mod(tipc_net(net)->random),
+                                     tipc_own_addr(net), onode,
+                                     n->capabilities,
+                                     tipc_bc_sndlink(n->net), n->bc_entry.link,
+                                     &le->inputq,
+                                     &n->bc_entry.namedq, &l)) {
                        *respond = false;
                        goto exit;
                }
                tipc_link_reset(l);
+               tipc_link_fsm_evt(l, LINK_RESET_EVT);
                if (n->state == NODE_FAILINGOVER)
                        tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
                le->link = l;
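
TIPC bearer names have the form "media:interface" (for example "eth:eth0"), so the interface name used above begins one character past the colon. A standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *bearer_name = "eth:eth0";	/* media:interface */

	printf("%s\n", strchr(bearer_name, ':') + 1);	/* prints "eth0" */
	return 0;
}
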
@@ -579,7 +627,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
        memcpy(&le->maddr, maddr, sizeof(*maddr));
 exit:
        tipc_node_unlock(n);
-       if (reset)
+       if (reset && !tipc_link_is_reset(l))
                tipc_node_link_down(n, b->identity, false);
        tipc_node_put(n);
 }
@@ -686,10 +734,10 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
                        break;
                case SELF_ESTABL_CONTACT_EVT:
                case PEER_LOST_CONTACT_EVT:
-                       break;
                case NODE_SYNCH_END_EVT:
-               case NODE_SYNCH_BEGIN_EVT:
                case NODE_FAILOVER_BEGIN_EVT:
+                       break;
+               case NODE_SYNCH_BEGIN_EVT:
                case NODE_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
@@ -804,61 +852,36 @@ bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
        return true;
 }
 
-static void node_established_contact(struct tipc_node *n_ptr)
-{
-       tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
-       n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
-       n_ptr->bclink.oos_state = 0;
-       n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
-       tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
-}
-
-static void node_lost_contact(struct tipc_node *n_ptr,
+static void node_lost_contact(struct tipc_node *n,
                              struct sk_buff_head *inputq)
 {
        char addr_string[16];
        struct tipc_sock_conn *conn, *safe;
        struct tipc_link *l;
-       struct list_head *conns = &n_ptr->conn_sks;
+       struct list_head *conns = &n->conn_sks;
        struct sk_buff *skb;
-       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
        uint i;
 
        pr_debug("Lost contact with %s\n",
-                tipc_addr_string_fill(addr_string, n_ptr->addr));
-
-       /* Flush broadcast link info associated with lost node */
-       if (n_ptr->bclink.recv_permitted) {
-               __skb_queue_purge(&n_ptr->bclink.deferdq);
+                tipc_addr_string_fill(addr_string, n->addr));
 
-               if (n_ptr->bclink.reasm_buf) {
-                       kfree_skb(n_ptr->bclink.reasm_buf);
-                       n_ptr->bclink.reasm_buf = NULL;
-               }
-
-               tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
-               tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
-
-               n_ptr->bclink.recv_permitted = false;
-       }
+       /* Clean up broadcast state */
+       tipc_bcast_remove_peer(n->net, n->bc_entry.link);
 
        /* Abort any ongoing link failover */
        for (i = 0; i < MAX_BEARERS; i++) {
-               l = n_ptr->links[i].link;
+               l = n->links[i].link;
                if (l)
                        tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
        }
 
-       /* Prevent re-contact with node until cleanup is done */
-       tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);
-
        /* Notify publications from this node */
-       n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
+       n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
 
        /* Notify sockets connected to node */
        list_for_each_entry_safe(conn, safe, conns, list) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
-                                     SHORT_H_SIZE, 0, tn->own_addr,
+                                     SHORT_H_SIZE, 0, tipc_own_addr(n->net),
                                      conn->peer_node, conn->port,
                                      conn->peer_port, TIPC_ERR_NO_NODE);
                if (likely(skb))
@@ -920,18 +943,13 @@ void tipc_node_unlock(struct tipc_node *node)
        publ_list = &node->publ_list;
 
        node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
-                               TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
-                               TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
-                               TIPC_BCAST_RESET);
+                               TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
 
        spin_unlock_bh(&node->lock);
 
        if (flags & TIPC_NOTIFY_NODE_DOWN)
                tipc_publ_notify(net, publ_list, addr);
 
-       if (flags & TIPC_WAKEUP_BCAST_USERS)
-               tipc_bclink_wakeup_users(net);
-
        if (flags & TIPC_NOTIFY_NODE_UP)
                tipc_named_node_up(net, addr);
 
@@ -943,11 +961,6 @@ void tipc_node_unlock(struct tipc_node *node)
                tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
                                      link_id, addr);
 
-       if (flags & TIPC_BCAST_MSG_EVT)
-               tipc_bclink_input(net);
-
-       if (flags & TIPC_BCAST_RESET)
-               tipc_node_reset_links(node);
 }
 
 /* Caller should hold node lock for the passed node */
@@ -1062,6 +1075,67 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
        return 0;
 }
 
+/**
+ * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @bearer_id: id of the bearer the message arrived on
+ *
+ * Invoked with no locks held.
+ */
+static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
+{
+       int rc;
+       struct sk_buff_head xmitq;
+       struct tipc_bclink_entry *be;
+       struct tipc_link_entry *le;
+       struct tipc_msg *hdr = buf_msg(skb);
+       int usr = msg_user(hdr);
+       u32 dnode = msg_destnode(hdr);
+       struct tipc_node *n;
+
+       __skb_queue_head_init(&xmitq);
+
+       /* If NACK for other node, let rcv link for that node peek into it */
+       if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
+               n = tipc_node_find(net, dnode);
+       else
+               n = tipc_node_find(net, msg_prevnode(hdr));
+       if (!n) {
+               kfree_skb(skb);
+               return;
+       }
+       be = &n->bc_entry;
+       le = &n->links[bearer_id];
+
+       rc = tipc_bcast_rcv(net, be->link, skb);
+
+       /* A broadcast link reset may happen on reassembly failure */
+       if (rc & TIPC_LINK_DOWN_EVT)
+               tipc_node_reset_links(n);
+
+       /* Broadcast ACKs are sent on a unicast link */
+       if (rc & TIPC_LINK_SND_BC_ACK) {
+               tipc_node_lock(n);
+               tipc_link_build_ack_msg(le->link, &xmitq);
+               tipc_node_unlock(n);
+       }
+
+       if (!skb_queue_empty(&xmitq))
+               tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+
+       /* Deliver. 'arrvq' is under inputq2's lock protection */
+       if (!skb_queue_empty(&be->inputq1)) {
+               spin_lock_bh(&be->inputq2.lock);
+               spin_lock_bh(&be->inputq1.lock);
+               skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
+               spin_unlock_bh(&be->inputq1.lock);
+               spin_unlock_bh(&be->inputq2.lock);
+               tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
+       }
+       tipc_node_put(n);
+}
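
Buffered broadcast messages are spliced from inputq1 into arrvq with inputq2's lock taken first, so a reader draining arrvq into inputq2 cannot interleave with the splice. A minimal sketch of that lock ordering, using a hypothetical helper:

#include <linux/skbuff.h>

/* Hypothetical helper: take the downstream queue's lock before the
 * upstream one, then splice upstream into the staging queue.
 */
static void splice_with_lock_order(struct sk_buff_head *upstream,
				   struct sk_buff_head *staging,
				   struct sk_buff_head *downstream)
{
	spin_lock_bh(&downstream->lock);
	spin_lock_bh(&upstream->lock);
	skb_queue_splice_tail_init(upstream, staging);
	spin_unlock_bh(&upstream->lock);
	spin_unlock_bh(&downstream->lock);
}
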
+
 /**
  * tipc_node_check_state - check and if necessary update node state
  * @skb: TIPC packet
@@ -1116,7 +1190,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        }
 
        /* Ignore duplicate packets */
-       if (less(oseqno, rcv_nxt))
+       if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
                return true;
 
        /* Initiate or update failover mode if applicable */
@@ -1146,8 +1220,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        if (!pl || !tipc_link_is_up(pl))
                return true;
 
-       /* Initiate or update synch mode if applicable */
-       if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
+       /* Initiate synch mode if applicable */
+       if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
                syncpt = iseqno + exp_pkts - 1;
                if (!tipc_link_is_up(l)) {
                        tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
@@ -1204,6 +1278,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
        int usr = msg_user(hdr);
        int bearer_id = b->identity;
        struct tipc_link_entry *le;
+       u16 bc_ack = msg_bcast_ack(hdr);
        int rc = 0;
 
        __skb_queue_head_init(&xmitq);
@@ -1212,13 +1287,12 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
        if (unlikely(!tipc_msg_validate(skb)))
                goto discard;
 
-       /* Handle arrival of a non-unicast link packet */
+       /* Handle arrival of discovery or broadcast packet */
        if (unlikely(msg_non_seq(hdr))) {
-               if (usr ==  LINK_CONFIG)
-                       tipc_disc_rcv(net, skb, b);
+               if (unlikely(usr == LINK_CONFIG))
+                       return tipc_disc_rcv(net, skb, b);
                else
-                       tipc_bclink_rcv(net, skb);
-               return;
+                       return tipc_node_bc_rcv(net, skb, bearer_id);
        }
 
        /* Locate neighboring node that sent packet */
@@ -1227,19 +1301,18 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
                goto discard;
        le = &n->links[bearer_id];
 
+       /* Ensure broadcast reception is in synch with peer's send state */
+       if (unlikely(usr == LINK_PROTOCOL))
+               tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
+       else if (unlikely(n->bc_entry.link->acked != bc_ack))
+               tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
+
        tipc_node_lock(n);
 
        /* Is reception permitted at the moment ? */
        if (!tipc_node_filter_pkt(n, hdr))
                goto unlock;
 
-       if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
-               tipc_bclink_sync_state(n, hdr);
-
-       /* Release acked broadcast packets */
-       if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
-               tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));
-
        /* Check and if necessary update node state */
        if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
                rc = tipc_link_rcv(le->link, skb, &xmitq);
@@ -1254,8 +1327,8 @@ unlock:
        if (unlikely(rc & TIPC_LINK_DOWN_EVT))
                tipc_node_link_down(n, bearer_id, false);
 
-       if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
-               tipc_named_rcv(net, &n->bclink.namedq);
+       if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
+               tipc_named_rcv(net, &n->bc_entry.namedq);
 
        if (!skb_queue_empty(&le->inputq))
                tipc_sk_rcv(net, &le->inputq);
index 344b3e7594fd0d59d8b83143181ffeac8c2df9a9..6734562d3c6e57092e005516429400a1f68c1d8c 100644 (file)
 enum {
        TIPC_NOTIFY_NODE_DOWN           = (1 << 3),
        TIPC_NOTIFY_NODE_UP             = (1 << 4),
-       TIPC_WAKEUP_BCAST_USERS         = (1 << 5),
        TIPC_NOTIFY_LINK_UP             = (1 << 6),
-       TIPC_NOTIFY_LINK_DOWN           = (1 << 7),
-       TIPC_BCAST_MSG_EVT              = (1 << 9),
-       TIPC_BCAST_RESET                = (1 << 10)
+       TIPC_NOTIFY_LINK_DOWN           = (1 << 7)
 };
 
-/**
- * struct tipc_node_bclink - TIPC node bclink structure
- * @acked: sequence # of last outbound b'cast message acknowledged by node
- * @last_in: sequence # of last in-sequence b'cast message received from node
- * @last_sent: sequence # of last b'cast message sent by node
- * @oos_state: state tracker for handling OOS b'cast messages
- * @deferred_queue: deferred queue saved OOS b'cast message received from node
- * @reasm_buf: broadcast reassembly queue head from node
- * @inputq_map: bitmap indicating which inqueues should be kicked
- * @recv_permitted: true if node is allowed to receive b'cast messages
+/* Optional capabilities supported by this code version
  */
-struct tipc_node_bclink {
-       u32 acked;
-       u32 last_in;
-       u32 last_sent;
-       u32 oos_state;
-       u32 deferred_size;
-       struct sk_buff_head deferdq;
-       struct sk_buff *reasm_buf;
-       struct sk_buff_head namedq;
-       bool recv_permitted;
+enum {
+       TIPC_BCAST_SYNCH = (1 << 1)
 };
 
+#define TIPC_NODE_CAPABILITIES TIPC_BCAST_SYNCH
+
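
The capability word is advertised to peers at neighbor discovery, so either side can gate new behaviour on the other's bits. A sketch against the structs in this patch (the helper name is made up):

/* Made-up helper: check a peer capability bit before relying on it. */
static inline bool peer_supports_bcast_synch(struct tipc_node *n)
{
	return !!(n->capabilities & TIPC_BCAST_SYNCH);
}
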
 struct tipc_link_entry {
        struct tipc_link *link;
        u32 mtu;
@@ -92,6 +74,14 @@ struct tipc_link_entry {
        struct tipc_media_addr maddr;
 };
 
+struct tipc_bclink_entry {
+       struct tipc_link *link;
+       struct sk_buff_head inputq1;
+       struct sk_buff_head arrvq;
+       struct sk_buff_head inputq2;
+       struct sk_buff_head namedq;
+};
+
 /**
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
@@ -104,7 +94,6 @@ struct tipc_link_entry {
  * @active_links: bearer ids of active links, used as index into links[] array
  * @links: array containing references to all links to node
  * @action_flags: bit mask of different types of node actions
- * @bclink: broadcast-related info
  * @state: connectivity state vs peer node
  * @sync_point: sequence number where synch/failover is finished
  * @list: links to adjacent nodes in sorted list of cluster's nodes
@@ -124,8 +113,8 @@ struct tipc_node {
        struct hlist_node hash;
        int active_links[2];
        struct tipc_link_entry links[MAX_BEARERS];
+       struct tipc_bclink_entry bc_entry;
        int action_flags;
-       struct tipc_node_bclink bclink;
        struct list_head list;
        int state;
        u16 sync_point;
index 1060d52ff23eb14f7b2ed2f732beacf9a698502c..552dbaba9cf386a07e6c4f499fda27ca1f8a8f4a 100644 (file)
@@ -689,13 +689,13 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
        msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
 
 new_mtu:
-       mtu = tipc_bclink_get_mtu();
+       mtu = tipc_bcast_get_mtu(net);
        rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
        if (unlikely(rc < 0))
                return rc;
 
        do {
-               rc = tipc_bclink_xmit(net, pktchain);
+               rc = tipc_bcast_xmit(net, pktchain);
                if (likely(!rc))
                        return dsz;
 
index c170d3138953a2361df5439aeffadd29afa52ad9..816914ef228dac5fff2433ebdc8ca957173dc19d 100644 (file)
@@ -52,6 +52,8 @@
 /* IANA assigned UDP port */
 #define UDP_PORT_DEFAULT       6118
 
+#define UDP_MIN_HEADROOM        28
+
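
The 28-byte floor corresponds to an outer IPv4 header (20 bytes) plus a UDP header (8 bytes). A compile-time check documenting that reading of the constant (an assumption-recording sketch, not part of the patch):

#include <linux/bug.h>
#include <linux/ip.h>
#include <linux/udp.h>

static inline void udp_min_headroom_check(void)
{
	BUILD_BUG_ON(UDP_MIN_HEADROOM <
		     sizeof(struct iphdr) + sizeof(struct udphdr));
}
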
 static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
        [TIPC_NLA_UDP_UNSPEC]   = {.type = NLA_UNSPEC},
        [TIPC_NLA_UDP_LOCAL]    = {.type = NLA_BINARY,
@@ -153,11 +155,12 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
        struct udp_bearer *ub;
        struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value;
        struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
-       struct sk_buff *clone;
        struct rtable *rt;
 
-       clone = skb_clone(skb, GFP_ATOMIC);
-       skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
+       if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
+               err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+               if (err)
+                       goto tx_error;
+       }
+
+       skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
        ub = rcu_dereference_rtnl(b->media_ptr);
        if (!ub) {
                err = -ENODEV;
@@ -167,7 +170,7 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                struct flowi4 fl = {
                        .daddr = dst->ipv4.s_addr,
                        .saddr = src->ipv4.s_addr,
-                       .flowi4_mark = clone->mark,
+                       .flowi4_mark = skb->mark,
                        .flowi4_proto = IPPROTO_UDP
                };
                rt = ip_route_output_key(net, &fl);
@@ -176,7 +179,7 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                        goto tx_error;
                }
                ttl = ip4_dst_hoplimit(&rt->dst);
-               err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone,
+               err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb,
                                          src->ipv4.s_addr,
                                          dst->ipv4.s_addr, 0, ttl, 0,
                                          src->udp_port, dst->udp_port,
@@ -199,7 +202,7 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                if (err)
                        goto tx_error;
                ttl = ip6_dst_hoplimit(ndst);
-               err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone,
+               err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
                                           ndst->dev, &src->ipv6,
                                           &dst->ipv6, 0, ttl, src->udp_port,
                                           dst->udp_port, false);
@@ -208,7 +211,7 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
        return err;
 
 tx_error:
-       kfree_skb(clone);
+       kfree_skb(skb);
        return err;
 }
 
@@ -425,7 +428,6 @@ static void tipc_udp_disable(struct tipc_bearer *b)
        }
        if (ub->ubsock)
                sock_set_flag(ub->ubsock->sk, SOCK_DEAD);
-       RCU_INIT_POINTER(b->media_ptr, NULL);
        RCU_INIT_POINTER(ub->bearer, NULL);
 
        /* sock_release need to be done outside of rtnl lock */
index ef31b40ad55000a5fd029d7479b546483cc782b3..aaa0b58d6aba29816aa81c0f6ce444a0ab0be0fb 100644 (file)
@@ -326,9 +326,10 @@ found:
        return s;
 }
 
-static inline int unix_writable(struct sock *sk)
+static int unix_writable(const struct sock *sk)
 {
-       return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+       return sk->sk_state != TCP_LISTEN &&
+              (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 }
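
A listening socket can never be written to, so it now fails the writability test outright instead of signalling POLLOUT whenever sk_wmem_alloc is low. From userspace the visible effect is roughly:

#include <poll.h>

/* Sketch: poll() on a listening AF_UNIX stream socket should report
 * POLLIN once a connection is pending, but no longer POLLOUT.
 */
static short listener_events(int listen_fd)
{
	struct pollfd pfd = { .fd = listen_fd, .events = POLLIN | POLLOUT };

	poll(&pfd, 1, 0);
	return pfd.revents;
}
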
 
 static void unix_write_space(struct sock *sk)
@@ -2064,6 +2065,11 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
                goto out;
        }
 
+       if (flags & MSG_PEEK)
+               skip = sk_peek_offset(sk, flags);
+       else
+               skip = 0;
+
        do {
                int chunk;
                struct sk_buff *skb, *last;
@@ -2112,7 +2118,6 @@ unlock:
                        break;
                }
 
-               skip = sk_peek_offset(sk, flags);
                while (skip >= unix_skb_len(skb)) {
                        skip -= unix_skb_len(skb);
                        last = skb;
@@ -2179,14 +2184,12 @@ unlock:
                        if (UNIXCB(skb).fp)
                                scm.fp = scm_fp_dup(UNIXCB(skb).fp);
 
-                       if (skip) {
-                               sk_peek_offset_fwd(sk, chunk);
-                               skip -= chunk;
-                       }
+                       sk_peek_offset_fwd(sk, chunk);
 
                        if (UNIXCB(skb).fp)
                                break;
 
+                       skip = 0;
                        last = skb;
                        last_len = skb->len;
                        unix_state_lock(sk);
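
The peek offset is now sampled once per call and applied from the first skb onward, so MSG_PEEK combined with SO_PEEK_OFF walks through the queued data instead of re-reading it from the head. A userspace sketch of that behaviour:

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

static void peek_twice(int fd)
{
	char buf[64];
	int off = 0;
	ssize_t a, b;

	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
	a = recv(fd, buf, sizeof(buf), MSG_PEEK);	/* bytes 0..a-1 */
	b = recv(fd, buf, sizeof(buf), MSG_PEEK);	/* continues at a */
	printf("peeked %zd then %zd bytes\n", a, b);
}
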
index df5fc6b340f1bbde621fbe84fa44e0af6e2e00af..00e8a349aabccc61cec1f9ebb889bc7dd9d3b5e1 100644 (file)
@@ -1948,13 +1948,13 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
        err = misc_register(&vsock_device);
        if (err) {
                pr_err("Failed to register misc device\n");
-               return -ENOENT;
+               goto err_reset_transport;
        }
 
        err = proto_register(&vsock_proto, 1);  /* we want our slab */
        if (err) {
                pr_err("Cannot register vsock protocol\n");
-               goto err_misc_deregister;
+               goto err_deregister_misc;
        }
 
        err = sock_register(&vsock_family_ops);
@@ -1969,8 +1969,9 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
 
 err_unregister_proto:
        proto_unregister(&vsock_proto);
-err_misc_deregister:
+err_deregister_misc:
        misc_deregister(&vsock_device);
+err_reset_transport:
        transport = NULL;
 err_busy:
        mutex_unlock(&vsock_register_mutex);
index 1f63daff39659e08561862cfd71220ffc6949291..7555cad83a752a930a54e4a8ca609846386e0ec1 100644 (file)
 
 static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
 static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
-static void vmci_transport_peer_attach_cb(u32 sub_id,
-                                         const struct vmci_event_data *ed,
-                                         void *client_data);
 static void vmci_transport_peer_detach_cb(u32 sub_id,
                                          const struct vmci_event_data *ed,
                                          void *client_data);
 static void vmci_transport_recv_pkt_work(struct work_struct *work);
+static void vmci_transport_cleanup(struct work_struct *work);
 static int vmci_transport_recv_listen(struct sock *sk,
                                      struct vmci_transport_packet *pkt);
 static int vmci_transport_recv_connecting_server(
@@ -75,6 +73,10 @@ struct vmci_transport_recv_pkt_info {
        struct vmci_transport_packet pkt;
 };
 
+static LIST_HEAD(vmci_transport_cleanup_list);
+static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
+static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);
+
 static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
                                                           VMCI_INVALID_ID };
 static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
@@ -791,44 +793,6 @@ out:
        return err;
 }
 
-static void vmci_transport_peer_attach_cb(u32 sub_id,
-                                         const struct vmci_event_data *e_data,
-                                         void *client_data)
-{
-       struct sock *sk = client_data;
-       const struct vmci_event_payload_qp *e_payload;
-       struct vsock_sock *vsk;
-
-       e_payload = vmci_event_data_const_payload(e_data);
-
-       vsk = vsock_sk(sk);
-
-       /* We don't ask for delayed CBs when we subscribe to this event (we
-        * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
-        * guarantees in that case about what context we might be running in,
-        * so it could be BH or process, blockable or non-blockable.  So we
-        * need to account for all possible contexts here.
-        */
-       local_bh_disable();
-       bh_lock_sock(sk);
-
-       /* XXX This is lame, we should provide a way to lookup sockets by
-        * qp_handle.
-        */
-       if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
-                                e_payload->handle)) {
-               /* XXX This doesn't do anything, but in the future we may want
-                * to set a flag here to verify the attach really did occur and
-                * we weren't just sent a datagram claiming it was.
-                */
-               goto out;
-       }
-
-out:
-       bh_unlock_sock(sk);
-       local_bh_enable();
-}
-
 static void vmci_transport_handle_detach(struct sock *sk)
 {
        struct vsock_sock *vsk;
@@ -871,28 +835,38 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
                                          const struct vmci_event_data *e_data,
                                          void *client_data)
 {
-       struct sock *sk = client_data;
+       struct vmci_transport *trans = client_data;
        const struct vmci_event_payload_qp *e_payload;
-       struct vsock_sock *vsk;
 
        e_payload = vmci_event_data_const_payload(e_data);
-       vsk = vsock_sk(sk);
-       if (vmci_handle_is_invalid(e_payload->handle))
-               return;
-
-       /* Same rules for locking as for peer_attach_cb(). */
-       local_bh_disable();
-       bh_lock_sock(sk);
 
        /* XXX This is lame, we should provide a way to lookup sockets by
         * qp_handle.
         */
-       if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
-                                e_payload->handle))
-               vmci_transport_handle_detach(sk);
+       if (vmci_handle_is_invalid(e_payload->handle) ||
+           !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+               return;
 
-       bh_unlock_sock(sk);
-       local_bh_enable();
+       /* We don't ask for delayed CBs when we subscribe to this event (we
+        * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
+        * guarantees in that case about what context we might be running in,
+        * so it could be BH or process, blockable or non-blockable.  So we
+        * need to account for all possible contexts here.
+        */
+       spin_lock_bh(&trans->lock);
+       if (!trans->sk)
+               goto out;
+
+       /* Apart from here, trans->lock is only grabbed as part of sk destruct,
+        * where trans->sk isn't locked.
+        */
+       bh_lock_sock(trans->sk);
+
+       vmci_transport_handle_detach(trans->sk);
+
+       bh_unlock_sock(trans->sk);
+out:
+       spin_unlock_bh(&trans->lock);
 }
 
 static void vmci_transport_qp_resumed_cb(u32 sub_id,
@@ -1181,7 +1155,7 @@ vmci_transport_recv_connecting_server(struct sock *listener,
         */
        err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
                                   vmci_transport_peer_detach_cb,
-                                  pending, &detach_sub_id);
+                                  vmci_trans(vpending), &detach_sub_id);
        if (err < VMCI_SUCCESS) {
                vmci_transport_send_reset(pending, pkt);
                err = vmci_transport_error_to_vsock_error(err);
@@ -1321,7 +1295,6 @@ vmci_transport_recv_connecting_client(struct sock *sk,
                    || vmci_trans(vsk)->qpair
                    || vmci_trans(vsk)->produce_size != 0
                    || vmci_trans(vsk)->consume_size != 0
-                   || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
                    || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
                        skerr = EPROTO;
                        err = -EINVAL;
@@ -1389,7 +1362,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
        struct vsock_sock *vsk;
        struct vmci_handle handle;
        struct vmci_qp *qpair;
-       u32 attach_sub_id;
        u32 detach_sub_id;
        bool is_local;
        u32 flags;
@@ -1399,7 +1371,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
 
        vsk = vsock_sk(sk);
        handle = VMCI_INVALID_HANDLE;
-       attach_sub_id = VMCI_INVALID_ID;
        detach_sub_id = VMCI_INVALID_ID;
 
        /* If we have gotten here then we should be past the point where old
@@ -1444,23 +1415,15 @@ static int vmci_transport_recv_connecting_client_negotiate(
                goto destroy;
        }
 
-       /* Subscribe to attach and detach events first.
+       /* Subscribe to detach events first.
         *
         * XXX We attach once for each queue pair created for now so it is easy
         * to find the socket (it's provided), but later we should only
         * subscribe once and add a way to lookup sockets by queue pair handle.
         */
-       err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
-                                  vmci_transport_peer_attach_cb,
-                                  sk, &attach_sub_id);
-       if (err < VMCI_SUCCESS) {
-               err = vmci_transport_error_to_vsock_error(err);
-               goto destroy;
-       }
-
        err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
                                   vmci_transport_peer_detach_cb,
-                                  sk, &detach_sub_id);
+                                  vmci_trans(vsk), &detach_sub_id);
        if (err < VMCI_SUCCESS) {
                err = vmci_transport_error_to_vsock_error(err);
                goto destroy;
@@ -1496,7 +1459,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
        vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
                pkt->u.size;
 
-       vmci_trans(vsk)->attach_sub_id = attach_sub_id;
        vmci_trans(vsk)->detach_sub_id = detach_sub_id;
 
        vmci_trans(vsk)->notify_ops->process_negotiate(sk);
@@ -1504,9 +1466,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
        return 0;
 
 destroy:
-       if (attach_sub_id != VMCI_INVALID_ID)
-               vmci_event_unsubscribe(attach_sub_id);
-
        if (detach_sub_id != VMCI_INVALID_ID)
                vmci_event_unsubscribe(detach_sub_id);
 
@@ -1607,9 +1566,11 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
        vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
        vmci_trans(vsk)->qpair = NULL;
        vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
-       vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
-               VMCI_INVALID_ID;
+       vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
        vmci_trans(vsk)->notify_ops = NULL;
+       INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
+       vmci_trans(vsk)->sk = &vsk->sk;
+       spin_lock_init(&vmci_trans(vsk)->lock);
        if (psk) {
                vmci_trans(vsk)->queue_pair_size =
                        vmci_trans(psk)->queue_pair_size;
@@ -1629,29 +1590,57 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
        return 0;
 }
 
-static void vmci_transport_destruct(struct vsock_sock *vsk)
+static void vmci_transport_free_resources(struct list_head *transport_list)
 {
-       if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
-               vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
-               vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
-       }
+       while (!list_empty(transport_list)) {
+               struct vmci_transport *transport =
+                   list_first_entry(transport_list, struct vmci_transport,
+                                    elem);
+               list_del(&transport->elem);
 
-       if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
-               vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
-               vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
-       }
+               if (transport->detach_sub_id != VMCI_INVALID_ID) {
+                       vmci_event_unsubscribe(transport->detach_sub_id);
+                       transport->detach_sub_id = VMCI_INVALID_ID;
+               }
 
-       if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
-               vmci_qpair_detach(&vmci_trans(vsk)->qpair);
-               vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
-               vmci_trans(vsk)->produce_size = 0;
-               vmci_trans(vsk)->consume_size = 0;
+               if (!vmci_handle_is_invalid(transport->qp_handle)) {
+                       vmci_qpair_detach(&transport->qpair);
+                       transport->qp_handle = VMCI_INVALID_HANDLE;
+                       transport->produce_size = 0;
+                       transport->consume_size = 0;
+               }
+
+               kfree(transport);
        }
+}
+
+static void vmci_transport_cleanup(struct work_struct *work)
+{
+       LIST_HEAD(pending);
+
+       spin_lock_bh(&vmci_transport_cleanup_lock);
+       list_replace_init(&vmci_transport_cleanup_list, &pending);
+       spin_unlock_bh(&vmci_transport_cleanup_lock);
+       vmci_transport_free_resources(&pending);
+}
+
+static void vmci_transport_destruct(struct vsock_sock *vsk)
+{
+       /* Ensure that the detach callback doesn't use the sk/vsk
+        * we are about to destruct.
+        */
+       spin_lock_bh(&vmci_trans(vsk)->lock);
+       vmci_trans(vsk)->sk = NULL;
+       spin_unlock_bh(&vmci_trans(vsk)->lock);
 
        if (vmci_trans(vsk)->notify_ops)
                vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
 
-       kfree(vsk->trans);
+       spin_lock_bh(&vmci_transport_cleanup_lock);
+       list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
+       spin_unlock_bh(&vmci_transport_cleanup_lock);
+       schedule_work(&vmci_transport_cleanup_work);
+
        vsk->trans = NULL;
 }
 
@@ -2146,6 +2135,9 @@ module_init(vmci_transport_init);
 
 static void __exit vmci_transport_exit(void)
 {
+       cancel_work_sync(&vmci_transport_cleanup_work);
+       vmci_transport_free_resources(&vmci_transport_cleanup_list);
+
        if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
                if (vmci_datagram_destroy_handle(
                        vmci_transport_stream_handle) != VMCI_SUCCESS)
@@ -2164,6 +2156,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
+MODULE_VERSION("1.0.2.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
index ce6c9623d5f069029ce58294bcc7de9bc3728fcd..2ad46f39649f8130d9f19ce48ccf8a8b4309797d 100644 (file)
@@ -119,10 +119,12 @@ struct vmci_transport {
        u64 queue_pair_size;
        u64 queue_pair_min_size;
        u64 queue_pair_max_size;
-       u32 attach_sub_id;
        u32 detach_sub_id;
        union vmci_transport_notify notify;
        struct vmci_transport_notify_ops *notify_ops;
+       struct list_head elem;
+       struct sock *sk;
+       spinlock_t lock; /* protects sk. */
 };
 
 int vmci_transport_register(void);
index a8de9e3002000d7eaa76f6764797e5b231d187ff..24e06a2377f6b3601003157d22dd05bb0278f145 100644 (file)
@@ -1928,8 +1928,10 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
        struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
        struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
+       struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+       struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
 
-       if (!lt && !rp && !re)
+       if (!lt && !rp && !re && !et && !rt)
                return err;
 
        /* pedantic mode - thou shalt sayeth replaceth */
index 63e7d50e6a4fe3ca46cbd3b7ae064600bf6696ff..b30514514e370e1de4fed734d1dcc7d28c927695 100644 (file)
@@ -13,6 +13,7 @@ hostprogs-y += tracex3
 hostprogs-y += tracex4
 hostprogs-y += tracex5
 hostprogs-y += tracex6
+hostprogs-y += trace_output
 hostprogs-y += lathist
 
 test_verifier-objs := test_verifier.o libbpf.o
@@ -27,6 +28,7 @@ tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
 tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
 tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
 tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
+trace_output-objs := bpf_load.o libbpf.o trace_output_user.o
 lathist-objs := bpf_load.o libbpf.o lathist_user.o
 
 # Tell kbuild to always build the programs
@@ -40,6 +42,7 @@ always += tracex3_kern.o
 always += tracex4_kern.o
 always += tracex5_kern.o
 always += tracex6_kern.o
+always += trace_output_kern.o
 always += tcbpf1_kern.o
 always += lathist_kern.o
 
@@ -55,6 +58,7 @@ HOSTLOADLIBES_tracex3 += -lelf
 HOSTLOADLIBES_tracex4 += -lelf -lrt
 HOSTLOADLIBES_tracex5 += -lelf
 HOSTLOADLIBES_tracex6 += -lelf
+HOSTLOADLIBES_trace_output += -lelf -lrt
 HOSTLOADLIBES_lathist += -lelf
 
 # point this to your LLVM backend with bpf support
@@ -64,3 +68,6 @@ $(obj)/%.o: $(src)/%.c
        clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
                -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
                -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
+       clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
+               -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+               -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
index 21aa1b44c30ca1ff8ba369de84f7492df01aa518..b35c21e0b43f68a6dd57fe784e3183c821aaefa9 100644 (file)
@@ -37,6 +37,8 @@ static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
        (void *) BPF_FUNC_clone_redirect;
 static int (*bpf_redirect)(int ifindex, int flags) =
        (void *) BPF_FUNC_redirect;
+static int (*bpf_perf_event_output)(void *ctx, void *map, int index, void *data, int size) =
+       (void *) BPF_FUNC_perf_event_output;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
new file mode 100644 (file)
index 0000000..8d8d1ec
--- /dev/null
@@ -0,0 +1,31 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       .key_size = sizeof(int),
+       .value_size = sizeof(u32),
+       .max_entries = 2,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+       struct S {
+               u64 pid;
+               u64 cookie;
+       } data;
+
+       memset(&data, 0, sizeof(data));
+       data.pid = bpf_get_current_pid_tgid();
+       data.cookie = 0x12345678;
+
+       bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
new file mode 100644 (file)
index 0000000..661a7d0
--- /dev/null
@@ -0,0 +1,196 @@
+/* This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <signal.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+static int pmu_fd;
+
+int page_size;
+int page_cnt = 8;
+volatile struct perf_event_mmap_page *header;
+
+typedef void (*print_fn)(void *data, int size);
+
+static int perf_event_mmap(int fd)
+{
+       void *base;
+       int mmap_size;
+
+       page_size = getpagesize();
+       mmap_size = page_size * (page_cnt + 1);
+
+       base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+       if (base == MAP_FAILED) {
+               printf("mmap err\n");
+               return -1;
+       }
+
+       header = base;
+       return 0;
+}
+
+static int perf_event_poll(int fd)
+{
+       struct pollfd pfd = { .fd = fd, .events = POLLIN };
+
+       return poll(&pfd, 1, 1000);
+}
+
+struct perf_event_sample {
+       struct perf_event_header header;
+       __u32 size;
+       char data[];
+};
+
+void perf_event_read(print_fn fn)
+{
+       __u64 data_tail = header->data_tail;
+       __u64 data_head = header->data_head;
+       __u64 buffer_size = page_cnt * page_size;
+       void *base, *begin, *end;
+       char buf[256];
+
+       asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
+       if (data_head == data_tail)
+               return;
+
+       base = ((char *)header) + page_size;
+
+       begin = base + data_tail % buffer_size;
+       end = base + data_head % buffer_size;
+
+       while (begin != end) {
+               struct perf_event_sample *e;
+
+               e = begin;
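+               /* A record may wrap past the end of the ring buffer; if
+                * so, reassemble it contiguously in buf[] before it is
+                * parsed below.
+                */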
+               if (begin + e->header.size > base + buffer_size) {
+                       long len = base + buffer_size - begin;
+
+                       assert(len < e->header.size);
+                       memcpy(buf, begin, len);
+                       memcpy(buf + len, base, e->header.size - len);
+                       e = (void *) buf;
+                       begin = base + e->header.size - len;
+               } else if (begin + e->header.size == base + buffer_size) {
+                       begin = base;
+               } else {
+                       begin += e->header.size;
+               }
+
+               if (e->header.type == PERF_RECORD_SAMPLE) {
+                       fn(e->data, e->size);
+               } else if (e->header.type == PERF_RECORD_LOST) {
+                       struct {
+                               struct perf_event_header header;
+                               __u64 id;
+                               __u64 lost;
+                       } *lost = (void *) e;
+                       printf("lost %lld events\n", lost->lost);
+               } else {
+                       printf("unknown event type=%d size=%d\n",
+                              e->header.type, e->header.size);
+               }
+       }
+
+       __sync_synchronize(); /* smp_mb() */
+       header->data_tail = data_head;
+}
+
+static __u64 time_get_ns(void)
+{
+       struct timespec ts;
+
+       clock_gettime(CLOCK_MONOTONIC, &ts);
+       return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static __u64 start_time;
+
+#define MAX_CNT 100000ll
+
+static void print_bpf_output(void *data, int size)
+{
+       static __u64 cnt;
+       struct {
+               __u64 pid;
+               __u64 cookie;
+       } *e = data;
+
+       if (e->cookie != 0x12345678) {
+               printf("BUG pid %llx cookie %llx sized %d\n",
+                      e->pid, e->cookie, size);
+               kill(0, SIGINT);
+       }
+
+       cnt++;
+
+       if (cnt == MAX_CNT) {
+               printf("recv %lld events per sec\n",
+                      MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+               kill(0, SIGINT);
+       }
+}
+
+static void test_bpf_perf_event(void)
+{
+       struct perf_event_attr attr = {
+               .sample_type = PERF_SAMPLE_RAW,
+               .type = PERF_TYPE_SOFTWARE,
+               .config = PERF_COUNT_SW_BPF_OUTPUT,
+       };
+       int key = 0;
+
+       pmu_fd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
+
+       assert(pmu_fd >= 0);
+       assert(bpf_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
+       ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+}
+
+int main(int argc, char **argv)
+{
+       char filename[256];
+       FILE *f;
+
+       snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+       if (load_bpf_file(filename)) {
+               printf("%s", bpf_log_buf);
+               return 1;
+       }
+
+       test_bpf_perf_event();
+
+       if (perf_event_mmap(pmu_fd) < 0)
+               return 1;
+
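+       /* Keep sys_write() firing in the background via dd so the
+        * kprobe triggers; the pipe from popen() is never read.
+        */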
+       f = popen("taskset 1 dd if=/dev/zero of=/dev/null", "r");
+       (void) f;
+
+       start_time = time_get_ns();
+       for (;;) {
+               perf_event_poll(pmu_fd);
+               perf_event_read(print_bpf_output);
+       }
+
+       return 0;
+}
index 0cd46e129920e8ad114eb335ae92ce85230d068d..b967e4f9fed2e6cc78b9538c79517a473fc375b8 100755 (executable)
@@ -115,7 +115,7 @@ esac
 BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)"
 
 # Setup the directory structure
-rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir"
+rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files
 mkdir -m 755 -p "$tmpdir/DEBIAN"
 mkdir -p "$tmpdir/lib" "$tmpdir/boot"
 mkdir -p "$fwdir/lib/firmware/$version/"
@@ -408,7 +408,7 @@ binary-arch:
        \$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg
 
 clean:
-       rm -rf debian/*tmp
+       rm -rf debian/*tmp debian/files
        mv debian/ debian.backup # debian/ might be cleaned away
        \$(MAKE) clean
        mv debian.backup debian
index 6e50841ef1f63355ead908559fbfe4793095601f..26f4039d54b8f6bd8dd0aba99837835ba13c871b 100644 (file)
@@ -6131,21 +6131,18 @@ security_initcall(selinux_init);
 static struct nf_hook_ops selinux_nf_ops[] = {
        {
                .hook =         selinux_ipv4_postroute,
-               .owner =        THIS_MODULE,
                .pf =           NFPROTO_IPV4,
                .hooknum =      NF_INET_POST_ROUTING,
                .priority =     NF_IP_PRI_SELINUX_LAST,
        },
        {
                .hook =         selinux_ipv4_forward,
-               .owner =        THIS_MODULE,
                .pf =           NFPROTO_IPV4,
                .hooknum =      NF_INET_FORWARD,
                .priority =     NF_IP_PRI_SELINUX_FIRST,
        },
        {
                .hook =         selinux_ipv4_output,
-               .owner =        THIS_MODULE,
                .pf =           NFPROTO_IPV4,
                .hooknum =      NF_INET_LOCAL_OUT,
                .priority =     NF_IP_PRI_SELINUX_FIRST,
@@ -6153,14 +6150,12 @@ static struct nf_hook_ops selinux_nf_ops[] = {
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        {
                .hook =         selinux_ipv6_postroute,
-               .owner =        THIS_MODULE,
                .pf =           NFPROTO_IPV6,
                .hooknum =      NF_INET_POST_ROUTING,
                .priority =     NF_IP6_PRI_SELINUX_LAST,
        },
        {
                .hook =         selinux_ipv6_forward,
-               .owner =        THIS_MODULE,
                .pf =           NFPROTO_IPV6,
                .hooknum =      NF_INET_FORWARD,
                .priority =     NF_IP6_PRI_SELINUX_FIRST,
index a9e41da05d28df87d0702e7c5f801de6a3b59cc8..6d1706c9777e64fc69ca305e6ef55a2d563bb29d 100644 (file)
@@ -57,7 +57,6 @@ static unsigned int smack_ipv4_output(void *priv,
 static struct nf_hook_ops smack_nf_ops[] = {
        {
                .hook =         smack_ipv4_output,
-               .owner =        THIS_MODULE,
                .pf =           NFPROTO_IPV4,
                .hooknum =      NF_INET_LOCAL_OUT,
                .priority =     NF_IP_PRI_SELINUX_FIRST,
@@ -65,7 +64,6 @@ static struct nf_hook_ops smack_nf_ops[] = {
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        {
                .hook =         smack_ipv6_output,
-               .owner =        THIS_MODULE,
                .pf =           NFPROTO_IPV6,
                .hooknum =      NF_INET_LOCAL_OUT,
                .priority =     NF_IP6_PRI_SELINUX_FIRST,
index 584a0343ab0cc132b7c2038679923b6617926cf7..85813de26da87715df7d1d259339e30450c7815a 100644 (file)
@@ -633,6 +633,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
        SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
        SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+       SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
        {} /* terminator */
 };
 
index afec6dc9f91fddcf8c0023b344771307aa684d3a..16b8dcba5c12d2d13ed7c80c4e6f93df69d9944f 100644 (file)
@@ -5306,6 +5306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
index 9d947aef2c8b60b99f63fd9464ad289bef0c7061..def5cc8dff0293c2f70c4c3fcf67da1aea1bf55e 100644 (file)
@@ -4520,7 +4520,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
                return err;
 
        spec = codec->spec;
-       codec->power_save_node = 1;
+       /* enable power_save_node only for new 92HD89xx chips, as it causes
+        * click noises on old 92HD73xx chips.
+        */
+       if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670)
+               codec->power_save_node = 1;
        spec->linear_tone_beep = 0;
        spec->gen.mixer_nid = 0x1d;
        spec->have_spdif_mux = 1;
index 58c3164802b8ceda545e1469f557a814702694be..8c907ebea18960ec8e48942f5746fd2121705736 100644 (file)
@@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = {
        .cpu_dai_name   = "au1xpsc_i2s.2",
        .platform_name  = "au1xpsc-pcm.2",
        .codec_name     = "wm8731.0-001b",
+       .dai_fmt        = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
+                         SND_SOC_DAIFMT_CBM_CFM,
        .ops            = &db1200_i2s_wm8731_ops,
 };
 
@@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = {
        .cpu_dai_name   = "au1xpsc_i2s.3",
        .platform_name  = "au1xpsc-pcm.3",
        .codec_name     = "wm8731.0-001b",
+       .dai_fmt        = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
+                         SND_SOC_DAIFMT_CBM_CFM,
        .ops            = &db1200_i2s_wm8731_ops,
 };
 
index 268a28bd1df409dd103d08bbe809cfe294b1e858..5c101af0ac630dddf1cf12085846d5cfd9c08911 100644 (file)
@@ -519,11 +519,11 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
                RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
 
        /* ADC Boost Volume Control */
-       SOC_DOUBLE_TLV("STO1 ADC Boost Gain", RT5645_ADC_BST_VOL1,
+       SOC_DOUBLE_TLV("ADC Boost Capture Volume", RT5645_ADC_BST_VOL1,
                RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0,
                adc_bst_tlv),
-       SOC_DOUBLE_TLV("STO2 ADC Boost Gain", RT5645_ADC_BST_VOL1,
-               RT5645_STO2_ADC_L_BST_SFT, RT5645_STO2_ADC_R_BST_SFT, 3, 0,
+       SOC_DOUBLE_TLV("Mono ADC Boost Capture Volume", RT5645_ADC_BST_VOL2,
+               RT5645_MONO_ADC_L_BST_SFT, RT5645_MONO_ADC_R_BST_SFT, 3, 0,
                adc_bst_tlv),
 
        /* I2S2 function select */
index 0e4cfc6ac64984acb1bd395a7477b7cf21cfc020..8c964cfb120ddc4130e5d39e9ab96ae34623eb34 100644 (file)
@@ -39,8 +39,8 @@
 #define RT5645_STO1_ADC_DIG_VOL                        0x1c
 #define RT5645_MONO_ADC_DIG_VOL                        0x1d
 #define RT5645_ADC_BST_VOL1                    0x1e
-/* Mixer - D-D */
 #define RT5645_ADC_BST_VOL2                    0x20
+/* Mixer - D-D */
 #define RT5645_STO1_ADC_MIXER                  0x27
 #define RT5645_MONO_ADC_MIXER                  0x28
 #define RT5645_AD_DA_MIXER                     0x29
 #define RT5645_STO1_ADC_R_BST_SFT              12
 #define RT5645_STO1_ADC_COMP_MASK              (0x3 << 10)
 #define RT5645_STO1_ADC_COMP_SFT               10
-#define RT5645_STO2_ADC_L_BST_MASK             (0x3 << 8)
-#define RT5645_STO2_ADC_L_BST_SFT              8
-#define RT5645_STO2_ADC_R_BST_MASK             (0x3 << 6)
-#define RT5645_STO2_ADC_R_BST_SFT              6
-#define RT5645_STO2_ADC_COMP_MASK              (0x3 << 4)
-#define RT5645_STO2_ADC_COMP_SFT               4
+
+/* ADC Boost Volume Control (0x20) */
+#define RT5645_MONO_ADC_L_BST_MASK             (0x3 << 14)
+#define RT5645_MONO_ADC_L_BST_SFT              14
+#define RT5645_MONO_ADC_R_BST_MASK             (0x3 << 12)
+#define RT5645_MONO_ADC_R_BST_SFT              12
+#define RT5645_MONO_ADC_COMP_MASK              (0x3 << 10)
+#define RT5645_MONO_ADC_COMP_SFT               10
 
 /* Stereo2 ADC Mixer Control (0x26) */
 #define RT5645_STO2_ADC_SRC_MASK               (0x1 << 15)
index bfda25ef0dd43313f066a7afca06c11b8a8ecbe0..f540f82b1f271ec4833d9ea43aa7f4567c0df712 100644 (file)
@@ -1376,8 +1376,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
                        sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
 
        snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
-                       SGTL5000_BIAS_R_MASK,
-                       sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT);
+                       SGTL5000_BIAS_VOLT_MASK,
+                       sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT);
        /*
         * disable DAP
         * TODO:
@@ -1549,7 +1549,7 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
                        else {
                                sgtl5000->micbias_voltage = 0;
                                dev_err(&client->dev,
-                                       "Unsuitable MicBias resistor\n");
+                                       "Unsuitable MicBias voltage\n");
                        }
                } else {
                        sgtl5000->micbias_voltage = 0;
index e3a0bca28bcf5fb7c454de1abf235c1c5ace347d..cc1d3981fa4b6b92c018d596a0aaac1192f141f1 100644 (file)
@@ -549,7 +549,7 @@ static struct snd_soc_dai_driver tas2552_dai[] = {
 /*
  * DAC digital volumes. From -7 to 24 dB in 1 dB steps
  */
-static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0);
+static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0);
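
DECLARE_TLV_DB_SCALE takes its minimum and step in 0.01 dB units, so a -7 dB floor must be written as -700; the old value had declared a floor of only -0.07 dB. For reference:

#include <sound/tlv.h>

/* "-7 dB to +24 dB in 1 dB steps" in centi-dB: min -700, step 100. */
static DECLARE_TLV_DB_SCALE(example_dac_tlv, -700, 100, 0);
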
 
 static const char * const tas2552_din_source_select[] = {
        "Muted",
index 1a82b19b26442e31eb38e8fa38287d9710aa3546..8739126a1f6f60d4c9e3eac08aaf337c2f7b3a2b 100644 (file)
@@ -1509,14 +1509,17 @@ static int aic3x_init(struct snd_soc_codec *codec)
        snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL);
        snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL);
 
-       /* Line2 to HP Bypass default volume, disconnect from Output Mixer */
-       snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL);
-       snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL);
-       snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL);
-       snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL);
-       /* Line2 Line Out default volume, disconnect from Output Mixer */
-       snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL);
-       snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL);
+       /* On tlv320aic3104, these registers are reserved and must not be written */
+       if (aic3x->model != AIC3X_MODEL_3104) {
+               /* Line2 to HP Bypass default volume, disconnect from Output Mixer */
+               snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL);
+               snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL);
+               snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL);
+               snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL);
+               /* Line2 Line Out default volume, disconnect from Output Mixer */
+               snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL);
+               snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL);
+       }
 
        switch (aic3x->model) {
        case AIC3X_MODEL_3X:
index 293e47a6ff59073af3aaf0bb6c6d76521d1b1d94..2fbc6ef8cbdb394fd4ffa2b612f50d111fcdc4e5 100644 (file)
@@ -3760,7 +3760,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
        ret = snd_soc_register_codec(&i2c->dev,
                                     &soc_codec_dev_wm8962, &wm8962_dai, 1);
        if (ret < 0)
-               goto err_enable;
+               goto err_pm_runtime;
 
        regcache_cache_only(wm8962->regmap, true);
 
@@ -3769,6 +3769,8 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
 
        return 0;
 
+err_pm_runtime:
+       pm_runtime_disable(&i2c->dev);
 err_enable:
        regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
 err:
@@ -3778,6 +3780,7 @@ err:
 static int wm8962_i2c_remove(struct i2c_client *client)
 {
        snd_soc_unregister_codec(&client->dev);
+       pm_runtime_disable(&client->dev);
        return 0;
 }
 
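
The wm8962 hunks restore unwind symmetry: probe enables runtime PM before registering the codec, so a registration failure has to disable it again (the old code jumped past that), and remove must undo it as well. A hypothetical skeleton of the balanced pattern; the foo_* helpers stand in for the real registration calls:

    #include <linux/pm_runtime.h>

    static int foo_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
    {
            int ret;

            pm_runtime_enable(&i2c->dev);

            ret = foo_register(&i2c->dev);   /* assumed helper */
            if (ret < 0)
                    goto err_pm_runtime;

            return 0;

    err_pm_runtime:
            pm_runtime_disable(&i2c->dev);   /* balance the enable above */
            return ret;
    }

    static int foo_i2c_remove(struct i2c_client *i2c)
    {
            foo_unregister(&i2c->dev);       /* assumed helper */
            pm_runtime_disable(&i2c->dev);   /* balance probe's enable */
            return 0;
    }

Every successful pm_runtime_enable() needs exactly one matching pm_runtime_disable() on each exit path, or the device's disable depth stays unbalanced.
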
index a3e97b46b64e3871ec9362b231d19f9334628229..ba34252b7bba4fdd8d1b7c58a313b490c14aa329 100644 (file)
@@ -131,23 +131,32 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
 
        if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
                for (i = 0; i < 4; i++)
-                       i2s_write_reg(dev->i2s_base, TOR(i), 0);
+                       i2s_read_reg(dev->i2s_base, TOR(i));
        } else {
                for (i = 0; i < 4; i++)
-                       i2s_write_reg(dev->i2s_base, ROR(i), 0);
+                       i2s_read_reg(dev->i2s_base, ROR(i));
        }
 }
 
 static void i2s_start(struct dw_i2s_dev *dev,
                      struct snd_pcm_substream *substream)
 {
-
+       u32 i, irq;
        i2s_write_reg(dev->i2s_base, IER, 1);
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               for (i = 0; i < 4; i++) {
+                       irq = i2s_read_reg(dev->i2s_base, IMR(i));
+                       i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30);
+               }
                i2s_write_reg(dev->i2s_base, ITER, 1);
-       else
+       } else {
+               for (i = 0; i < 4; i++) {
+                       irq = i2s_read_reg(dev->i2s_base, IMR(i));
+                       i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03);
+               }
                i2s_write_reg(dev->i2s_base, IRER, 1);
+       }
 
        i2s_write_reg(dev->i2s_base, CER, 1);
 }
index 48b2d24dd1f0a9a639c6bd7e7549035ed38f8e15..b95132e2f9dc299d82c783810982c2d5b768fb35 100644 (file)
@@ -95,7 +95,8 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
                /* data on rising edge of bclk, frame low 1clk before data */
-               strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0;
+               strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSI |
+                       SSI_STCR_TEFS;
                scr |= SSI_SCR_NET;
                if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) {
                        scr &= ~SSI_I2S_MODE_MASK;
@@ -104,33 +105,31 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
                break;
        case SND_SOC_DAIFMT_LEFT_J:
                /* data on rising edge of bclk, frame high with data */
-               strcr |= SSI_STCR_TXBIT0;
+               strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP;
                break;
        case SND_SOC_DAIFMT_DSP_B:
                /* data on rising edge of bclk, frame high with data */
-               strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0;
+               strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL;
                break;
        case SND_SOC_DAIFMT_DSP_A:
                /* data on rising edge of bclk, frame high 1clk before data */
-               strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
+               strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL |
+                       SSI_STCR_TEFS;
                break;
        }
 
        /* DAI clock inversion */
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
        case SND_SOC_DAIFMT_IB_IF:
-               strcr |= SSI_STCR_TFSI;
-               strcr &= ~SSI_STCR_TSCKP;
+               strcr ^= SSI_STCR_TSCKP | SSI_STCR_TFSI;
                break;
        case SND_SOC_DAIFMT_IB_NF:
-               strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI);
+               strcr ^= SSI_STCR_TSCKP;
                break;
        case SND_SOC_DAIFMT_NB_IF:
-               strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP;
+               strcr ^= SSI_STCR_TFSI;
                break;
        case SND_SOC_DAIFMT_NB_NF:
-               strcr &= ~SSI_STCR_TFSI;
-               strcr |= SSI_STCR_TSCKP;
                break;
        }
 
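
The rewritten imx-ssi polarity handling works in two passes: each format case sets a complete baseline, SSI_STCR_TSCKP (data clocked on the rising edge) plus whatever frame-sync inversion the format itself implies, and the inversion switch then XORs bits to flip them relative to that baseline instead of forcing absolute values. The old absolute sets discarded format-implied polarity (I2S needs an inverted frame sync regardless of the _NF/_IF flag). The toggle-versus-set distinction, with made-up bit names:

    #define CLK_POL (1u << 0)            /* hypothetical polarity bits */
    #define FS_POL  (1u << 1)

    /* Flips relative to whatever the format chose. */
    unsigned int invert_fs(unsigned int fmt_bits)
    {
            return fmt_bits ^ FS_POL;
    }

    /* Absolute set: silently discards the format's baseline. */
    unsigned int force_fs(unsigned int fmt_bits)
    {
            return fmt_bits | FS_POL;
    }
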
index 82e350e9501ccc0d5ebc82962f60c034466448eb..ac75816ada7c31b133586693e5c73a41dc79348f 100644 (file)
@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
        struct snd_seq_oss_reg *arg;
        struct snd_seq_device *dev;
 
-       if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
+       /* use device #1 here to avoid conflicts with OPL3 */
+       if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
                               sizeof(struct snd_seq_oss_reg), &dev) < 0)
                return;
 
index 349bc96ca1fedc4946ab9596edd5dd1b3813ac0b..e5f18a288b7489a93e880657401dd1f99014deda 100644 (file)
@@ -17,6 +17,7 @@ libperf-y += levenshtein.o
 libperf-y += llvm-utils.o
 libperf-y += parse-options.o
 libperf-y += parse-events.o
+libperf-y += perf_regs.o
 libperf-y += path.o
 libperf-y += rbtree.o
 libperf-y += bitmap.o
@@ -103,7 +104,6 @@ libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
 
 libperf-y += scripting-engines/
 
-libperf-$(CONFIG_PERF_REGS) += perf_regs.o
 libperf-$(CONFIG_ZLIB) += zlib.o
 libperf-$(CONFIG_LZMA) += lzma.o
 
index 885e8ac83997905db7baa0d334015b5cf46e37d9..6b8eb13e14e4d5897fca71c41634a07cc9ec5d16 100644 (file)
@@ -6,6 +6,7 @@ const struct sample_reg __weak sample_reg_masks[] = {
        SMPL_REG_END
 };
 
+#ifdef HAVE_PERF_REGS_SUPPORT
 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 {
        int i, idx = 0;
@@ -29,3 +30,4 @@ out:
        *valp = regs->cache_regs[id];
        return 0;
 }
+#endif
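
These perf hunks flip the build logic for register support: util/Build now compiles perf_regs.o unconditionally, and the C file hides everything except the __weak sample_reg_masks[] fallback behind HAVE_PERF_REGS_SUPPORT. Generic code can then always link against the symbol, while an architecture that provides a real register table overrides the weak default at link time. The pattern in miniature, with hypothetical names:

    /* Always-built translation unit. */
    struct reg_desc { const char *name; unsigned long mask; };

    /* Weak fallback: an arch file defining the same symbol wins. */
    const struct reg_desc __attribute__((weak)) sample_regs[] = { { 0, 0 } };

    #ifdef HAVE_REGS_SUPPORT            /* assumed feature-test define */
    int reg_value(unsigned long *valp, int id)
    {
            *valp = sample_regs[id].mask;   /* placeholder body */
            return 0;
    }
    #endif

Gating the object in the Build file instead, as the removed CONFIG_PERF_REGS line did, leaves generic callers with an unresolved symbol whenever the feature is off.
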
index 2984dcc54d67cd7acb9d02975c4fb1b7b55fdb7c..679d6e493962267f7b219b88c05d06c61ef1d2ea 100644 (file)
@@ -2,6 +2,7 @@
 #define __PERF_REGS_H
 
 #include <linux/types.h>
+#include <linux/compiler.h>
 
 struct regs_dump;
 
index d1b6475095967fb029af77ed69956c28b7828226..6cae06117b55297e031a210129e7f5a3f9d4c053 100644 (file)
 
 #define FIXUP_SECTION ".ex_fixup"
 
+static inline unsigned long __fls(unsigned long x);
+
 #include "word-at-a-time.h"
 
 #include "utils.h"
 
+static inline unsigned long __fls(unsigned long x)
+{
+       int lz;
+
+       asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
+       return sizeof(unsigned long) * 8 - 1 - lz;
+}
 
 static int page_size;
 static char *mem_region;
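
word-at-a-time.h relies on the kernel's __fls() (bit index of the most significant set bit), which a userspace selftest does not get from libc, so this test, apparently load_unaligned_zeropad.c, supplies its own via the PowerPC count-leading-zeros instruction: for an N-bit word, __fls(x) = N - 1 - clz(x). The forward declaration is needed because word-at-a-time.h uses __fls() before the definition appears. A portable equivalent using a GCC builtin, for comparison; like __fls(), it is undefined for x == 0:

    /* Hypothetical portable version: __builtin_clzl(0) is undefined,
     * mirroring the cntlz-based original. */
    static inline unsigned long generic_fls_index(unsigned long x)
    {
            return sizeof(unsigned long) * 8 - 1 - __builtin_clzl(x);
    }
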