Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
author     David S. Miller <davem@davemloft.net>
           Sat, 6 Dec 2014 04:56:46 +0000 (20:56 -0800)
committer  David S. Miller <davem@davemloft.net>
           Sat, 6 Dec 2014 04:56:46 +0000 (20:56 -0800)
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following batch contains netfilter updates for net-next. Basically:
enhancements for xt_recent, skipping the zeroing of the timer in conntrack,
a fix for a linking problem with the recent redirect support for nf_tables,
ipset updates and a couple of cleanups. More specifically, they are:

1) Raise the maximum number of entries remembered per IP address in
   xt_recent, while retaining backward compatibility, from Florian Westphal.

2) Skip zeroing the timer area in nf_conn objects, also from Florian.

3) Inspect IPv4 and IPv6 traffic from the bridge to allow filtering using
   meta l4proto and the transport layer header, from Alvaro Neira.

4) Fix linking problems in the new redirect support when CONFIG_IPV6=n
   and IP6_NF_IPTABLES=n.

And ipset updates from Jozsef Kadlecsik:

5) Support updating element extensions when the set is full (fixes
   netfilter bugzilla id 880).

6) Fix the set match with 32-bit userspace / 64-bit kernel.

7) Indicate explicitly when /0 networks are supported in ipset.

8) Simplify cidr handling for hash:*net* types.

9) Allocate the proper size of memory when /0 networks are supported.

10) Explicitly add padding elements to hash:net,net and hash:net,port,
    because the elements must be u32 sized for the used hash function.

Jozsef is also cooking the ipset RCU conversion, which should land soon if
it reaches the merge window in time.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
350 files changed:
Documentation/ABI/testing/sysfs-class-net
Documentation/filesystems/overlayfs.txt
Documentation/networking/switchdev.txt [new file with mode: 0644]
Documentation/networking/timestamping.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/exynos5250-snow.dts
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/r8a7740.dtsi
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/tegra114-dalmore.dts
arch/arm/boot/dts/tegra114-roth.dts
arch/arm/boot/dts/tegra114-tn7.dts
arch/arm/boot/dts/tegra114.dtsi
arch/arm/boot/dts/tegra124-jetson-tk1.dts
arch/arm/boot/dts/tegra124-nyan-big.dts
arch/arm/boot/dts/tegra124-venice2.dts
arch/arm/boot/dts/tegra124.dtsi
arch/arm/boot/dts/tegra20-harmony.dts
arch/arm/boot/dts/tegra20-iris-512.dts
arch/arm/boot/dts/tegra20-medcom-wide.dts
arch/arm/boot/dts/tegra20-paz00.dts
arch/arm/boot/dts/tegra20-seaboard.dts
arch/arm/boot/dts/tegra20-tamonten.dtsi
arch/arm/boot/dts/tegra20-trimslice.dts
arch/arm/boot/dts/tegra20-ventana.dts
arch/arm/boot/dts/tegra20-whistler.dts
arch/arm/boot/dts/tegra20.dtsi
arch/arm/boot/dts/tegra30-apalis-eval.dts
arch/arm/boot/dts/tegra30-beaver.dts
arch/arm/boot/dts/tegra30-cardhu.dtsi
arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
arch/arm/boot/dts/tegra30.dtsi
arch/arm/configs/davinci_all_defconfig
arch/arm/configs/exynos_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/include/asm/thread_info.h
arch/arm/kernel/traps.c
arch/arm/kvm/mmu.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-shmobile/clock-r8a7740.c
arch/arm/mach-shmobile/clock-r8a7790.c
arch/arm/mach-shmobile/setup-sh73a0.c
arch/arm/mach-tegra/irq.c
arch/arm/mm/proc-v7.S
arch/arm/mm/proc-xscale.S
arch/arm64/kvm/sys_regs.c
arch/ia64/kvm/kvm-ia64.c
arch/mips/Kconfig
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/uaccess.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/bmips_vec.S
arch/mips/kernel/cps-vec.S
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/rtlx.c
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/loongson/common/Makefile
arch/mips/mm/tlbex.c
arch/mips/mti-sead3/sead3-leds.c
arch/mips/netlogic/xlp/Makefile
arch/powerpc/configs/85xx/ge_imp3a_defconfig
arch/powerpc/configs/86xx/gef_ppc9a_defconfig
arch/powerpc/configs/86xx/gef_sbc310_defconfig
arch/powerpc/configs/86xx/gef_sbc610_defconfig
arch/powerpc/configs/86xx/sbc8641d_defconfig
arch/powerpc/configs/c2k_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ppc64e_defconfig
arch/powerpc/configs/ppc6xx_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/configs/pseries_le_defconfig
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/kernel/eeh_sysfs.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/vdso32/getcpu.S
arch/powerpc/platforms/powernv/opal-hmi.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/xmon/xmon.c
arch/sparc/include/asm/dma-mapping.h
arch/tile/configs/tilegx_defconfig
arch/tile/configs/tilepro_defconfig
arch/x86/include/asm/page_32_types.h
arch/x86/include/asm/page_64_types.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/traps.h
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/traps.c
arch/x86/kvm/mmu.c
crypto/algif_hash.c
crypto/algif_skcipher.c
drivers/atm/solos-pci.c
drivers/clk/at91/clk-usb.c
drivers/clk/clk-divider.c
drivers/clk/pxa/clk-pxa27x.c
drivers/clk/qcom/mmcc-apq8084.c
drivers/clk/rockchip/clk.c
drivers/clocksource/sun4i_timer.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/hwmon/g762.c
drivers/iio/accel/bmc150-accel.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/adc/men_z188_adc.c
drivers/iio/gyro/bmg160.c
drivers/input/joystick/xpad.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/isdn/mISDN/socket.c
drivers/net/Kconfig
drivers/net/dsa/bcm_sf2.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/Makefile
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h [new file with mode: 0644]
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c [new file with mode: 0644]
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/rocker/Kconfig [new file with mode: 0644]
drivers/net/ethernet/rocker/Makefile [new file with mode: 0644]
drivers/net/ethernet/rocker/rocker.c [new file with mode: 0644]
drivers/net/ethernet/rocker/rocker.h [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/hyperv/netvsc.c
drivers/net/ipvlan/ipvlan.h
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/ppp/pppoe.c
drivers/net/tun.c
drivers/net/vxlan.c
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/xen-netback/xenbus.c
drivers/pci/msi.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/csiostor/csio_hw_chip.h
drivers/scsi/csiostor/csio_init.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/ufs/ufshcd-pltfrm.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/spi/spi-dw.c
drivers/spi/spi-sirf.c
drivers/spi/spi.c
drivers/staging/rtl8188eu/core/rtw_cmd.c
drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
drivers/staging/rtl8188eu/core/rtw_wlan_util.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/thermal/cpu_cooling.c
drivers/thermal/samsung/exynos_thermal_common.c
drivers/thermal/st/st_thermal.c
drivers/tty/serial/of_serial.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/ep0.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/keyspan.c
drivers/usb/serial/ssu100.c
drivers/usb/storage/unusual_uas.h
fs/Makefile
fs/aio.c
fs/btrfs/ctree.c
fs/btrfs/locking.c
fs/btrfs/locking.h
fs/dcache.c
fs/isofs/inode.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfsd.h
fs/overlayfs/Kconfig
fs/overlayfs/Makefile
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/readdir.c
fs/overlayfs/super.c
include/dt-bindings/clock/qcom,mmcc-apq8084.h
include/linux/clk-provider.h
include/linux/if_bridge.h
include/linux/iio/events.h
include/linux/kvm_host.h
include/linux/netdevice.h
include/linux/pci.h
include/linux/percpu-refcount.h
include/linux/rtnetlink.h
include/linux/skbuff.h
include/net/af_vsock.h
include/net/checksum.h
include/net/inet_common.h
include/net/ipx.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/switchdev.h [new file with mode: 0644]
include/sound/pcm.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/neighbour.h
include/uapi/sound/asound.h
kernel/events/uprobes.c
kernel/sched/idle_task.c
kernel/sched/stop_task.c
net/Kconfig
net/Makefile
net/appletalk/ddp.c
net/atm/common.c
net/ax25/af_ax25.c
net/bluetooth/hci_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bridge/br_fdb.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp.c
net/caif/caif_socket.c
net/can/bcm.c
net/can/raw.c
net/core/datagram.c
net/core/dev.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/dccp/proto.c
net/decnet/af_decnet.c
net/ieee802154/dgram.c
net/ieee802154/raw.c
net/ipv4/af_inet.c
net/ipv4/fou.c
net/ipv4/ip_vti.c
net/ipv4/ping.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_udp_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/raw.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipx/af_ipx.c
net/ipx/ipx_route.c
net/irda/af_irda.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/netfilter/nf_conntrack_core.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/nfc/llcp_commands.c
net/nfc/rawsock.c
net/packet/af_packet.c
net/phonet/datagram.c
net/phonet/pep.c
net/rds/ib.h
net/rds/ib_recv.c
net/rds/iw.h
net/rds/iw_recv.c
net/rds/message.c
net/rds/rds.h
net/rds/recv.c
net/rds/send.c
net/rds/tcp.h
net/rds/tcp_recv.c
net/rose/af_rose.c
net/sched/sch_fq.c
net/sctp/chunk.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/sunrpc/svcsock.c
net/switchdev/Kconfig [new file with mode: 0644]
net/switchdev/Makefile [new file with mode: 0644]
net/switchdev/switchdev.c [new file with mode: 0644]
net/tipc/Makefile
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.h
net/tipc/core.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/node.c
net/tipc/node.h
net/tipc/node_subscr.c [deleted file]
net/tipc/node_subscr.h [deleted file]
net/tipc/socket.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/x25/af_x25.c
sound/core/pcm.c
sound/core/pcm_misc.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_priv.h
sound/pci/hda/patch_realtek.c
sound/usb/mixer_quirks.c
sound/usb/quirks.c
virt/kvm/arm/vgic.c
virt/kvm/kvm_main.c

index e1b2e785bba80ea468e6da0a3b055f39a0fce9d8..beb8ec4dabbc648dd629e16a076662c9b78ff529 100644 (file)
@@ -216,3 +216,11 @@ Contact:   netdev@vger.kernel.org
 Description:
                Indicates the interface protocol type as a decimal value. See
                include/uapi/linux/if_arp.h for all possible values.
+
+What:          /sys/class/net/<iface>/phys_switch_id
+Date:          November 2014
+KernelVersion: 3.19
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the unique physical switch identifier of the switch
+               this port belongs to, as a string.
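
As a quick illustration (not part of the patch), the new attribute reads like
any other sysfs file. A minimal C sketch follows; the interface name "sw0p0"
is an assumption and depends on the switch driver in use:

    /* Hedged sketch: dump /sys/class/net/<iface>/phys_switch_id. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            ssize_t n;
            /* "sw0p0" is a hypothetical port name; substitute your own. */
            int fd = open("/sys/class/net/sw0p0/phys_switch_id", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf) - 1);
            close(fd);
            if (n < 0) {
                    perror("read");
                    return 1;
            }
            buf[n] = '\0';
            printf("phys_switch_id: %s", buf); /* sysfs value ends in '\n' */
            return 0;
    }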
index 530850a72735ac6c47c46a51e71b84ca427cee5d..a27c950ece61b0d312fbba2a3757fab71c5b3616 100644 (file)
@@ -64,7 +64,7 @@ is formed.
 At mount time, the two directories given as mount options "lowerdir" and
 "upperdir" are combined into a merged directory:
 
-  mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper,\
+  mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,\
 workdir=/work /merged
 
 The "workdir" needs to be an empty directory on the same filesystem
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
new file mode 100644 (file)
index 0000000..f981a92
--- /dev/null
@@ -0,0 +1,59 @@
+Switch (and switch-ish) device drivers HOWTO
+============================================
+
+Please note that the word "switch" is used here in a very generic meaning.
+This includes devices supporting L2/L3 but also various flow offloading chips,
+including switches embedded into SR-IOV NICs.
+
+Let's describe a topology a bit. Imagine the following example:
+
+       +----------------------------+    +---------------+
+       |     SOME switch chip       |    |      CPU      |
+       +----------------------------+    +---------------+
+       port1 port2 port3 port4 MNGMNT    |     PCI-E     |
+         |     |     |     |     |       +---------------+
+        PHY   PHY    |     |     |         |  NIC0 NIC1
+                     |     |     |         |   |    |
+                     |     |     +- PCI-E -+   |    |
+                     |     +------- MII -------+    |
+                     +------------- MII ------------+
+
+In this example, there are two independent lines between the switch silicon
+and the CPU. The NIC0 and NIC1 drivers are not aware of the switch's presence.
+They are separate from the switch driver. The SOME switch chip is managed by a
+driver via the PCI-E device MNGMNT. Note that the MNGMNT device, NIC0 and NIC1
+may be connected to some other type of bus.
+
+Now, for the previous example, here is the representation in the kernel:
+
+       +----------------------------+    +---------------+
+       |     SOME switch chip       |    |      CPU      |
+       +----------------------------+    +---------------+
+       sw0p0 sw0p1 sw0p2 sw0p3 MNGMNT    |     PCI-E     |
+         |     |     |     |     |       +---------------+
+        PHY   PHY    |     |     |         |  eth0 eth1
+                     |     |     |         |   |    |
+                     |     |     +- PCI-E -+   |    |
+                     |     +------- MII -------+    |
+                     +------------- MII ------------+
+
+Let's call the example switch driver for the SOME switch chip "SOMEswitch".
+This driver takes care of the PCI-E device MNGMNT. There is a netdevice
+instance sw0pX created for each port of the switch. These netdevices are
+instances of the "SOMEswitch" driver and serve as a "representation" of the
+switch chip. eth0 and eth1 are instances of some other existing driver.
+
+The only difference between a switch-port netdevice and an ordinary netdevice
+is that it implements a couple more NDOs:
+
+  ndo_switch_parent_id_get - Returns the same ID for any two port netdevices
+                            of the same physical switch chip. All switch
+                            drivers must implement this; it lets the caller
+                            recognize a netdevice as a switch port.
+  ndo_switch_parent_* - Functions that manipulate the switch chip itself (it
+                       can be thought of as a "parent" of the port, hence the
+                       name). They are not port-specific: the caller may use
+                       an arbitrary port netdevice of the same switch and it
+                       will make no difference.
+  ndo_switch_port_* - Functions that perform port-specific manipulation.
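
To make the NDO contract above concrete, here is a hedged driver-side sketch
of ndo_switch_parent_id_get, loosely modeled on the rocker driver added in
this merge; all "somesw" names are hypothetical:

    /* Hypothetical switch driver; only the parent-ID NDO is shown. */
    static int somesw_port_switch_parent_id_get(struct net_device *dev,
                                                struct netdev_phys_item_id *psid)
    {
            struct somesw_port *port = netdev_priv(dev); /* assumed priv */

            /* Same chip => same ID, no matter which port netdevice the
             * caller happens to hold. */
            memcpy(&psid->id, &port->chip->hw_id, sizeof(port->chip->hw_id));
            psid->id_len = sizeof(port->chip->hw_id);
            return 0;
    }

    static const struct net_device_ops somesw_netdev_ops = {
            /* ... the usual ndo_open/ndo_stop/ndo_start_xmit ... */
            .ndo_switch_parent_id_get = somesw_port_switch_parent_id_get,
    };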
index 412f45ca2d73e3cd31a487e5fe13b73fc552d9f5..1d6d02d6ba52b531642436e8a6d8c0640af5467d 100644 (file)
@@ -136,7 +136,7 @@ SOF_TIMESTAMPING_OPT_ID:
 
   This option is implemented only for transmit timestamps. There, the
   timestamp is always looped along with a struct sock_extended_err.
-  The option modifies field ee_info to pass an id that is unique
+  The option modifies field ee_data to pass an id that is unique
   among all possibly concurrently outstanding timestamp requests for
   that socket. In practice, it is a monotonically increasing u32
   (that wraps).
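
For reference, here is a minimal userspace sketch of how the corrected field
is consumed. It assumes a UDP/IPv4 socket that already has SO_TIMESTAMPING
with SOF_TIMESTAMPING_OPT_ID enabled; an IPv6 socket would match on
SOL_IPV6/IPV6_RECVERR instead. Only the control data is read here:

    #include <errno.h>
    #include <linux/errqueue.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    #ifndef SOL_IP
    #define SOL_IP IPPROTO_IP /* some libcs do not define SOL_IP */
    #endif

    /* Drain one notification from the error queue and print its id. */
    static void print_tstamp_id(int fd)
    {
            char ctrl[512];
            struct msghdr msg = {
                    .msg_control = ctrl,
                    .msg_controllen = sizeof(ctrl),
            };
            struct cmsghdr *cm;
            struct sock_extended_err serr;

            if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                    return;

            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                    if (cm->cmsg_level != SOL_IP ||
                        cm->cmsg_type != IP_RECVERR)
                            continue;
                    memcpy(&serr, CMSG_DATA(cm), sizeof(serr));
                    if (serr.ee_errno == ENOMSG &&
                        serr.ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
                            printf("timestamp id %u\n", serr.ee_data);
            }
    }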
index a545d68af54cad334ebcd16527aaedeafdafc74e..c95153559ed2073ebbb96f895dd15dd02c729dfb 100644 (file)
@@ -6908,11 +6908,12 @@ F:      drivers/scsi/osd/
 F:     include/scsi/osd_*
 F:     fs/exofs/
 
-OVERLAYFS FILESYSTEM
+OVERLAY FILESYSTEM
 M:     Miklos Szeredi <miklos@szeredi.hu>
-L:     linux-fsdevel@vger.kernel.org
+L:     linux-unionfs@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
 S:     Supported
-F:     fs/overlayfs/*
+F:     fs/overlayfs/
 F:     Documentation/filesystems/overlayfs.txt
 
 P54 WIRELESS DRIVER
@@ -7864,6 +7865,13 @@ F:       drivers/hid/hid-roccat*
 F:     include/linux/hid-roccat*
 F:     Documentation/ABI/*/sysfs-driver-hid-roccat*
 
+ROCKER DRIVER
+M:     Jiri Pirko <jiri@resnulli.us>
+M:     Scott Feldman <sfeldma@gmail.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/ethernet/rocker/
+
 ROCKETPORT DRIVER
 P:     Comtrol Corp.
 W:     http://www.comtrol.com
@@ -9058,6 +9066,13 @@ F:       lib/swiotlb.c
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 
+SWITCHDEV
+M:     Jiri Pirko <jiri@resnulli.us>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     net/switchdev/
+F:     include/net/switchdev.h
+
 SYNOPSYS ARC ARCHITECTURE
 M:     Vineet Gupta <vgupta@synopsys.com>
 S:     Supported
index 00d618bbe8e7588a8c36ed0ba0371b219e8d8543..2fd5c4e5c139b60d28344cc62d117bd9df7f7d09 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
index e51fcef884a43d629ab12132e549cc8a0b731405..60429ad1c5d8451c1e481e0190ad5d312dc54522 100644 (file)
        num-cs = <1>;
 };
 
+&usbdrd_dwc3 {
+       dr_mode = "host";
+};
+
 #include "cros-ec-keyboard.dtsi"
index f21b9aa00fbb214f4dee8b0b1980884f5411c70f..d55c1a2eb798966340325afbb5c3009c8bcef28c 100644 (file)
                #size-cells = <1>;
                ranges;
 
-               dwc3 {
+               usbdrd_dwc3: dwc3 {
                        compatible = "synopsys,dwc3";
                        reg = <0x12000000 0x10000>;
                        interrupts = <0 72 0>;
index d46c213a17ad5de43972fd4f7b28beda61b53347..eed697a6bd6bb290ca16f2536cf04651d56c2d15 100644 (file)
                        clocks = <&cpg_clocks R8A7740_CLK_S>,
                                 <&cpg_clocks R8A7740_CLK_S>, <&sub_clk>,
                                 <&cpg_clocks R8A7740_CLK_B>,
-                                <&sub_clk>, <&sub_clk>,
+                                <&cpg_clocks R8A7740_CLK_HPP>, <&sub_clk>,
                                 <&cpg_clocks R8A7740_CLK_B>;
                        #clock-cells = <1>;
                        renesas,clock-indices = <
index d0e17733dc1a340608b10b3f4f595e93ec53d45d..e20affe156c1c3349be801c446b48f944bcc5e98 100644 (file)
                        #clock-cells = <0>;
                        clock-output-names = "sd2";
                };
-               sd3_clk: sd3_clk@e615007c {
+               sd3_clk: sd3_clk@e615026c {
                        compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
-                       reg = <0 0xe615007c 0 4>;
+                       reg = <0 0xe615026c 0 4>;
                        clocks = <&pll1_div2_clk>;
                        #clock-cells = <0>;
                        clock-output-names = "sd3";
index 543f895d18d3c870446f52085095221566ef83a3..2e652e2339e9a1caf9c4abead9d7b958cc7515a1 100644 (file)
                        clocks = <&ahb1_gates 6>;
                        resets = <&ahb1_rst 6>;
                        #dma-cells = <1>;
+
+                       /* DMA controller requires AHB1 clocked from PLL6 */
+                       assigned-clocks = <&ahb1_mux>;
+                       assigned-clock-parents = <&pll6>;
                };
 
                mmc0: mmc@01c0f000 {
index 5c21d216515a3d5a06b76a4b449cc72ef208b6af..8b7aa0dcdc6ee4d4a04100650c1ac33cebcd6762 100644 (file)
@@ -15,6 +15,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps65913@58";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index c7c6825f11fbb1b902b6f230eb62d8d8f88077a6..38acf78d7815fab2ab14842503a4bf79c040f9ed 100644 (file)
                linux,initrd-end = <0x82800000>;
        };
 
+       aliases {
+               serial0 = &uartd;
+       };
+
        firmware {
                trusted-foundations {
                        compatible = "tlm,trusted-foundations";
                                                regulator-name = "vddio-sdmmc3";
                                                regulator-min-microvolt = <1800000>;
                                                regulator-max-microvolt = <3300000>;
-                                               regulator-always-on;
-                                               regulator-boot-on;
                                        };
 
                                        ldousb {
        sdhci@78000400 {
                status = "okay";
                bus-width = <4>;
-               vmmc-supply = <&vddio_sdmmc3>;
+               vqmmc-supply = <&vddio_sdmmc3>;
                cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
                power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>;
        };
        sdhci@78000600 {
                status = "okay";
                bus-width = <8>;
-               vmmc-supply = <&vdd_1v8>;
                non-removable;
        };
 
index 96366214563542479db657f80b12a28918e30824..f91c2c9b2f9431aef1e1611ba2eaf064a40aed7d 100644 (file)
                linux,initrd-end = <0x82800000>;
        };
 
+       aliases {
+               serial0 = &uartd;
+       };
+
        firmware {
                trusted-foundations {
                        compatible = "tlm,trusted-foundations";
        sdhci@78000600 {
                status = "okay";
                bus-width = <8>;
-               vmmc-supply = <&vdd_1v8>;
                non-removable;
        };
 
index 2ca9c1807f72374bb176aada1203f6d9bc88e220..222f3b3f4dd5c4f852259349228db76e89c7ac27 100644 (file)
@@ -9,13 +9,6 @@
        compatible = "nvidia,tegra114";
        interrupt-parent = <&gic>;
 
-       aliases {
-               serial0 = &uarta;
-               serial1 = &uartb;
-               serial2 = &uartc;
-               serial3 = &uartd;
-       };
-
        host1x@50000000 {
                compatible = "nvidia,tegra114-host1x", "simple-bus";
                reg = <0x50000000 0x00028000>;
index 029c9a0215413355d3ce7c081a3ac5bd00fcd3d7..51b373ff106555edf302c9066809ea4abbd25c5c 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@0,7000d000/pmic@40";
                rtc1 = "/rtc@0,7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 7d0784ce4c748183083fa4ab5947b9adfb083d72..53181d31024713796897f5980cf9994339eb691f 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@0,7000d000/pmic@40";
                rtc1 = "/rtc@0,7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index 13008858e96754dda8de3f838add6e0b6d4fb911..5c3f7813360d2a59bffcc463f059b2047440fabf 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@0,7000d000/pmic@40";
                rtc1 = "/rtc@0,7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index 478c555ebd96bd0897bae7d3d0e7fa368573cba5..df2b06b299851a85533243a96126e49e43813066 100644 (file)
         * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
         */
-       serial@0,70006000 {
+       uarta: serial@0,70006000 {
                compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
                reg = <0x0 0x70006000 0x0 0x40>;
                reg-shift = <2>;
                status = "disabled";
        };
 
-       serial@0,70006040 {
+       uartb: serial@0,70006040 {
                compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
                reg = <0x0 0x70006040 0x0 0x40>;
                reg-shift = <2>;
                status = "disabled";
        };
 
-       serial@0,70006200 {
+       uartc: serial@0,70006200 {
                compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
                reg = <0x0 0x70006200 0x0 0x40>;
                reg-shift = <2>;
                status = "disabled";
        };
 
-       serial@0,70006300 {
+       uartd: serial@0,70006300 {
                compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
                reg = <0x0 0x70006300 0x0 0x40>;
                reg-shift = <2>;
index a37279af687c6a436ba5308c6139a5c3c8be9014..b926a07b944303fb24468d6899bc9324c7c956bb 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 8cfb83f42e1fd87ff608b171a2c404942975b0ff..1dd7d7bfdfcc25e3d3fcb35e6f17c4ccb95b2b5f 100644 (file)
@@ -6,6 +6,11 @@
        model = "Toradex Colibri T20 512MB on Iris";
        compatible = "toradex,iris", "toradex,colibri_t20-512", "nvidia,tegra20";
 
+       aliases {
+               serial0 = &uarta;
+               serial1 = &uartd;
+       };
+
        host1x@50000000 {
                hdmi@54280000 {
                        status = "okay";
index 1b7c56b33acae6f2c6c4b1da3154d6c92aebb96a..9b87526ab0b70fad25125a4f5764d418243aa50c 100644 (file)
@@ -6,6 +6,10 @@
        model = "Avionic Design Medcom-Wide board";
        compatible = "ad,medcom-wide", "ad,tamonten", "nvidia,tegra20";
 
+       aliases {
+               serial0 = &uartd;
+       };
+
        pwm@7000a000 {
                status = "okay";
        };
index d4438e30de456c70047457f6ee974ac31a6f686f..ed7e1009326cd748628c5422849bfffad2a3b21e 100644 (file)
@@ -10,6 +10,8 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
+               serial1 = &uartc;
        };
 
        memory {
index a1d4bf9895d74c8b0efb7d2fe948a9fcd602b25e..ea282c7c0ca5645394a28e313fbad1deac339882 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 80e7d386ce3452e3776e70771233eea336a81e98..13d4e6185275f43c3f74fbeb0397d97b0348f9fb 100644 (file)
@@ -7,6 +7,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 5ad87979ab13ff68527f3b6c8a1ba9bb51e2bda5..d99af4ef9c6444f73e7044c7447557fbca4b4ee1 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000c500/rtc@56";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index ca8484cccddccc32313649e1abfd644b527f50a3..04c58e9ca490bb8bf205b4608d371bbecf5f3f61 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps6586x@34";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uartd;
        };
 
        memory {
index 1843725785c90f1f2518bade455af7dead4c6ec8..340d81108df1a232fcefe64c81bdb9372a4c814c 100644 (file)
@@ -10,6 +10,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/max8907@3c";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index 3b374c49d04d962478aebc62799bcce5b31aa6dd..8acf5d85c99da5b0077f6ce1b80c2ba08d45865c 100644 (file)
@@ -9,14 +9,6 @@
        compatible = "nvidia,tegra20";
        interrupt-parent = <&intc>;
 
-       aliases {
-               serial0 = &uarta;
-               serial1 = &uartb;
-               serial2 = &uartc;
-               serial3 = &uartd;
-               serial4 = &uarte;
-       };
-
        host1x@50000000 {
                compatible = "nvidia,tegra20-host1x", "simple-bus";
                reg = <0x50000000 0x00024000>;
index 45d40f024585d95a53928e36d351db05aa514592..6236bdecb48ba08891896f6967199aba6e2a6680 100644 (file)
                rtc0 = "/i2c@7000c000/rtc@68";
                rtc1 = "/i2c@7000d000/tps65911@2d";
                rtc2 = "/rtc@7000e000";
+               serial0 = &uarta;
+               serial1 = &uartb;
+               serial2 = &uartc;
+               serial3 = &uartd;
        };
 
        pcie-controller@00003000 {
index cee8f2246fdb2467fbde27bae612a0752d7f6da2..6b157eeabcc5c9b07009c4d91a291122b3732887 100644 (file)
@@ -9,6 +9,7 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps65911@2d";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
        };
 
        memory {
index 20637954624425795453cc78699e674276de5430..a1b682ea01bd70ab94025cd12a4d5205d45f9db7 100644 (file)
@@ -30,6 +30,8 @@
        aliases {
                rtc0 = "/i2c@7000d000/tps65911@2d";
                rtc1 = "/rtc@7000e000";
+               serial0 = &uarta;
+               serial1 = &uartc;
        };
 
        memory {
index 7793abd5bef132388100e8a0c1aa4ca79e7ed803..4d3ddc58564126433410c17b5c3ef569532a9aa1 100644 (file)
@@ -10,6 +10,9 @@
                rtc0 = "/i2c@7000c000/rtc@68";
                rtc1 = "/i2c@7000d000/tps65911@2d";
                rtc2 = "/rtc@7000e000";
+               serial0 = &uarta;
+               serial1 = &uartb;
+               serial2 = &uartd;
        };
 
        host1x@50000000 {
index aa6ccea13d308036e853b58bafe840d7c3450e87..b270b9e3d4554407157be95cadd71c9b5e902eeb 100644 (file)
@@ -9,14 +9,6 @@
        compatible = "nvidia,tegra30";
        interrupt-parent = <&intc>;
 
-       aliases {
-               serial0 = &uarta;
-               serial1 = &uartb;
-               serial2 = &uartc;
-               serial3 = &uartd;
-               serial4 = &uarte;
-       };
-
        pcie-controller@00003000 {
                compatible = "nvidia,tegra30-pcie";
                device_type = "pci";
index f95f72d62db73de315e4688953ae8ebb285412c4..759f9b0053e294a0cee0f9ce26f63974daa84850 100644 (file)
@@ -97,7 +97,6 @@ CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
 CONFIG_PPP_DEFLATE=m
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=m
 CONFIG_INPUT_EVBUG=m
index 72058b8a6f4d4ccce4a8e5a740f82ba0321ef561..e21ef830a48365a06db80d0127fa5a3f55f17f71 100644 (file)
@@ -142,11 +142,13 @@ CONFIG_MMC_DW_IDMAC=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_MAX77802=y
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_DMADEVICES=y
 CONFIG_PL330_DMA=y
 CONFIG_COMMON_CLK_MAX77686=y
+CONFIG_COMMON_CLK_MAX77802=y
 CONFIG_COMMON_CLK_S2MPS11=y
 CONFIG_EXYNOS_IOMMU=y
 CONFIG_IIO=y
index 3487046d8a7844b68bc2365c05dfc500e3fca2c9..9d7a32f93fcf2e93a66b42b3352b7b3119d9ec0e 100644 (file)
@@ -217,6 +217,7 @@ CONFIG_I2C_CADENCE=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_S3C2410=y
 CONFIG_I2C_SIRF=y
 CONFIG_I2C_TEGRA=y
 CONFIG_I2C_ST=y
index fc44d3761f9e7d36eb8ff4911ff0120a63e7584f..ce73ab6354149f8c490319bdeb6acdbc92cd784c 100644 (file)
@@ -44,16 +44,6 @@ struct cpu_context_save {
        __u32   extra[2];               /* Xscale 'acc' register, etc */
 };
 
-struct arm_restart_block {
-       union {
-               /* For user cache flushing */
-               struct {
-                       unsigned long start;
-                       unsigned long end;
-               } cache;
-       };
-};
-
 /*
  * low level task data that entry.S needs immediate access to.
  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -79,7 +69,6 @@ struct thread_info {
        unsigned long           thumbee_state;  /* ThumbEE Handler Base register */
 #endif
        struct restart_block    restart_block;
-       struct arm_restart_block        arm_restart_block;
 };
 
 #define INIT_THREAD_INFO(tsk)                                          \
index 0c8b10801d36ad6a25806892ea283c7b93d28f61..9f5d81881eb6da9bdd599a7321cf83fbdcb0a0e2 100644 (file)
@@ -533,8 +533,6 @@ static int bad_syscall(int n, struct pt_regs *regs)
        return regs->ARM_r0;
 }
 
-static long do_cache_op_restart(struct restart_block *);
-
 static inline int
 __do_cache_op(unsigned long start, unsigned long end)
 {
@@ -543,24 +541,8 @@ __do_cache_op(unsigned long start, unsigned long end)
        do {
                unsigned long chunk = min(PAGE_SIZE, end - start);
 
-               if (signal_pending(current)) {
-                       struct thread_info *ti = current_thread_info();
-
-                       ti->restart_block = (struct restart_block) {
-                               .fn     = do_cache_op_restart,
-                       };
-
-                       ti->arm_restart_block = (struct arm_restart_block) {
-                               {
-                                       .cache = {
-                                               .start  = start,
-                                               .end    = end,
-                                       },
-                               },
-                       };
-
-                       return -ERESTART_RESTARTBLOCK;
-               }
+               if (fatal_signal_pending(current))
+                       return 0;
 
                ret = flush_cache_user_range(start, start + chunk);
                if (ret)
@@ -573,15 +555,6 @@ __do_cache_op(unsigned long start, unsigned long end)
        return 0;
 }
 
-static long do_cache_op_restart(struct restart_block *unused)
-{
-       struct arm_restart_block *restart_block;
-
-       restart_block = &current_thread_info()->arm_restart_block;
-       return __do_cache_op(restart_block->cache.start,
-                            restart_block->cache.end);
-}
-
 static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
index 57a403a5c22bf9e174ec88a0377b4ab07c3b0a29..8664ff17cbbeaf531b03174e1524cc00a6e86849 100644 (file)
@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
        pgd = pgdp + pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
-               unmap_puds(kvm, pgd, addr, next);
+               if (!pgd_none(*pgd))
+                       unmap_puds(kvm, pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
 }
 
@@ -834,6 +835,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
        return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+       return !pfn_valid(pfn);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
@@ -904,7 +910,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (is_error_pfn(pfn))
                return -EFAULT;
 
-       if (kvm_is_mmio_pfn(pfn))
+       if (kvm_is_device_pfn(pfn))
                mem_type = PAGE_S2_DEVICE;
 
        spin_lock(&kvm->mmu_lock);
index 2bdc3233abe2bcc78c527bf8efe4b0032a5880dc..044b51185fccb2e68c1f89c4efb3822704d28488 100644 (file)
@@ -400,6 +400,8 @@ int __init coherency_init(void)
                 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
                armada_375_380_coherency_init(np);
 
+       of_node_put(np);
+
        return 0;
 }
 
index 0794f0426e7044810ec46d695c4a9e0c87abd38b..19df9cb304952a1ac1c9366120885871ffd4812a 100644 (file)
@@ -455,7 +455,7 @@ enum {
        MSTP128, MSTP127, MSTP125,
        MSTP116, MSTP111, MSTP100, MSTP117,
 
-       MSTP230,
+       MSTP230, MSTP229,
        MSTP222,
        MSTP218, MSTP217, MSTP216, MSTP214,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
@@ -474,11 +474,12 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S],   SMSTPCR1, 27, 0), /* CEU20 */
        [MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
        [MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B],   SMSTPCR1, 17, 0), /* LCDC1 */
-       [MSTP116] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
+       [MSTP116] = SH_CLK_MSTP32(&div4_clks[DIV4_HPP], SMSTPCR1, 16, 0), /* IIC0 */
        [MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 11, 0), /* TMU1 */
        [MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B],   SMSTPCR1,  0, 0), /* LCDC0 */
 
        [MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */
+       [MSTP229] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 29, 0), /* INTCA */
        [MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */
        [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 18, 0), /* DMAC1 */
        [MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 17, 0), /* DMAC2 */
@@ -575,6 +576,10 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh-dma-engine.0",        &mstp_clks[MSTP218]),
        CLKDEV_DEV_ID("sh-sci.7",               &mstp_clks[MSTP222]),
        CLKDEV_DEV_ID("e6cd0000.serial",        &mstp_clks[MSTP222]),
+       CLKDEV_DEV_ID("renesas_intc_irqpin.0",  &mstp_clks[MSTP229]),
+       CLKDEV_DEV_ID("renesas_intc_irqpin.1",  &mstp_clks[MSTP229]),
+       CLKDEV_DEV_ID("renesas_intc_irqpin.2",  &mstp_clks[MSTP229]),
+       CLKDEV_DEV_ID("renesas_intc_irqpin.3",  &mstp_clks[MSTP229]),
        CLKDEV_DEV_ID("sh-sci.6",               &mstp_clks[MSTP230]),
        CLKDEV_DEV_ID("e6cc0000.serial",        &mstp_clks[MSTP230]),
 
index 126ddafad5265dc62793fd6e7f25aea16b7c42e1..f62265200592f2915ec01dec37f420afb2495f2e 100644 (file)
@@ -68,7 +68,7 @@
 
 #define SDCKCR         0xE6150074
 #define SD2CKCR                0xE6150078
-#define SD3CKCR                0xE615007C
+#define SD3CKCR                0xE615026C
 #define MMC0CKCR       0xE6150240
 #define MMC1CKCR       0xE6150244
 #define SSPCKCR                0xE6150248
index b7bd8e50966879608cde0e5c152cefb12185d9f1..328657d011d5108d1743823d75ba031488eb39eb 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/input.h>
+#include <linux/i2c/i2c-sh_mobile.h>
 #include <linux/io.h>
 #include <linux/serial_sci.h>
 #include <linux/sh_dma.h>
@@ -192,11 +193,18 @@ static struct resource i2c4_resources[] = {
        },
 };
 
+static struct i2c_sh_mobile_platform_data i2c_platform_data = {
+       .clks_per_count = 2,
+};
+
 static struct platform_device i2c0_device = {
        .name           = "i2c-sh_mobile",
        .id             = 0,
        .resource       = i2c0_resources,
        .num_resources  = ARRAY_SIZE(i2c0_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static struct platform_device i2c1_device = {
@@ -204,6 +212,9 @@ static struct platform_device i2c1_device = {
        .id             = 1,
        .resource       = i2c1_resources,
        .num_resources  = ARRAY_SIZE(i2c1_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static struct platform_device i2c2_device = {
@@ -211,6 +222,9 @@ static struct platform_device i2c2_device = {
        .id             = 2,
        .resource       = i2c2_resources,
        .num_resources  = ARRAY_SIZE(i2c2_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static struct platform_device i2c3_device = {
@@ -218,6 +232,9 @@ static struct platform_device i2c3_device = {
        .id             = 3,
        .resource       = i2c3_resources,
        .num_resources  = ARRAY_SIZE(i2c3_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static struct platform_device i2c4_device = {
@@ -225,6 +242,9 @@ static struct platform_device i2c4_device = {
        .id             = 4,
        .resource       = i2c4_resources,
        .num_resources  = ARRAY_SIZE(i2c4_resources),
+       .dev            = {
+               .platform_data  = &i2c_platform_data,
+       },
 };
 
 static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
index da7be13aecce3cd8d12de9b64c10b6e3facf252b..ab95f5391a2b631e5cace17bbb176766e7d410bf 100644 (file)
@@ -99,42 +99,42 @@ static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
 
 static void tegra_mask(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_CLR);
 }
 
 static void tegra_unmask(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_SET);
 }
 
 static void tegra_ack(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
 }
 
 static void tegra_eoi(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
 }
 
 static int tegra_retrigger(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return 0;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_SET);
 
        return 1;
 }
@@ -142,7 +142,7 @@ static int tegra_retrigger(struct irq_data *d)
 #ifdef CONFIG_PM_SLEEP
 static int tegra_set_wake(struct irq_data *d, unsigned int enable)
 {
-       u32 irq = d->irq;
+       u32 irq = d->hwirq;
        u32 index, mask;
 
        if (irq < FIRST_LEGACY_IRQ ||
index b3a947863ac7bb7e38d47b7a640d698b55a34bbc..22ac2a6fbfe373b432f43b1041ca9cf42e189837 100644 (file)
@@ -270,7 +270,6 @@ __v7_pj4b_setup:
 /* Auxiliary Debug Modes Control 1 Register */
 #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
 #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
 #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
 
 /* Auxiliary Debug Modes Control 2 Register */
@@ -293,7 +292,6 @@ __v7_pj4b_setup:
        /* Auxiliary Debug Modes Control 1 Register */
        mrc     p15, 1, r0, c15, c1, 1
        orr     r0, r0, #PJ4B_CLEAN_LINE
-       orr     r0, r0, #PJ4B_BCK_OFF_STREX
        orr     r0, r0, #PJ4B_INTER_PARITY
        bic     r0, r0, #PJ4B_STATIC_BP
        mcr     p15, 1, r0, c15, c1, 1
index 23259f104c66fd367d4663cbd4adafd240ffa50d..afa2b3c4df4a267e5a609c13e6e7d6461be85616 100644 (file)
@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
        mrc     p15, 0, r5, c15, c1, 0  @ CP access reg
        mrc     p15, 0, r6, c13, c0, 0  @ PID
        mrc     p15, 0, r7, c3, c0, 0   @ domain ID
-       mrc     p15, 0, r8, c1, c1, 0   @ auxiliary control reg
+       mrc     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mrc     p15, 0, r9, c1, c0, 0   @ control reg
        bic     r4, r4, #2              @ clear frequency change bit
        stmia   r0, {r4 - r9}           @ store cp regs
@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
        mcr     p15, 0, r6, c13, c0, 0  @ PID
        mcr     p15, 0, r7, c3, c0, 0   @ domain ID
        mcr     p15, 0, r1, c2, c0, 0   @ translation table base addr
-       mcr     p15, 0, r8, c1, c1, 0   @ auxiliary control reg
+       mcr     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mov     r0, r9                  @ control register
        b       cpu_resume_mmu
 ENDPROC(cpu_xscale_do_resume)
index 4cc3b719208e0a8238930d44b409b2f7c2beb9f5..3d7c2df89946cc1d1606a4b3401115f10e44ab71 100644 (file)
@@ -424,6 +424,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        /* VBAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
          NULL, reset_val, VBAR_EL1, 0 },
+
+       /* ICC_SRE_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
+         trap_raz_wi },
+
        /* CONTEXTIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
@@ -690,6 +695,10 @@ static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+
+       /* ICC_SRE */
+       { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+
        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 };
 
index ec6b9acb6bea8733a5f17bd7afe4a0a249ff4de9..dbe46f43884df183a69a2da387a941f55dbb371d 100644 (file)
@@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
        for (i = 0; i < npages; i++) {
                pfn = gfn_to_pfn(kvm, base_gfn + i);
-               if (!kvm_is_mmio_pfn(pfn)) {
+               if (!kvm_is_reserved_pfn(pfn)) {
                        kvm_set_pmt_entry(kvm, base_gfn + i,
                                        pfn << PAGE_SHIFT,
                                _PAGE_AR_RWX | _PAGE_MA_WB);
index f43aa536c517437bc6778d6adb1d9e0212effc8c..9536ef912f594651be7e403264f3eb30c3355384 100644 (file)
@@ -2101,9 +2101,17 @@ config 64BIT_PHYS_ADDR
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool 64BIT_PHYS_ADDR
 
+choice
+       prompt "SmartMIPS or microMIPS ASE support"
+
+config CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS
+       bool "None"
+       help
+         Select this if you want neither microMIPS nor SmartMIPS support
+
 config CPU_HAS_SMARTMIPS
        depends on SYS_SUPPORTS_SMARTMIPS
-       bool "Support for the SmartMIPS ASE"
+       bool "SmartMIPS"
        help
          SmartMIPS is an extension of the MIPS32 architecture aimed at
          increased security at both hardware and software level for
@@ -2115,11 +2123,13 @@ config CPU_HAS_SMARTMIPS
 
 config CPU_MICROMIPS
        depends on SYS_SUPPORTS_MICROMIPS
-       bool "Build kernel using microMIPS ISA"
+       bool "microMIPS"
        help
          When this option is enabled the kernel will be built using the
          microMIPS ISA
 
+endchoice
+
 config CPU_HAS_MSA
        bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
        depends on CPU_SUPPORTS_MSA
index b46cd220a018d72deb84c6b8d5ea5f75d9e22a79..22a135ac91de3830e885342b834feb47ab109eff 100644 (file)
 #define MIPS_CONF6_SYND                (_ULCAST_(1) << 13)
 /* proAptiv FTLB on/off bit */
 #define MIPS_CONF6_FTLBEN      (_ULCAST_(1) << 15)
+/* FTLB probability bits */
+#define MIPS_CONF6_FTLBP_SHIFT (16)
 
 #define MIPS_CONF7_WII         (_ULCAST_(1) << 31)
 
index 4520adc8699b9c00835a4340c8da217e90dcc305..cd6e0afc683366e598eadbaf8c572e0434fdb9bf 100644 (file)
@@ -257,7 +257,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+#ifdef CONFIG_EVA
+       protected_cachee_op(Hit_Writeback_Inv_D, addr);
+#else
        protected_cache_op(Hit_Writeback_Inv_D, addr);
+#endif
 }
 
 static inline void protected_writeback_scache_line(unsigned long addr)
index 133678ab4eb88cbf213d6516c21be76d06e23a22..22a5624e2fd2dcecf4b5592097e0c18620012426 100644 (file)
@@ -1422,7 +1422,7 @@ static inline long __strnlen_user(const char __user *s, long n)
 }
 
 /*
- * strlen_user: - Get the size of a string in user space.
+ * strnlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
  * Context: User context only. This function may sleep.
@@ -1431,9 +1431,7 @@ static inline long __strnlen_user(const char __user *s, long n)
  *
  * Returns the size of the string INCLUDING the terminating NUL.
  * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
+ * If the string is too long, returns a value greater than @n.
  */
 static inline long strnlen_user(const char __user *s, long n)
 {
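
The hunk above corrects the doc-comment for strnlen_user; in practice a
caller checks the two out-of-band return values before trusting the length.
A hypothetical caller sketch of the documented contract (ustr and maxlen are
assumed to be in scope):

    long len = strnlen_user(ustr, maxlen);

    if (len == 0)
            return -EFAULT;         /* faulted while reading user memory */
    if (len > maxlen)
            return -ENAMETOOLONG;   /* no NUL found within maxlen bytes */
    /* success: len includes the terminating NUL */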
index 9dc58568f230096244b5db31f01b59a106b1cb3c..d001bb1ad177e7b6e2df2fbb7e42e894784b2e91 100644 (file)
 #define __NR_seccomp                   (__NR_Linux + 316)
 #define __NR_getrandom                 (__NR_Linux + 317)
 #define __NR_memfd_create              (__NR_Linux + 318)
-#define __NR_memfd_create              (__NR_Linux + 319)
+#define __NR_bpf                       (__NR_Linux + 319)
 
 /*
  * Offset of the last N32 flavoured syscall
index 290c23b516789ba16193f7b72fa61eafbf7907b2..86495072a922f31e0214cad3b6ac71c10aaf5fa8 100644 (file)
@@ -208,7 +208,6 @@ bmips_reset_nmi_vec_end:
 END(bmips_reset_nmi_vec)
 
        .set    pop
-       .previous
 
 /***********************************************************************
  * CPU1 warm restart vector (used for second and subsequent boots).
@@ -281,5 +280,3 @@ LEAF(bmips_enable_xks01)
        jr      ra
 
 END(bmips_enable_xks01)
-
-       .previous
index e6e97d2a5c9e68cccde81ab0f181184d1e27fd13..0384b05ab5a02413cbcb11a163375029f285255f 100644 (file)
@@ -229,6 +229,7 @@ LEAF(mips_cps_core_init)
         nop
 
        .set    push
+       .set    mips32r2
        .set    mt
 
        /* Only allow 1 TC per VPE to execute... */
@@ -345,6 +346,7 @@ LEAF(mips_cps_boot_vpes)
         nop
 
        .set    push
+       .set    mips32r2
        .set    mt
 
 1:     /* Enter VPE configuration state */
index d5a4f380b019bb8c8a4f5ca1ce0229c45689cf4f..dc49cf30c2db46f9e0f2caef74548459c71f6714 100644 (file)
@@ -193,6 +193,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
 static char unknown_isa[] = KERN_ERR \
        "Unsupported ISA type, c0.config0: %d.";
 
+static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
+{
+
+       unsigned int probability = c->tlbsize / c->tlbsizevtlb;
+
+       /*
+        * 0 = All TLBWR instructions go to FTLB
+        * 1 = 15:1: For every 16 TLBWR instructions, 15 go to the
+        * FTLB and 1 goes to the VTLB.
+        * 2 = 7:1: As above with 7:1 ratio.
+        * 3 = 3:1: As above with 3:1 ratio.
+        *
+        * Use the linear midpoint as the probability threshold.
+        */
+       if (probability >= 12)
+               return 1;
+       else if (probability >= 6)
+               return 2;
+       else
+               /*
+                * So the FTLB is less than 4 times the size of the VTLB.
+                * A 3:1 ratio can still be useful though.
+                */
+               return 3;
+}
+
 static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 {
        unsigned int config6;
@@ -203,9 +229,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
        case CPU_P5600:
                /* proAptiv & related cores use Config6 to enable the FTLB */
                config6 = read_c0_config6();
+               /* Clear the old probability value */
+               config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
                if (enable)
                        /* Enable FTLB */
-                       write_c0_config6(config6 | MIPS_CONF6_FTLBEN);
+                       write_c0_config6(config6 |
+                                        (calculate_ftlb_probability(c)
+                                         << MIPS_CONF6_FTLBP_SHIFT)
+                                        | MIPS_CONF6_FTLBEN);
                else
                        /* Disable FTLB */
                        write_c0_config6(config6 &  ~MIPS_CONF6_FTLBEN);
index 31b1b763cb298841eee156c61752687c4056809d..c5c4fd54d797221256e147a8a0be5278a8806df5 100644 (file)
@@ -94,12 +94,12 @@ int rtlx_open(int index, int can_sleep)
        int ret = 0;
 
        if (index >= RTLX_CHANNELS) {
-               pr_debug(KERN_DEBUG "rtlx_open index out of range\n");
+               pr_debug("rtlx_open index out of range\n");
                return -ENOSYS;
        }
 
        if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
-               pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
+               pr_debug("rtlx_open channel %d already opened\n", index);
                ret = -EBUSY;
                goto out_fail;
        }
index d21ec57b6e952046db450d48929161b76b0710e3..f3b635f86c39c085ac67126929d4a7cec89e9702 100644 (file)
@@ -485,7 +485,7 @@ static void __init bootmem_init(void)
  * NOTE: historically plat_mem_setup did the entire platform initialization.
  *      This was rather impractical because it meant plat_mem_setup had to
  * get away without any kind of memory allocator.  To keep old code from
- * breaking plat_setup was just renamed to plat_setup and a second platform
+ * breaking plat_setup was just renamed to plat_mem_setup and a second platform
  * initialization hook for anything else was introduced.
  */
 
@@ -493,7 +493,7 @@ static int usermem __initdata;
 
 static int __init early_parse_mem(char *p)
 {
-       unsigned long start, size;
+       phys_t start, size;
 
        /*
         * If a user specifies memory size, we
index 1d57605e4615288a604403de1a7071d7112b20c0..16f1e4f2bf3c3c08896161106529b4a0c551dd9e 100644 (file)
@@ -658,13 +658,13 @@ static int signal_setup(void)
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
        } else {
-               save_fp_context = copy_fp_from_sigcontext;
-               restore_fp_context = copy_fp_to_sigcontext;
+               save_fp_context = copy_fp_to_sigcontext;
+               restore_fp_context = copy_fp_from_sigcontext;
        }
 #endif /* CONFIG_SMP */
 #else
-       save_fp_context = copy_fp_from_sigcontext;;
-       restore_fp_context = copy_fp_to_sigcontext;
+       save_fp_context = copy_fp_to_sigcontext;
+       restore_fp_context = copy_fp_from_sigcontext;
 #endif
 
        return 0;
index 0bb9cc9dc621f705dd77b139b0985c01213f9c8e..d87e03330b29ae0dd5e27e9e84536dcc96af6f5c 100644 (file)
@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o
 # Serial port support
 #
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_SERIAL_8250) += serial.o
+loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
+obj-y += $(loongson-serial-m) $(loongson-serial-y)
 obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
 obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
 
index b5f228e7eae6144565e34c74bf6f86e5d973a760..e3328a96e80909b758a8d619b6a0f8398399d2da 100644 (file)
@@ -1872,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
        uasm_l_smp_pgtable_change(l, *p);
 #endif
        iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
-       if (!m4kc_tlbp_war())
+       if (!m4kc_tlbp_war()) {
                build_tlb_probe_entry(p);
+               if (cpu_has_htw) {
+                       /* race condition happens, leaving */
+                       uasm_i_ehb(p);
+                       uasm_i_mfc0(p, wr.r3, C0_INDEX);
+                       uasm_il_bltz(p, r, wr.r3, label_leave);
+                       uasm_i_nop(p);
+               }
+       }
        return wr;
 }
 
index 20102a6d41410fbba6854edab6ee1199d50cc7fa..c427c57781865e13d52dc75ea2bb2f8db5db9ad7 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/platform_device.h>
 
@@ -76,8 +76,4 @@ static int __init led_init(void)
        return platform_device_register(&fled_device);
 }
 
-module_init(led_init);
-
-MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LED probe driver for SEAD-3");
+device_initcall(led_init);
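
Since the SEAD-3 LED code is built-in only, the module boilerplate was dead weight; device_initcall() registers the init routine directly. A minimal sketch of the non-modular pattern (hypothetical driver):

    #include <linux/init.h>

    static int __init example_led_init(void)
    {
            return 0;  /* would register the platform device here */
    }
    device_initcall(example_led_init);  /* no module_exit/MODULE_* needed */
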
index be358a8050c57c14377c1bcc44cd45b332b622b1..6b43af0a34d9dd39c4afd08526342bbdd514b068 100644 (file)
@@ -1,6 +1,10 @@
 obj-y                          += setup.o nlm_hal.o cop2-ex.o dt.o
 obj-$(CONFIG_SMP)              += wakeup.o
-obj-$(CONFIG_USB)              += usb-init.o
-obj-$(CONFIG_USB)              += usb-init-xlp2.o
-obj-$(CONFIG_SATA_AHCI)                += ahci-init.o
-obj-$(CONFIG_SATA_AHCI)                += ahci-init-xlp2.o
+ifdef CONFIG_USB
+obj-y                          += usb-init.o
+obj-y                          += usb-init-xlp2.o
+endif
+ifdef CONFIG_SATA_AHCI
+obj-y                          += ahci-init.o
+obj-y                          += ahci-init-xlp2.o
+endif
index dc939de9b5b0d8fa8022c01f14acc630d7afece8..b4c4b469e320e56e20482d782fd6b929b90eb8cf 100644 (file)
@@ -100,7 +100,6 @@ CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 CONFIG_TUN=m
 # CONFIG_NET_VENDOR_3COM is not set
 CONFIG_FS_ENET=y
index e5a648115ada0c6396f65c12a1de19622ee95099..7cb9719abf3dd0c264c9621443c7a2a10fd981e4 100644 (file)
@@ -113,7 +113,6 @@ CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
index 8317b6010ba6b288503ae2b2ccf6bea0833b31d4..ecabf625d2497d0d37d2ff77ee5e27c018bf8b22 100644 (file)
@@ -114,7 +114,6 @@ CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
index 124d66f0282c8ef9cd1a222e49911beebaaf8cac..4a4a86fb0d3d240d7a07097a0fbeabd00d3101ab 100644 (file)
@@ -165,7 +165,6 @@ CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
index 1e151594c691585bb2948598587abac4d0118b30..99ea8746bbafcb9d3f2af17a858a9d6228afc598 100644 (file)
@@ -167,7 +167,6 @@ CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
index 59734916986a9e53cd17ec2b1c9a27ec445d3b85..8a08d6dcb0b4734939af242356509d094cffe88c 100644 (file)
@@ -211,7 +211,6 @@ CONFIG_MV643XX_ETH=y
 # CONFIG_NETDEV_10000 is not set
 # CONFIG_ATM_DRIVERS is not set
 CONFIG_NETCONSOLE=m
-CONFIG_NETPOLL_TRAP=y
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
index 20bc5e2d368d05601fd267ac81c2b375c9f95e6d..5830d735c5c3a56b948f2eab77fb4c88c2e50ff8 100644 (file)
@@ -154,7 +154,6 @@ CONFIG_WINDFARM_PM121=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=m
 CONFIG_VHOST_NET=m
index c3a3269b08657d5c29208030dd55a978da3a6308..67885b2d70aae67317f920384783d8d3c3d112e3 100644 (file)
@@ -103,7 +103,6 @@ CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 CONFIG_TUN=m
 CONFIG_VORTEX=y
 CONFIG_ACENIC=y
index fec5870f18180140ff7feef928073b461f2e5440..ad6d6b5af7d7b26a6cf7d3637330d0fd0cfa4377 100644 (file)
@@ -629,7 +629,6 @@ CONFIG_SLIP_SMART=y
 CONFIG_NET_FC=y
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL_TRAP=y
 CONFIG_VIRTIO_NET=m
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_JOYDEV=m
index dd2a9cab4b5012b69c00694c13347f8a52824f9c..1f97364017c747ea474aafef26fba1cc437bb915 100644 (file)
@@ -133,7 +133,6 @@ CONFIG_DM_UEVENT=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=m
 CONFIG_VHOST_NET=m
index d2008887eb8c7fd8f3271b73438f8d5c578cb2b4..ac7ca585282795b6b0c9395083c049d90b37677c 100644 (file)
@@ -134,7 +134,6 @@ CONFIG_DM_UEVENT=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
 CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=m
 CONFIG_VHOST_NET=m
index 4ca90a39d6d01af63da46c73d19b816b0979e538..725247beebecda3493e9e477f7fd4ec29911b558 100644 (file)
@@ -159,8 +159,6 @@ struct pci_dn {
 
        int     pci_ext_config_space;   /* for pci devices */
 
-       bool    force_32bit_msi;
-
        struct  pci_dev *pcidev;        /* back-pointer to the pci device */
 #ifdef CONFIG_EEH
        struct eeh_dev *edev;           /* eeh device */
index f19b1e5cb06096e2bd9b68b8cd620669c8943cac..1ceecdda810b04722b88329d52b866c3c540ad0e 100644 (file)
@@ -65,7 +65,7 @@ static ssize_t eeh_pe_state_show(struct device *dev,
                return -ENODEV;
 
        state = eeh_ops->get_state(edev->pe, NULL);
-       return sprintf(buf, "%0x08x %0x08x\n",
+       return sprintf(buf, "0x%08x 0x%08x\n",
                       state, edev->pe->state);
 }
 
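
The old format string here was malformed: "%0x08x" parses as the conversion "%0x" (hex with a zero flag but no width) followed by the literal text "08x". A hedged illustration with plain printf (values illustrative):

    printf("%0x08x\n", 0x1a2b);  /* prints "1a2b08x"; the "08x" is literal */
    printf("0x%08x\n", 0x1a2b);  /* prints "0x00001a2b" as intended        */
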
index 155013da27e05cb801ba961b102d41f3edbfb48d..b15194e2c5fc55ca934dba97fe4863b2c273baa5 100644 (file)
@@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibus_to_node);
 #endif
-
-static void quirk_radeon_32bit_msi(struct pci_dev *dev)
-{
-       struct pci_dn *pdn = pci_get_pdn(dev);
-
-       if (pdn)
-               pdn->force_32bit_msi = true;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
index 23eb9a9441bdad612481a7d1b2fcf12dd17a7ed3..c62be60c727485cce5108fcf4770b28f42129eee 100644 (file)
@@ -30,8 +30,8 @@
 V_FUNCTION_BEGIN(__kernel_getcpu)
   .cfi_startproc
        mfspr   r5,SPRN_SPRG_VDSO_READ
-       cmpdi   cr0,r3,0
-       cmpdi   cr1,r4,0
+       cmpwi   cr0,r3,0
+       cmpwi   cr1,r4,0
        clrlwi  r6,r5,16
        rlwinm  r7,r5,16,31-15,31-0
        beq     cr0,1f
index 5e1ed1575aabe23c245edcdff06433cfb0a62327..b322bfb51343f65fdfe76d265cdcb76928011d21 100644 (file)
@@ -57,7 +57,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
        };
 
        /* Print things out */
-       if (hmi_evt->version != OpalHMIEvt_V1) {
+       if (hmi_evt->version < OpalHMIEvt_V1) {
                pr_err("HMI Interrupt, Unknown event version %d !\n",
                        hmi_evt->version);
                return;
index 468a0f23c7f2b5f756c1b553315793c03492c0d6..3ba435ec3dcd584e5f466b78eae18379a69d482b 100644 (file)
@@ -1509,7 +1509,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                                  unsigned int is_64, struct msi_msg *msg)
 {
        struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
-       struct pci_dn *pdn = pci_get_pdn(dev);
        unsigned int xive_num = hwirq - phb->msi_base;
        __be32 data;
        int rc;
@@ -1523,7 +1522,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                return -ENXIO;
 
        /* Force 32-bit MSI on some broken devices */
-       if (pdn && pdn->force_32bit_msi)
+       if (dev->no_64bit_msi)
                is_64 = 0;
 
        /* Assign XIVE to PE */
@@ -1997,7 +1996,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
        if (is_kdump_kernel()) {
                pr_info("  Issue PHB reset ...\n");
                ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
-               ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET);
+               ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
        }
 
        /* Configure M64 window */
index b2187d0068b876e6909376c81d390cbf7b8bad00..4b20f2c6b3b24ba950d3ea10014e0b1fffbfc0b6 100644 (file)
@@ -50,7 +50,6 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 {
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
-       struct pci_dn *pdn = pci_get_pdn(pdev);
        struct msi_desc *entry;
        struct msi_msg msg;
        int hwirq;
@@ -60,7 +59,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
        if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
                return -ENODEV;
 
-       if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
+       if (pdev->no_64bit_msi && !phb->msi32_support)
                return -ENODEV;
 
        list_for_each_entry(entry, &pdev->msi_list, list) {
index 8ab5add4ac824f43c6a6b299b24ed15bf0deafb2..8b909e94fd9a10bbee407c2e1a04df7320e93a71 100644 (file)
@@ -420,7 +420,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
         */
 again:
        if (type == PCI_CAP_ID_MSI) {
-               if (pdn->force_32bit_msi) {
+               if (pdev->no_64bit_msi) {
                        rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
                        if (rc < 0) {
                                /*
index b988b5addf864a581ff8c36e177379c32ba92518..c8efbb37d6e076ab123a3d5d8066f58edd36acd8 100644 (file)
@@ -293,10 +293,10 @@ static inline void disable_surveillance(void)
        args.token = rtas_token("set-indicator");
        if (args.token == RTAS_UNKNOWN_SERVICE)
                return;
-       args.nargs = 3;
-       args.nret = 1;
+       args.nargs = cpu_to_be32(3);
+       args.nret = cpu_to_be32(1);
        args.rets = &args.args[3];
-       args.args[0] = SURVEILLANCE_TOKEN;
+       args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
        args.args[1] = 0;
        args.args[2] = 0;
        enter_rtas(__pa(&args));
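
RTAS reads its argument block in big-endian byte order regardless of kernel endianness, so on little-endian ppc64 every non-zero 32-bit cell must be converted; that is all the cpu_to_be32() wrappers above do. Illustrative, assuming a little-endian kernel:

    __be32 cell = cpu_to_be32(3);  /* memory bytes: 00 00 00 03, as RTAS expects */

The untouched args[1] and args[2] assignments need no wrapper because zero is endian-neutral.
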
index 5b1b52a04ad6283fb67308d9bf84b08494870140..7e064c68c5ec8a0ab538a15947d5c44b2db0a322 100644 (file)
@@ -12,6 +12,14 @@ int dma_supported(struct device *dev, u64 mask);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+                                 enum dma_data_direction dir)
+{
+       /* Since dma_{alloc,free}_noncoherent() allocate coherent memory, this
+        * routine can be a nop.
+        */
+}
+
 extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops *leon_dma_ops;
 extern struct dma_map_ops pci32_dma_ops;
index 91de7dd7427ff60601cdf7c056337ee5aaf4425d..37dc9364c4a12485b513429af8288e65a725db37 100644 (file)
@@ -218,7 +218,6 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL_TRAP=y
 CONFIG_TUN=y
 CONFIG_VETH=m
 CONFIG_NET_DSA_MV88E6060=y
index c7702b7ab7a55eec58e24a54a3e2146df17ea9fb..76a2781dec2c879bf1dde6d861bd22155094ca0b 100644 (file)
@@ -337,7 +337,6 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL_TRAP=y
 CONFIG_TUN=y
 CONFIG_VETH=m
 CONFIG_NET_DSA_MV88E6060=y
index f48b17df42249e45cca9ef6de99bfd43083507cd..3a52ee0e726d4ca2643ff6b0dec4675f39e296b5 100644 (file)
@@ -20,7 +20,6 @@
 #define THREAD_SIZE_ORDER      1
 #define THREAD_SIZE            (PAGE_SIZE << THREAD_SIZE_ORDER)
 
-#define STACKFAULT_STACK 0
 #define DOUBLEFAULT_STACK 1
 #define NMI_STACK 0
 #define DEBUG_STACK 0
index 678205195ae118e16ca34609a24f472d9875e568..75450b2c7be48393607da8a5fdf050e663eb48c8 100644 (file)
 #define IRQ_STACK_ORDER 2
 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
 
-#define STACKFAULT_STACK 1
-#define DOUBLEFAULT_STACK 2
-#define NMI_STACK 3
-#define DEBUG_STACK 4
-#define MCE_STACK 5
-#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
+#define DOUBLEFAULT_STACK 1
+#define NMI_STACK 2
+#define DEBUG_STACK 3
+#define MCE_STACK 4
+#define N_EXCEPTION_STACKS 4  /* hw limit: 7 */
 
 #define PUD_PAGE_SIZE          (_AC(1, UL) << PUD_SHIFT)
 #define PUD_PAGE_MASK          (~(PUD_PAGE_SIZE-1))
index 854053889d4d2d6f74cdb3143da4f6c63e213996..547e344a6dc60d7db27d43c74d44c783326291bb 100644 (file)
@@ -141,7 +141,7 @@ struct thread_info {
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK                                            \
        (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME |       \
-        _TIF_USER_RETURN_NOTIFY)
+        _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW                                                        \
index bc8352e7010a9e805c54068da84b3848dcc12048..707adc6549d82335a20bdf18d18b697fa1fe9eab 100644 (file)
@@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void);
 
 #ifdef CONFIG_TRACING
 asmlinkage void trace_page_fault(void);
+#define trace_stack_segment stack_segment
 #define trace_divide_error divide_error
 #define trace_bounds bounds
 #define trace_invalid_op invalid_op
index 1abcb50b48ae042fd06c2581802af0e1af7f49d5..ff86f19b575849fca7e20a4086e09f798ae8291d 100644 (file)
@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
                [ DEBUG_STACK-1                 ]       = "#DB",
                [ NMI_STACK-1                   ]       = "NMI",
                [ DOUBLEFAULT_STACK-1           ]       = "#DF",
-               [ STACKFAULT_STACK-1            ]       = "#SS",
                [ MCE_STACK-1                   ]       = "#MC",
 #if DEBUG_STKSZ > EXCEPTION_STKSZ
                [ N_EXCEPTION_STACKS ...
index df088bb03fb3ffec9148c7cc44cb65ef1aa36118..c0226ab541061870bb590a9c817d0770e618697c 100644 (file)
@@ -828,9 +828,15 @@ ENTRY(native_iret)
        jnz native_irq_return_ldt
 #endif
 
+.global native_irq_return_iret
 native_irq_return_iret:
+       /*
+        * This may fault.  Non-paranoid faults on return to userspace are
+        * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
+        * Double-faults due to espfix64 are handled in do_double_fault.
+        * Other faults here are fatal.
+        */
        iretq
-       _ASM_EXTABLE(native_irq_return_iret, bad_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
 native_irq_return_ldt:
@@ -858,25 +864,6 @@ native_irq_return_ldt:
        jmp native_irq_return_iret
 #endif
 
-       .section .fixup,"ax"
-bad_iret:
-       /*
-        * The iret traps when the %cs or %ss being restored is bogus.
-        * We've lost the original trap vector and error code.
-        * #GPF is the most likely one to get for an invalid selector.
-        * So pretend we completed the iret and took the #GPF in user mode.
-        *
-        * We are now running with the kernel GS after exception recovery.
-        * But error_entry expects us to have user GS to match the user %cs,
-        * so swap back.
-        */
-       pushq $0
-
-       SWAPGS
-       jmp general_protection
-
-       .previous
-
        /* edi: workmask, edx: work */
 retint_careful:
        CFI_RESTORE_STATE
@@ -922,37 +909,6 @@ ENTRY(retint_kernel)
        CFI_ENDPROC
 END(common_interrupt)
 
-       /*
-        * If IRET takes a fault on the espfix stack, then we
-        * end up promoting it to a doublefault.  In that case,
-        * modify the stack to make it look like we just entered
-        * the #GP handler from user space, similar to bad_iret.
-        */
-#ifdef CONFIG_X86_ESPFIX64
-       ALIGN
-__do_double_fault:
-       XCPT_FRAME 1 RDI+8
-       movq RSP(%rdi),%rax             /* Trap on the espfix stack? */
-       sarq $PGDIR_SHIFT,%rax
-       cmpl $ESPFIX_PGD_ENTRY,%eax
-       jne do_double_fault             /* No, just deliver the fault */
-       cmpl $__KERNEL_CS,CS(%rdi)
-       jne do_double_fault
-       movq RIP(%rdi),%rax
-       cmpq $native_irq_return_iret,%rax
-       jne do_double_fault             /* This shouldn't happen... */
-       movq PER_CPU_VAR(kernel_stack),%rax
-       subq $(6*8-KERNEL_STACK_OFFSET),%rax    /* Reset to original stack */
-       movq %rax,RSP(%rdi)
-       movq $0,(%rax)                  /* Missing (lost) #GP error code */
-       movq $general_protection,RIP(%rdi)
-       retq
-       CFI_ENDPROC
-END(__do_double_fault)
-#else
-# define __do_double_fault do_double_fault
-#endif
-
 /*
  * APIC interrupts.
  */
@@ -1124,7 +1080,7 @@ idtentry overflow do_overflow has_error_code=0
 idtentry bounds do_bounds has_error_code=0
 idtentry invalid_op do_invalid_op has_error_code=0
 idtentry device_not_available do_device_not_available has_error_code=0
-idtentry double_fault __do_double_fault has_error_code=1 paranoid=1
+idtentry double_fault do_double_fault has_error_code=1 paranoid=1
 idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
 idtentry invalid_TSS do_invalid_TSS has_error_code=1
 idtentry segment_not_present do_segment_not_present has_error_code=1
@@ -1289,7 +1245,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 
 idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
 idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry stack_segment do_stack_segment has_error_code=1 paranoid=1
+idtentry stack_segment do_stack_segment has_error_code=1
 #ifdef CONFIG_XEN
 idtentry xen_debug do_debug has_error_code=0
 idtentry xen_int3 do_int3 has_error_code=0
@@ -1399,17 +1355,16 @@ error_sti:
 
 /*
  * There are two places in the kernel that can potentially fault with
- * usergs. Handle them here. The exception handlers after iret run with
- * kernel gs again, so don't set the user space flag. B stepping K8s
- * sometimes report an truncated RIP for IRET exceptions returning to
- * compat mode. Check for these here too.
+ * usergs. Handle them here.  B stepping K8s sometimes report a
+ * truncated RIP for IRET exceptions returning to compat mode. Check
+ * for these here too.
  */
 error_kernelspace:
        CFI_REL_OFFSET rcx, RCX+8
        incl %ebx
        leaq native_irq_return_iret(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
-       je error_swapgs
+       je error_bad_iret
        movl %ecx,%eax  /* zero extend */
        cmpq %rax,RIP+8(%rsp)
        je bstep_iret
@@ -1420,7 +1375,15 @@ error_kernelspace:
 bstep_iret:
        /* Fix truncated RIP */
        movq %rcx,RIP+8(%rsp)
-       jmp error_swapgs
+       /* fall through */
+
+error_bad_iret:
+       SWAPGS
+       mov %rsp,%rdi
+       call fixup_bad_iret
+       mov %rax,%rsp
+       decl %ebx       /* Return to usergs */
+       jmp error_sti
        CFI_ENDPROC
 END(error_entry)
 
index 0d0e922fafc149400b4320c793e1d311c96d147b..de801f22128a6b183aa5ab21ac3a3cd158af5610 100644 (file)
@@ -233,32 +233,40 @@ DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",              invalid_op)
 DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",coprocessor_segment_overrun)
 DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",              invalid_TSS)
 DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",      segment_not_present)
-#ifdef CONFIG_X86_32
 DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",            stack_segment)
-#endif
 DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",          alignment_check)
 
 #ifdef CONFIG_X86_64
 /* Runs on IST stack */
-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
-{
-       enum ctx_state prev_state;
-
-       prev_state = exception_enter();
-       if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-                      X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
-               preempt_conditional_sti(regs);
-               do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
-               preempt_conditional_cli(regs);
-       }
-       exception_exit(prev_state);
-}
-
 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 {
        static const char str[] = "double fault";
        struct task_struct *tsk = current;
 
+#ifdef CONFIG_X86_ESPFIX64
+       extern unsigned char native_irq_return_iret[];
+
+       /*
+        * If IRET takes a non-IST fault on the espfix64 stack, then we
+        * end up promoting it to a doublefault.  In that case, modify
+        * the stack to make it look like we just entered the #GP
+        * handler from user space, similar to bad_iret.
+        */
+       if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
+               regs->cs == __KERNEL_CS &&
+               regs->ip == (unsigned long)native_irq_return_iret)
+       {
+               struct pt_regs *normal_regs = task_pt_regs(current);
+
+               /* Fake a #GP(0) from userspace. */
+               memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
+               normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
+               regs->ip = (unsigned long)general_protection;
+               regs->sp = (unsigned long)&normal_regs->orig_ax;
+               return;
+       }
+#endif
+
        exception_enter();
        /* Return not checked because double check cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
@@ -399,6 +407,35 @@ asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
        return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
+
+struct bad_iret_stack {
+       void *error_entry_ret;
+       struct pt_regs regs;
+};
+
+asmlinkage __visible
+struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+{
+       /*
+        * This is called from entry_64.S early in handling a fault
+        * caused by a bad iret to user mode.  To handle the fault
+        * correctly, we want to move our stack frame to task_pt_regs
+        * and we want to pretend that the exception came from the
+        * iret target.
+        */
+       struct bad_iret_stack *new_stack =
+               container_of(task_pt_regs(current),
+                            struct bad_iret_stack, regs);
+
+       /* Copy the IRET target to the new stack. */
+       memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
+
+       /* Copy the remainder of the stack from the current stack. */
+       memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
+
+       BUG_ON(!user_mode_vm(&new_stack->regs));
+       return new_stack;
+}
 #endif
 
 /*
@@ -778,7 +815,7 @@ void __init trap_init(void)
        set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
        set_intr_gate(X86_TRAP_TS, invalid_TSS);
        set_intr_gate(X86_TRAP_NP, segment_not_present);
-       set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
+       set_intr_gate(X86_TRAP_SS, stack_segment);
        set_intr_gate(X86_TRAP_GP, general_protection);
        set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
        set_intr_gate(X86_TRAP_MF, coprocessor_error);
index ac1c4de3a48491d9b0cf939897e9af57238b3f71..978f402006eef21ee569720a0d573a6a48e12c97 100644 (file)
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
         * kvm mmu, before reclaiming the page, we should
         * unmap it from mmu first.
         */
-       WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+       WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
 
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-                       kvm_is_mmio_pfn(pfn));
+                       kvm_is_reserved_pfn(pfn));
 
        if (host_writable)
                spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
         * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
         * here.
         */
-       if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+       if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
            level == PT_PAGE_TABLE_LEVEL &&
            PageTransCompound(pfn_to_page(pfn)) &&
            !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
index 850246206b1258a697f83e86c39acc9b0a17f973..35c93ff11f35b4e097279cdc3805e46d40ec285b 100644 (file)
@@ -174,7 +174,7 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
                        goto unlock;
        }
 
-       err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
+       err = memcpy_to_msg(msg, ctx->result, len);
 
 unlock:
        release_sock(sk);
index 83187f497c7c65dddd2248170a50976e568d82e5..c3b482bee2081262a56a6698fbede2ae3f39906a 100644 (file)
@@ -298,9 +298,9 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
                        len = min_t(unsigned long, len,
                                    PAGE_SIZE - sg->offset - sg->length);
 
-                       err = memcpy_fromiovec(page_address(sg_page(sg)) +
-                                              sg->offset + sg->length,
-                                              msg->msg_iov, len);
+                       err = memcpy_from_msg(page_address(sg_page(sg)) +
+                                             sg->offset + sg->length,
+                                             msg, len);
                        if (err)
                                goto unlock;
 
@@ -337,8 +337,8 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
                        if (!sg_page(sg + i))
                                goto unlock;
 
-                       err = memcpy_fromiovec(page_address(sg_page(sg + i)),
-                                              msg->msg_iov, plen);
+                       err = memcpy_from_msg(page_address(sg_page(sg + i)),
+                                             msg, plen);
                        if (err) {
                                __free_page(sg_page(sg + i));
                                sg_assign_page(sg + i, NULL);
index 7652e8dc188f93036e03a23a99ac7aee3b543811..21b0bc6a9c969ea677630a827f69c45545a9e78a 100644 (file)
@@ -1225,11 +1225,13 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
        card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE);
        if (!card->config_regs) {
                dev_warn(&dev->dev, "Failed to ioremap config registers\n");
+               err = -ENOMEM;
                goto out_release_regions;
        }
        card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE);
        if (!card->buffers) {
                dev_warn(&dev->dev, "Failed to ioremap data buffers\n");
+               err = -ENOMEM;
                goto out_unmap_config;
        }
 
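
Both hunks in this probe path fix the same bug class: err still held 0 from the last successful step, so jumping to the unwind label reported success for a failed probe. A generic sketch (dev and size are hypothetical):

    void __iomem *regs;
    int err = 0;                        /* earlier steps succeeded          */

    regs = pci_iomap(dev, 0, size);
    if (!regs) {
            err = -ENOMEM;              /* the fix: set err before the goto */
            goto out_release_regions;
    }
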
index 24b5b020753a9e4a66a5d3db7c8f7ad8bee0b928..a23ac0c724f014643e66bc2485f7c79cef523920 100644 (file)
@@ -52,29 +52,26 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
 
        tmp = pmc_read(pmc, AT91_PMC_USB);
        usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
-       return parent_rate / (usbdiv + 1);
+
+       return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
 }
 
 static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
                                          unsigned long *parent_rate)
 {
        unsigned long div;
-       unsigned long bestrate;
-       unsigned long tmp;
+
+       if (!rate)
+               return -EINVAL;
 
        if (rate >= *parent_rate)
                return *parent_rate;
 
-       div = *parent_rate / rate;
-       if (div >= SAM9X5_USB_MAX_DIV)
-               return *parent_rate / (SAM9X5_USB_MAX_DIV + 1);
-
-       bestrate = *parent_rate / div;
-       tmp = *parent_rate / (div + 1);
-       if (bestrate - rate > rate - tmp)
-               bestrate = tmp;
+       div = DIV_ROUND_CLOSEST(*parent_rate, rate);
+       if (div > SAM9X5_USB_MAX_DIV + 1)
+               div = SAM9X5_USB_MAX_DIV + 1;
 
-       return bestrate;
+       return DIV_ROUND_CLOSEST(*parent_rate, div);
 }
 
 static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -106,9 +103,13 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
        u32 tmp;
        struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
        struct at91_pmc *pmc = usb->pmc;
-       unsigned long div = parent_rate / rate;
+       unsigned long div;
+
+       if (!rate)
+               return -EINVAL;
 
-       if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV)
+       div = DIV_ROUND_CLOSEST(parent_rate, rate);
+       if (div > SAM9X5_USB_MAX_DIV + 1 || !div)
                return -EINVAL;
 
        tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
@@ -253,7 +254,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
 
                tmp_parent_rate = rate * usb->divisors[i];
                tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate);
-               tmprate = tmp_parent_rate / usb->divisors[i];
+               tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
                if (tmprate < rate)
                        tmpdiff = rate - tmprate;
                else
@@ -281,10 +282,10 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
        struct at91_pmc *pmc = usb->pmc;
        unsigned long div;
 
-       if (!rate || parent_rate % rate)
+       if (!rate)
                return -EINVAL;
 
-       div = parent_rate / rate;
+       div = DIV_ROUND_CLOSEST(parent_rate, rate);
 
        for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
                if (usb->divisors[i] == div) {
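
The recurring change in this file replaces truncating division with DIV_ROUND_CLOSEST(), so the driver picks the divisor whose output rate lands nearest the request instead of always overshooting. A worked example with illustrative numbers:

    unsigned long parent = 48000000, rate = 10000000;

    unsigned long d1 = parent / rate;                   /* 4 -> 12.0 MHz */
    unsigned long d2 = DIV_ROUND_CLOSEST(parent, rate); /* 5 ->  9.6 MHz */

9.6 MHz misses the 10 MHz request by 0.4 MHz where the truncated divisor missed by 2 MHz.
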
index 18a9de29df0e0c31dadd3de0b2bdb2485fab2733..c0a842b335c520c6c28f08308a1b62a743038dd3 100644 (file)
@@ -263,6 +263,14 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        if (!rate)
                rate = 1;
 
+       /* if read only, just return current value */
+       if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+               bestdiv = readl(divider->reg) >> divider->shift;
+               bestdiv &= div_mask(divider);
+               bestdiv = _get_div(divider, bestdiv);
+               return bestdiv;
+       }
+
        maxdiv = _get_maxdiv(divider);
 
        if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
@@ -361,11 +369,6 @@ const struct clk_ops clk_divider_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_divider_ops);
 
-const struct clk_ops clk_divider_ro_ops = {
-       .recalc_rate = clk_divider_recalc_rate,
-};
-EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
-
 static struct clk *_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
@@ -391,10 +394,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
        }
 
        init.name = name;
-       if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
-               init.ops = &clk_divider_ro_ops;
-       else
-               init.ops = &clk_divider_ops;
+       init.ops = &clk_divider_ops;
        init.flags = flags | CLK_IS_BASIC;
        init.parent_names = (parent_name ? &parent_name: NULL);
        init.num_parents = (parent_name ? 1 : 0);
index b345cc791e5defdeeb57d0b8df4d566bd41aef2c..88b9fe13fa444b2a81a3bd8a2588b035357d0048 100644 (file)
@@ -322,7 +322,7 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
        unsigned long ccsr = CCSR;
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
-       a = cccr & CCCR_A_BIT;
+       a = cccr & (1 << CCCR_A_BIT);
        l  = ccsr & CCSR_L_MASK;
 
        if (osc_forced || a)
@@ -341,7 +341,7 @@ static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
        unsigned long ccsr = CCSR;
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
-       a = cccr & CCCR_A_BIT;
+       a = cccr & (1 << CCCR_A_BIT);
        if (osc_forced)
                return PXA_MEM_13Mhz;
        if (a)
index dab988ab8cf12740ac931c5f5efaa39b90887ec3..157139a5c1ca956d76d1be30dfb6687f82d01816 100644 (file)
@@ -3122,7 +3122,7 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = {
        [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
        [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
        [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
-       [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+       [MMSS_RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
        [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
        [MAPLE_CLK_SRC] = &maple_clk_src.clkr,
        [VDP_CLK_SRC] = &vdp_clk_src.clkr,
index 1e68bff481b8e32ec440959002a2467287c269da..880a266f01431b3b9e7040565d3a3e81f0716a8b 100644 (file)
@@ -90,9 +90,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
                div->width = div_width;
                div->lock = lock;
                div->table = div_table;
-               div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
-                                               ? &clk_divider_ro_ops
-                                               : &clk_divider_ops;
+               div_ops = &clk_divider_ops;
        }
 
        clk = clk_register_composite(NULL, name, parent_names, num_parents,
index efb17c3ee120e5ee28fa05099c4c3c7ce09f0ac1..f4a9c0058b4d677382863a12bf887b40202f63fe 100644 (file)
@@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node *node)
        /* Make sure timer is stopped before playing with interrupts */
        sun4i_clkevt_time_stop(0);
 
+       sun4i_clockevent.cpumask = cpu_possible_mask;
+       sun4i_clockevent.irq = irq;
+
+       clockevents_config_and_register(&sun4i_clockevent, rate,
+                                       TIMER_SYNC_TICKS, 0xffffffff);
+
        ret = setup_irq(irq, &sun4i_timer_irq);
        if (ret)
                pr_warn("failed to setup irq %d\n", irq);
@@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node *node)
        /* Enable timer0 interrupt */
        val = readl(timer_base + TIMER_IRQ_EN_REG);
        writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
-
-       sun4i_clockevent.cpumask = cpu_possible_mask;
-       sun4i_clockevent.irq = irq;
-
-       clockevents_config_and_register(&sun4i_clockevent, rate,
-                                       TIMER_SYNC_TICKS, 0xffffffff);
 }
 CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
                       sun4i_timer_init);
index 7784911d78ef6fc54d6aeea23950f4585d3c74c4..00fc59762e0df3bba0758d1f18e90328e5726635 100644 (file)
@@ -185,6 +185,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
        if (rdev->flags & RADEON_IS_AGP)
                return false;
 
+       /*
+        * Older chips have a HW limitation, they can only generate 40 bits
+        * of address for "64-bit" MSIs which breaks on some platforms, notably
+        * IBM POWER servers, so we limit them
+        */
+       if (rdev->family < CHIP_BONAIRE) {
+               dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
+               rdev->pdev->no_64bit_msi = 1;
+       }
+
        /* force MSI on */
        if (radeon_msi == 1)
                return true;
index 6aac695b1688beaf2adc5336bd842bb7993d2d09..9b55e673b67caf1365c7452ce51a22a37510af02 100644 (file)
@@ -1084,10 +1084,8 @@ static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id)
        if (ret)
                goto clock_dis;
 
-       data->hwmon_dev = devm_hwmon_device_register_with_groups(dev,
-                                                                client->name,
-                                                                data,
-                                                                g762_groups);
+       data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+                                                           data, g762_groups);
        if (IS_ERR(data->hwmon_dev)) {
                ret = PTR_ERR(data->hwmon_dev);
                goto clock_dis;
index 22c096ce39ad765c6a50d26ff80e77158fe3bbf6..513bd6d14293d80e5ce502080a092b5f970fe840 100644 (file)
@@ -44,6 +44,9 @@
 
 #define BMC150_ACCEL_REG_INT_STATUS_2          0x0B
 #define BMC150_ACCEL_ANY_MOTION_MASK           0x07
+#define BMC150_ACCEL_ANY_MOTION_BIT_X          BIT(0)
+#define BMC150_ACCEL_ANY_MOTION_BIT_Y          BIT(1)
+#define BMC150_ACCEL_ANY_MOTION_BIT_Z          BIT(2)
 #define BMC150_ACCEL_ANY_MOTION_BIT_SIGN       BIT(3)
 
 #define BMC150_ACCEL_REG_PMU_LPW               0x11
@@ -92,9 +95,9 @@
 #define BMC150_ACCEL_SLOPE_THRES_MASK          0xFF
 
 /* Slope duration in terms of number of samples */
-#define BMC150_ACCEL_DEF_SLOPE_DURATION        2
+#define BMC150_ACCEL_DEF_SLOPE_DURATION                1
 /* in terms of multiples of g's/LSB, based on range */
-#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD       5
+#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD       1
 
 #define BMC150_ACCEL_REG_XOUT_L                0x02
 
@@ -536,6 +539,9 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
        if (ret < 0) {
                dev_err(&data->client->dev,
                        "Failed: bmc150_accel_set_power_state for %d\n", on);
+               if (on)
+                       pm_runtime_put_noidle(&data->client->dev);
+
                return ret;
        }
 
@@ -811,6 +817,7 @@ static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
 
        ret =  bmc150_accel_setup_any_motion_interrupt(data, state);
        if (ret < 0) {
+               bmc150_accel_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -846,7 +853,7 @@ static const struct attribute_group bmc150_accel_attrs_group = {
 
 static const struct iio_event_spec bmc150_accel_event = {
                .type = IIO_EV_TYPE_ROC,
-               .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING,
+               .dir = IIO_EV_DIR_EITHER,
                .mask_separate = BIT(IIO_EV_INFO_VALUE) |
                                 BIT(IIO_EV_INFO_ENABLE) |
                                 BIT(IIO_EV_INFO_PERIOD)
@@ -1054,6 +1061,7 @@ static int bmc150_accel_data_rdy_trigger_set_state(struct iio_trigger *trig,
        else
                ret = bmc150_accel_setup_new_data_interrupt(data, state);
        if (ret < 0) {
+               bmc150_accel_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -1092,12 +1100,26 @@ static irqreturn_t bmc150_accel_event_handler(int irq, void *private)
        else
                dir = IIO_EV_DIR_RISING;
 
-       if (ret & BMC150_ACCEL_ANY_MOTION_MASK)
+       if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
+                                                       0,
+                                                       IIO_MOD_X,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y)
                iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
                                                        0,
-                                                       IIO_MOD_X_OR_Y_OR_Z,
+                                                       IIO_MOD_Y,
                                                        IIO_EV_TYPE_ROC,
-                                                       IIO_EV_DIR_EITHER),
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
+                                                       0,
+                                                       IIO_MOD_Z,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
                                                        data->timestamp);
 ack_intr_status:
        if (!data->dready_trigger_on)
@@ -1354,10 +1376,14 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
 {
        struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bmc150_accel_data *data = iio_priv(indio_dev);
+       int ret;
 
        dev_dbg(&data->client->dev,  __func__);
+       ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+       if (ret < 0)
+               return -EAGAIN;
 
-       return bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+       return 0;
 }
 
 static int bmc150_accel_runtime_resume(struct device *dev)
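
The error paths added throughout this file balance runtime-PM references: pm_runtime_get_sync() raises the device usage count even when it fails, so a failed power-up must be undone with pm_runtime_put_noidle(), and a failed interrupt setup must release the power state it just took. A sketch of the refcount rule, not the driver's exact code:

    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
            pm_runtime_put_noidle(dev);  /* get_sync took a ref even on failure */
            return ret;
    }
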
index a23e58c4ed99b222b674ce2386c9c36a3809efbe..320aa72c0349ecabeae7ea4a0bdb59d2c84cd63d 100644 (file)
@@ -269,6 +269,8 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index)
                return ret;
        }
 
+       ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 |
+                KXCJK1013_REG_CTRL1_BIT_GSEL1);
        ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3);
        ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4);
 
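
The two added lines turn a blind OR into a proper read-modify-write: without clearing the GSEL field first, switching from a range with both bits set to one with fewer set leaves stale bits behind. A worked bit example (values illustrative):

    u8 reg = 0x18;                    /* old field: GSEL1|GSEL0, bits 4:3       */

    reg |= 0x1 << 3;                  /* buggy: reg stays 0x18, range unchanged */
    reg = (reg & ~0x18) | (0x1 << 3); /* fixed: reg becomes 0x08                */
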
index b58d6302521f4d651359715331e83a5a416583a0..d095efe1ba149caa57136ec1f27f1c6caac10cd8 100644 (file)
@@ -152,6 +152,7 @@ static void men_z188_remove(struct mcb_device *dev)
 
 static const struct mcb_device_id men_z188_ids[] = {
        { .device = 0xbc },
+       { }
 };
 MODULE_DEVICE_TABLE(mcb, men_z188_ids);
 
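
Device-ID tables are walked until an all-zero terminator, so the missing { } sentinel let the bus matching code run off the end of the array. A sketch of the kind of walk that relies on it (simplified and hypothetical):

    const struct mcb_device_id *id;

    for (id = ids; id->device; id++)  /* stops at the empty { } entry */
            if (id->device == dev->id)
                    return id;
    return NULL;
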
index 1f967e0d688e47a084f29e2b484621016ada7c3c..d2fa526740ca188e00926f42b733af91d4dcd9df 100644 (file)
@@ -67,6 +67,9 @@
 #define BMG160_REG_INT_EN_0            0x15
 #define BMG160_DATA_ENABLE_INT         BIT(7)
 
+#define BMG160_REG_INT_EN_1            0x16
+#define BMG160_INT1_BIT_OD             BIT(1)
+
 #define BMG160_REG_XOUT_L              0x02
 #define BMG160_AXIS_TO_REG(axis)       (BMG160_REG_XOUT_L + (axis * 2))
 
@@ -82,6 +85,9 @@
 
 #define BMG160_REG_INT_STATUS_2        0x0B
 #define BMG160_ANY_MOTION_MASK         0x07
+#define BMG160_ANY_MOTION_BIT_X                BIT(0)
+#define BMG160_ANY_MOTION_BIT_Y                BIT(1)
+#define BMG160_ANY_MOTION_BIT_Z                BIT(2)
 
 #define BMG160_REG_TEMP                0x08
 #define BMG160_TEMP_CENTER_VAL         23
@@ -222,6 +228,19 @@ static int bmg160_chip_init(struct bmg160_data *data)
        data->slope_thres = ret;
 
        /* Set default interrupt mode */
+       ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_EN_1);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "Error reading reg_int_en_1\n");
+               return ret;
+       }
+       ret &= ~BMG160_INT1_BIT_OD;
+       ret = i2c_smbus_write_byte_data(data->client,
+                                       BMG160_REG_INT_EN_1, ret);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "Error writing reg_int_en_1\n");
+               return ret;
+       }
+
        ret = i2c_smbus_write_byte_data(data->client,
                                        BMG160_REG_INT_RST_LATCH,
                                        BMG160_INT_MODE_LATCH_INT |
@@ -250,6 +269,9 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
        if (ret < 0) {
                dev_err(&data->client->dev,
                        "Failed: bmg160_set_power_state for %d\n", on);
+               if (on)
+                       pm_runtime_put_noidle(&data->client->dev);
+
                return ret;
        }
 #endif
@@ -705,6 +727,7 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev,
 
        ret =  bmg160_setup_any_motion_interrupt(data, state);
        if (ret < 0) {
+               bmg160_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -743,7 +766,7 @@ static const struct attribute_group bmg160_attrs_group = {
 
 static const struct iio_event_spec bmg160_event = {
                .type = IIO_EV_TYPE_ROC,
-               .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING,
+               .dir = IIO_EV_DIR_EITHER,
                .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
                                       BIT(IIO_EV_INFO_ENABLE)
 };
@@ -871,6 +894,7 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig,
        else
                ret = bmg160_setup_new_data_interrupt(data, state);
        if (ret < 0) {
+               bmg160_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -908,10 +932,24 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
        else
                dir = IIO_EV_DIR_FALLING;
 
-       if (ret & BMG160_ANY_MOTION_MASK)
+       if (ret & BMG160_ANY_MOTION_BIT_X)
                iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
                                                        0,
-                                                       IIO_MOD_X_OR_Y_OR_Z,
+                                                       IIO_MOD_X,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMG160_ANY_MOTION_BIT_Y)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
+                                                       0,
+                                                       IIO_MOD_Y,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMG160_ANY_MOTION_BIT_Z)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
+                                                       0,
+                                                       IIO_MOD_Z,
                                                        IIO_EV_TYPE_ROC,
                                                        dir),
                                                        data->timestamp);
@@ -1169,8 +1207,15 @@ static int bmg160_runtime_suspend(struct device *dev)
 {
        struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bmg160_data *data = iio_priv(indio_dev);
+       int ret;
+
+       ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "set mode failed\n");
+               return -EAGAIN;
+       }
 
-       return bmg160_set_mode(data, BMG160_MODE_SUSPEND);
+       return 0;
 }
 
 static int bmg160_runtime_resume(struct device *dev)
index 2ed7905a068fc9033e8998e547bd7d750b1fedb9..fc55f0d15b70118a3a5be5fc221f151475f014e3 100644 (file)
@@ -1179,9 +1179,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                }
 
                ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
-               usb_fill_bulk_urb(xpad->bulk_out, udev,
-                               usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress),
-                               xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad);
+               if (usb_endpoint_is_bulk_out(ep_irq_in)) {
+                       usb_fill_bulk_urb(xpad->bulk_out, udev,
+                                         usb_sndbulkpipe(udev,
+                                                         ep_irq_in->bEndpointAddress),
+                                         xpad->bdata, XPAD_PKT_LEN,
+                                         xpad_bulk_out, xpad);
+               } else {
+                       usb_fill_int_urb(xpad->bulk_out, udev,
+                                        usb_sndintpipe(udev,
+                                                       ep_irq_in->bEndpointAddress),
+                                        xpad->bdata, XPAD_PKT_LEN,
+                                        xpad_bulk_out, xpad, 0);
+               }
 
                /*
                 * Submit the int URB immediately rather than waiting for open
index 3fcb6b3cb0bdaea5ba0f17dfd6a228c6cbd126ad..f2b97802640755aacfcde04005b125717cb63818 100644 (file)
@@ -428,14 +428,6 @@ static void elantech_report_trackpoint(struct psmouse *psmouse,
        int x, y;
        u32 t;
 
-       if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev,
-                         !tp_dev,
-                         psmouse_fmt("Unexpected trackpoint message\n"))) {
-               if (etd->debug == 1)
-                       elantech_packet_dump(psmouse);
-               return;
-       }
-
        t = get_unaligned_le32(&packet[0]);
 
        switch (t & ~7U) {
@@ -793,7 +785,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
        unsigned char packet_type = packet[3] & 0x03;
        bool sanity_check;
 
-       if ((packet[3] & 0x0f) == 0x06)
+       if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
                return PACKET_TRACKPOINT;
 
        /*
index 2a7a9174c702a44df3072a2f61c72a4ce16ecb05..f9472920d986368f7aa83eb7d0621489d774b050 100644 (file)
@@ -143,6 +143,10 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                (const char * const []){"LEN2001", NULL},
                1024, 5022, 2508, 4832
        },
+       {
+               (const char * const []){"LEN2006", NULL},
+               1264, 5675, 1171, 4688
+       },
        { }
 };
 
index 6ae3cdee0681a8008218fbcf25762280b64e48ce..cc4f9d80122ea618e7543f4885843359194770a7 100644 (file)
@@ -217,8 +217,9 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
        }
 
        ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
-                                            handle_level_irq, 0, 0,
-                                            IRQCHIP_SKIP_SET_WAKE);
+                                            handle_fasteoi_irq,
+                                            IRQ_NOREQUEST | IRQ_NOPROBE |
+                                            IRQ_NOAUTOEN, 0, 0);
        if (ret)
                goto err_domain_remove;
 
@@ -230,7 +231,6 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
                gc->unused = 0;
                gc->wake_enabled = ~0;
                gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
-               gc->chip_types[0].handler = handle_fasteoi_irq;
                gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
                gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
                gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
index b9f4fb808e49a4afefa0bf66c707dbc01d7c3fa2..5fb38a2ac2261ca06c5bb338ae044a9ed61dc361 100644 (file)
@@ -101,9 +101,9 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
        int parent_irq;
 
        parent_irq = irq_of_parse_and_map(dn, irq);
-       if (parent_irq < 0) {
+       if (!parent_irq) {
                pr_err("failed to map interrupt %d\n", irq);
-               return parent_irq;
+               return -EINVAL;
        }
 
        data->irq_map_mask |= be32_to_cpup(map_mask + irq);
index c15c840987d2808e82cf1b056c231005933c5f8b..14691a4cb84cdf82fb38eefc0081a07460efae7b 100644 (file)
@@ -135,9 +135,9 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
        __raw_writel(0xffffffff, data->base + CPU_CLEAR);
 
        data->parent_irq = irq_of_parse_and_map(np, 0);
-       if (data->parent_irq < 0) {
+       if (!data->parent_irq) {
                pr_err("failed to find parent interrupt\n");
-               ret = data->parent_irq;
+               ret = -EINVAL;
                goto out_unmap;
        }
 
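
This fix and the bcm7120 one above correct the same misuse: irq_of_parse_and_map() returns an unsigned int and signals failure with 0, so a < 0 comparison can never be true. The corrected pattern, sketched:

    unsigned int virq = irq_of_parse_and_map(np, 0);
    if (!virq)           /* 0 means no mapping; the return is never negative */
            return -EINVAL;
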
index dcbd8589f0c4fc29034da257fa907612dbd3f02e..84b35925ee4dfba74eecaefe85913ee471a74494 100644 (file)
@@ -203,7 +203,7 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        if (!skb)
                goto done;
 
-       if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+       if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                err = -EFAULT;
                goto done;
        }
index b6d64f546574aa223a0be4334c6cd07246aebdca..d6607ee9c85506bb4fcc5d2077b8dc4e582d4b4b 100644 (file)
@@ -148,6 +148,8 @@ config MACVTAP
 
 config IPVLAN
     tristate "IP-VLAN support"
+    depends on INET
+    depends on IPV6
     ---help---
       This allows one to create virtual devices off of a main interface
       and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
index b9625968daacc0eb89c0f7371a3a4e70242f95ce..4f4c2a7888e5d74ee06ae58df8feaf5f1dea3123 100644 (file)
@@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
+{
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+       reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
+       core_writel(priv, reg, CORE_WATCHDOG_CTRL);
+
+       do {
+               reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+               if (!(reg & SOFTWARE_RESET))
+                       break;
+
+               usleep_range(1000, 2000);
+       } while (timeout-- > 0);
+
+       if (timeout == 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 {
        const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
                *base = of_iomap(dn, i);
                if (*base == NULL) {
                        pr_err("unable to find register: %s\n", reg_names[i]);
-                       return -ENODEV;
+                       ret = -ENOMEM;
+                       goto out_unmap;
                }
                base++;
        }
 
+       ret = bcm_sf2_sw_rst(priv);
+       if (ret) {
+               pr_err("unable to software reset switch: %d\n", ret);
+               goto out_unmap;
+       }
+
        /* Disable all interrupts and request them */
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
@@ -484,7 +514,8 @@ out_free_irq0:
 out_unmap:
        base = &priv->core;
        for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
-               iounmap(*base);
+               if (*base)
+                       iounmap(*base);
                base++;
        }
        return ret;
@@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
        return 0;
 }
 
-static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
-{
-       unsigned int timeout = 1000;
-       u32 reg;
-
-       reg = core_readl(priv, CORE_WATCHDOG_CTRL);
-       reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
-       core_writel(priv, reg, CORE_WATCHDOG_CTRL);
-
-       do {
-               reg = core_readl(priv, CORE_WATCHDOG_CTRL);
-               if (!(reg & SOFTWARE_RESET))
-                       break;
-
-               usleep_range(1000, 2000);
-       } while (timeout-- > 0);
-
-       if (timeout == 0)
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 {
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
index 1ed1fbba5d58140912258d80bf8279bb9a2686a1..df76050d0a9d26dc5a5b106a0ac44a18d84441b9 100644 (file)
@@ -155,6 +155,7 @@ source "drivers/net/ethernet/qualcomm/Kconfig"
 source "drivers/net/ethernet/realtek/Kconfig"
 source "drivers/net/ethernet/renesas/Kconfig"
 source "drivers/net/ethernet/rdc/Kconfig"
+source "drivers/net/ethernet/rocker/Kconfig"
 
 config S6GMAC
        tristate "S6105 GMAC ethernet support"
index 6e0b629e9859a85b13d82ce3b0d9fb6a7eece3e3..bf56f8b36e90cbb2fcef22627fadff0041fef897 100644 (file)
@@ -65,6 +65,7 @@ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
 obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
 obj-$(CONFIG_SH_ETH) += renesas/
 obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
+obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
 obj-$(CONFIG_S6GMAC) += s6gmac.o
 obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
 obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
index c4bd025c74c96496a4709a4505da8a2d0f028df4..336ef3cf5773daa938cea79c92e6aa737f0a753b 100644 (file)
@@ -12537,7 +12537,7 @@ static int bnx2x_validate_addr(struct net_device *dev)
 }
 
 static int bnx2x_get_phys_port_id(struct net_device *netdev,
-                                 struct netdev_phys_port_id *ppid)
+                                 struct netdev_phys_item_id *ppid)
 {
        struct bnx2x *bp = netdev_priv(netdev);
 
index fcbf1255ae5a919ad5fb052d9bcbb7045534cc9e..f2fadb053d526057e7651bef013de0b48817978a 100644 (file)
@@ -714,6 +714,98 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
        }
 }
 
+static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
+       u32 reg;
+
+       if (enable && !priv->clk_eee_enabled) {
+               clk_prepare_enable(priv->clk_eee);
+               priv->clk_eee_enabled = true;
+       }
+
+       reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
+       if (enable)
+               reg |= EEE_EN;
+       else
+               reg &= ~EEE_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
+
+       /* Enable EEE and switch to a 27MHz clock automatically */
+       reg = __raw_readl(priv->base + off);
+       if (enable)
+               reg |= TBUF_EEE_EN | TBUF_PM_EN;
+       else
+               reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
+       __raw_writel(reg, priv->base + off);
+
+       /* Do the same thing for RBUF */
+       reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
+       if (enable)
+               reg |= RBUF_EEE_EN | RBUF_PM_EN;
+       else
+               reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
+       bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
+
+       if (!enable && priv->clk_eee_enabled) {
+               clk_disable_unprepare(priv->clk_eee);
+               priv->clk_eee_enabled = false;
+       }
+
+       priv->eee.eee_enabled = enable;
+       priv->eee.eee_active = enable;
+}
+
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct ethtool_eee *p = &priv->eee;
+
+       if (GENET_IS_V1(priv))
+               return -EOPNOTSUPP;
+
+       e->eee_enabled = p->eee_enabled;
+       e->eee_active = p->eee_active;
+       e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
+
+       return phy_ethtool_get_eee(priv->phydev, e);
+}
+
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct ethtool_eee *p = &priv->eee;
+       int ret = 0;
+
+       if (GENET_IS_V1(priv))
+               return -EOPNOTSUPP;
+
+       p->eee_enabled = e->eee_enabled;
+
+       if (!p->eee_enabled) {
+               bcmgenet_eee_enable_set(dev, false);
+       } else {
+               ret = phy_init_eee(priv->phydev, 0);
+               if (ret) {
+                       netif_err(priv, hw, dev, "EEE initialization failed\n");
+                       return ret;
+               }
+
+               bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
+               bcmgenet_eee_enable_set(dev, true);
+       }
+
+       return phy_ethtool_set_eee(priv->phydev, e);
+}
+
+static int bcmgenet_nway_reset(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       return genphy_restart_aneg(priv->phydev);
+}
+
 /* standard ethtool support functions. */
 static struct ethtool_ops bcmgenet_ethtool_ops = {
        .get_strings            = bcmgenet_get_strings,
@@ -727,6 +819,9 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
        .set_msglevel           = bcmgenet_set_msglevel,
        .get_wol                = bcmgenet_get_wol,
        .set_wol                = bcmgenet_set_wol,
+       .get_eee                = bcmgenet_get_eee,
+       .set_eee                = bcmgenet_set_eee,
+       .nway_reset             = bcmgenet_nway_reset,
 };
 
 /* Power down the unimac, based on mode. */
@@ -2585,6 +2680,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
        if (IS_ERR(priv->clk_wol))
                dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
 
+       priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
+       if (IS_ERR(priv->clk_eee)) {
+               dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
+               priv->clk_eee = NULL;
+       }
+
        err = reset_umac(priv);
        if (err)
                goto err_clk_disable;
@@ -2735,6 +2836,9 @@ static int bcmgenet_resume(struct device *d)
 
        phy_resume(priv->phydev);
 
+       if (priv->eee.eee_enabled)
+               bcmgenet_eee_enable_set(dev, true);
+
        bcmgenet_netif_start(dev);
 
        return 0;
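
Since bcmgenet_get_eee()/bcmgenet_set_eee() plug into the generic ethtool operations, the EEE state programmed above can be read back from userspace through the standard SIOCETHTOOL ioctl. A minimal sketch, assuming an interface named "eth0" (a placeholder) and trimming error handling:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eee;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("eee_enabled=%u eee_active=%u tx_lpi_timer=%u\n",
		       eee.eee_enabled, eee.eee_active, eee.tx_lpi_timer);

	close(fd);
	return 0;
}

On GENET V1 the new ops return -EOPNOTSUPP, so the ioctl fails there by design.
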
index c4ca7282a6011c382d08dadc69524e5cf9912ef8..b36ddec0cc0a3c5d1c64f9c2818a4b44464c41f8 100644 (file)
@@ -185,6 +185,21 @@ struct bcmgenet_mib_counters {
 #define UMAC_MAC1                      0x010
 #define UMAC_MAX_FRAME_LEN             0x014
 
+#define UMAC_EEE_CTRL                  0x064
+#define  EN_LPI_RX_PAUSE               (1 << 0)
+#define  EN_LPI_TX_PFC                 (1 << 1)
+#define  EN_LPI_TX_PAUSE               (1 << 2)
+#define  EEE_EN                                (1 << 3)
+#define  RX_FIFO_CHECK                 (1 << 4)
+#define  EEE_TX_CLK_DIS                        (1 << 5)
+#define  DIS_EEE_10M                   (1 << 6)
+#define  LP_IDLE_PREDICTION_MODE       (1 << 7)
+
+#define UMAC_EEE_LPI_TIMER             0x068
+#define UMAC_EEE_WAKE_TIMER            0x06C
+#define UMAC_EEE_REF_COUNT             0x070
+#define  EEE_REFERENCE_COUNT_MASK      0xffff
+
 #define UMAC_TX_FLUSH                  0x334
 
 #define UMAC_MIB_START                 0x400
@@ -232,6 +247,10 @@ struct bcmgenet_mib_counters {
 #define  RBUF_RXCHK_EN                 (1 << 0)
 #define  RBUF_SKIP_FCS                 (1 << 4)
 
+#define RBUF_ENERGY_CTRL               0x9c
+#define  RBUF_EEE_EN                   (1 << 0)
+#define  RBUF_PM_EN                    (1 << 1)
+
 #define RBUF_TBUF_SIZE_CTRL            0xb4
 
 #define RBUF_HFB_CTRL_V1               0x38
@@ -247,6 +266,9 @@ struct bcmgenet_mib_counters {
 
 #define TBUF_CTRL                      0x00
 #define TBUF_BP_MC                     0x0C
+#define TBUF_ENERGY_CTRL               0x14
+#define  TBUF_EEE_EN                   (1 << 0)
+#define  TBUF_PM_EN                    (1 << 1)
 
 #define TBUF_CTRL_V1                   0x80
 #define TBUF_BP_MC_V1                  0xA0
@@ -551,6 +573,8 @@ struct bcmgenet_priv {
        struct device_node *phy_dn;
        struct mii_bus *mii_bus;
        u16 gphy_rev;
+       struct clk *clk_eee;
+       bool clk_eee_enabled;
 
        /* PHY device variables */
        int old_link;
@@ -587,6 +611,8 @@ struct bcmgenet_priv {
        u32 wolopts;
 
        struct bcmgenet_mib_counters mib;
+
+       struct ethtool_eee eee;
 };
 
 #define GENET_IO_MACRO(name, offset)                                   \
index 2dc001559a971845e36da7ed345018c671661eb4..43fd1b72c1eadac57fbee0fd91855aa558b6b415 100644 (file)
@@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp)
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 
-               if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+               if (tnapi->prodring.rx_std &&
+                   tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
                        tg3_free_rings(tp);
                        return -ENOMEM;
                }
index a576da1eedf4f62fd2173e47af6cae0c55170409..3aea82bb9039df40f67e754020015ab591944799 100644 (file)
@@ -210,114 +210,25 @@ struct filter_entry {
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 
-#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
-
-static const struct pci_device_id cxgb4_pci_tbl[] = {
-       CH_DEVICE(0xa000, 0),  /* PE10K */
-       CH_DEVICE(0x4001, -1),
-       CH_DEVICE(0x4002, -1),
-       CH_DEVICE(0x4003, -1),
-       CH_DEVICE(0x4004, -1),
-       CH_DEVICE(0x4005, -1),
-       CH_DEVICE(0x4006, -1),
-       CH_DEVICE(0x4007, -1),
-       CH_DEVICE(0x4008, -1),
-       CH_DEVICE(0x4009, -1),
-       CH_DEVICE(0x400a, -1),
-       CH_DEVICE(0x400d, -1),
-       CH_DEVICE(0x400e, -1),
-       CH_DEVICE(0x4080, -1),
-       CH_DEVICE(0x4081, -1),
-       CH_DEVICE(0x4082, -1),
-       CH_DEVICE(0x4083, -1),
-       CH_DEVICE(0x4084, -1),
-       CH_DEVICE(0x4085, -1),
-       CH_DEVICE(0x4086, -1),
-       CH_DEVICE(0x4087, -1),
-       CH_DEVICE(0x4088, -1),
-       CH_DEVICE(0x4401, 4),
-       CH_DEVICE(0x4402, 4),
-       CH_DEVICE(0x4403, 4),
-       CH_DEVICE(0x4404, 4),
-       CH_DEVICE(0x4405, 4),
-       CH_DEVICE(0x4406, 4),
-       CH_DEVICE(0x4407, 4),
-       CH_DEVICE(0x4408, 4),
-       CH_DEVICE(0x4409, 4),
-       CH_DEVICE(0x440a, 4),
-       CH_DEVICE(0x440d, 4),
-       CH_DEVICE(0x440e, 4),
-       CH_DEVICE(0x4480, 4),
-       CH_DEVICE(0x4481, 4),
-       CH_DEVICE(0x4482, 4),
-       CH_DEVICE(0x4483, 4),
-       CH_DEVICE(0x4484, 4),
-       CH_DEVICE(0x4485, 4),
-       CH_DEVICE(0x4486, 4),
-       CH_DEVICE(0x4487, 4),
-       CH_DEVICE(0x4488, 4),
-       CH_DEVICE(0x5001, 4),
-       CH_DEVICE(0x5002, 4),
-       CH_DEVICE(0x5003, 4),
-       CH_DEVICE(0x5004, 4),
-       CH_DEVICE(0x5005, 4),
-       CH_DEVICE(0x5006, 4),
-       CH_DEVICE(0x5007, 4),
-       CH_DEVICE(0x5008, 4),
-       CH_DEVICE(0x5009, 4),
-       CH_DEVICE(0x500A, 4),
-       CH_DEVICE(0x500B, 4),
-       CH_DEVICE(0x500C, 4),
-       CH_DEVICE(0x500D, 4),
-       CH_DEVICE(0x500E, 4),
-       CH_DEVICE(0x500F, 4),
-       CH_DEVICE(0x5010, 4),
-       CH_DEVICE(0x5011, 4),
-       CH_DEVICE(0x5012, 4),
-       CH_DEVICE(0x5013, 4),
-       CH_DEVICE(0x5014, 4),
-       CH_DEVICE(0x5015, 4),
-       CH_DEVICE(0x5080, 4),
-       CH_DEVICE(0x5081, 4),
-       CH_DEVICE(0x5082, 4),
-       CH_DEVICE(0x5083, 4),
-       CH_DEVICE(0x5084, 4),
-       CH_DEVICE(0x5085, 4),
-       CH_DEVICE(0x5086, 4),
-       CH_DEVICE(0x5087, 4),
-       CH_DEVICE(0x5088, 4),
-       CH_DEVICE(0x5401, 4),
-       CH_DEVICE(0x5402, 4),
-       CH_DEVICE(0x5403, 4),
-       CH_DEVICE(0x5404, 4),
-       CH_DEVICE(0x5405, 4),
-       CH_DEVICE(0x5406, 4),
-       CH_DEVICE(0x5407, 4),
-       CH_DEVICE(0x5408, 4),
-       CH_DEVICE(0x5409, 4),
-       CH_DEVICE(0x540A, 4),
-       CH_DEVICE(0x540B, 4),
-       CH_DEVICE(0x540C, 4),
-       CH_DEVICE(0x540D, 4),
-       CH_DEVICE(0x540E, 4),
-       CH_DEVICE(0x540F, 4),
-       CH_DEVICE(0x5410, 4),
-       CH_DEVICE(0x5411, 4),
-       CH_DEVICE(0x5412, 4),
-       CH_DEVICE(0x5413, 4),
-       CH_DEVICE(0x5414, 4),
-       CH_DEVICE(0x5415, 4),
-       CH_DEVICE(0x5480, 4),
-       CH_DEVICE(0x5481, 4),
-       CH_DEVICE(0x5482, 4),
-       CH_DEVICE(0x5483, 4),
-       CH_DEVICE(0x5484, 4),
-       CH_DEVICE(0x5485, 4),
-       CH_DEVICE(0x5486, 4),
-       CH_DEVICE(0x5487, 4),
-       CH_DEVICE(0x5488, 4),
-       { 0, }
-};
+/* Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+       static struct pci_device_id cxgb4_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+
+/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
+ * called for both.
+ */
+#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+               {PCI_VDEVICE(CHELSIO, (devid)), 4}
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+               { 0, } \
+       }
+
+#include "t4_pci_id_tbl.h"
 
 #define FW4_FNAME "cxgb4/t4fw.bin"
 #define FW5_FNAME "cxgb4/t5fw.bin"
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
new file mode 100644 (file)
index 0000000..9e4f95a
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * This file is part of the Chelsio T4/T5 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/* The code can define cpp macros for creating a PCI Device ID Table. This is
+ * useful because it allows the PCI ID Table to be maintained in a single place.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ *   -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ *   -- The PCI Function Number to use in the PCI Device ID Table.  "0"
+ *   -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ *   -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ *   -- If defined, create a PCI Device ID Table with both
+ *   -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ *   -- Used for the individual PCI Device ID entries.  Note that we will
+ *   -- be adding a trailing comma (",") after all of the entries (and
+ *   -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ *   -- Used to finish the definition of the PCI ID Table.  Note that we
+ *   -- will be adding a trailing semi-colon (";") here.
+ */
+#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+
+#ifndef CH_PCI_DEVICE_ID_FUNCTION
+#error CH_PCI_DEVICE_ID_FUNCTION not defined!
+#endif
+#ifndef CH_PCI_ID_TABLE_ENTRY
+#error CH_PCI_ID_TABLE_ENTRY not defined!
+#endif
+#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined!
+#endif
+
+/* T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where:
+ *
+ *   V  = "4" for T4; "5" for T5, etc.
+ *   F  = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ *   PP = adapter product designation
+ *
+ * We use this consistency in order to create the proper PCI Device IDs
+ * for the specified CH_PCI_DEVICE_ID_FUNCTION.
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION2
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+       CH_PCI_ID_TABLE_ENTRY((devid) | \
+                             ((CH_PCI_DEVICE_ID_FUNCTION) << 8))
+#else
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+       CH_PCI_ID_TABLE_ENTRY((devid) | \
+                             ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \
+       CH_PCI_ID_TABLE_ENTRY((devid) | \
+                             ((CH_PCI_DEVICE_ID_FUNCTION2) << 8))
+#endif
+
+CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+       /* T4 adapters:
+        */
+       CH_PCI_ID_TABLE_FENTRY(0x4000), /* T440-dbg */
+       CH_PCI_ID_TABLE_FENTRY(0x4001), /* T420-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4002), /* T422-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4003), /* T440-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4004), /* T420-bch */
+       CH_PCI_ID_TABLE_FENTRY(0x4005), /* T440-bch */
+       CH_PCI_ID_TABLE_FENTRY(0x4006), /* T440-ch */
+       CH_PCI_ID_TABLE_FENTRY(0x4007), /* T420-so */
+       CH_PCI_ID_TABLE_FENTRY(0x4008), /* T420-cx */
+       CH_PCI_ID_TABLE_FENTRY(0x4009), /* T420-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x400a), /* T404-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x400b), /* B420-sr */
+       CH_PCI_ID_TABLE_FENTRY(0x400c), /* B404-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x400d), /* T480-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x400e), /* T440-LP-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4080), /* Custom T480-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4081), /* Custom T440-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4082), /* Custom T420-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4083), /* Custom T420-xaui */
+       CH_PCI_ID_TABLE_FENTRY(0x4084), /* Custom T440-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4085), /* Custom T420-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4086), /* Custom T440-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x4087), /* Custom T440-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x4088), /* Custom T440 2-xaui, 2-xfi */
+
+       /* T5 adapters:
+        */
+       CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */
+       CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */
+       CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */
+       CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */
+       CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */
+       CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */
+       CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */
+       CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
+       CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
+       CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */
+       CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
+
+#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
+
+#endif /* __T4_PCI_ID_TBL_H__ */
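
To see how the shared header reproduces the hand-written table deleted from cxgb4_main.c: with CH_PCI_DEVICE_ID_FUNCTION = 0x4 and CH_PCI_DEVICE_ID_FUNCTION2 = 0x0 as defined there, each CH_PCI_ID_TABLE_FENTRY emits one entry per PCI function, following the 0xVFPP scheme. For example, CH_PCI_ID_TABLE_FENTRY(0x4001) expands (illustration only) to:

{PCI_VDEVICE(CHELSIO, (0x4001) | ((0x4) << 8)), 4},	/* 0x4401: T420-cr, PF4 */
{PCI_VDEVICE(CHELSIO, (0x4001) | ((0x0) << 8)), 4},	/* 0x4001: T420-cr, PF0-3 */

which covers the 0x4001/0x4401 pair present in the removed table.
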
index 099f7ce056f290fdcc158b14ed91eef33fc86e56..ad88246a428e6146ed0acb099113cc676666f09a 100644 (file)
@@ -2908,67 +2908,18 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
 }
 
-/*
- * PCI Device registration data structures.
- */
-#define CH_DEVICE(devid) \
-       { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }
-
-static const struct pci_device_id cxgb4vf_pci_tbl[] = {
-       CH_DEVICE(0xb000),      /* PE10K FPGA */
-       CH_DEVICE(0x4801),      /* T420-cr */
-       CH_DEVICE(0x4802),      /* T422-cr */
-       CH_DEVICE(0x4803),      /* T440-cr */
-       CH_DEVICE(0x4804),      /* T420-bch */
-       CH_DEVICE(0x4805),      /* T440-bch */
-       CH_DEVICE(0x4806),      /* T460-ch */
-       CH_DEVICE(0x4807),      /* T420-so */
-       CH_DEVICE(0x4808),      /* T420-cx */
-       CH_DEVICE(0x4809),      /* T420-bt */
-       CH_DEVICE(0x480a),      /* T404-bt */
-       CH_DEVICE(0x480d),      /* T480-cr */
-       CH_DEVICE(0x480e),      /* T440-lp-cr */
-       CH_DEVICE(0x4880),
-       CH_DEVICE(0x4881),
-       CH_DEVICE(0x4882),
-       CH_DEVICE(0x4883),
-       CH_DEVICE(0x4884),
-       CH_DEVICE(0x4885),
-       CH_DEVICE(0x4886),
-       CH_DEVICE(0x4887),
-       CH_DEVICE(0x4888),
-       CH_DEVICE(0x5801),      /* T520-cr */
-       CH_DEVICE(0x5802),      /* T522-cr */
-       CH_DEVICE(0x5803),      /* T540-cr */
-       CH_DEVICE(0x5804),      /* T520-bch */
-       CH_DEVICE(0x5805),      /* T540-bch */
-       CH_DEVICE(0x5806),      /* T540-ch */
-       CH_DEVICE(0x5807),      /* T520-so */
-       CH_DEVICE(0x5808),      /* T520-cx */
-       CH_DEVICE(0x5809),      /* T520-bt */
-       CH_DEVICE(0x580a),      /* T504-bt */
-       CH_DEVICE(0x580b),      /* T520-sr */
-       CH_DEVICE(0x580c),      /* T504-bt */
-       CH_DEVICE(0x580d),      /* T580-cr */
-       CH_DEVICE(0x580e),      /* T540-lp-cr */
-       CH_DEVICE(0x580f),      /* Amsterdam */
-       CH_DEVICE(0x5810),      /* T580-lp-cr */
-       CH_DEVICE(0x5811),      /* T520-lp-cr */
-       CH_DEVICE(0x5812),      /* T560-cr */
-       CH_DEVICE(0x5813),      /* T580-cr */
-       CH_DEVICE(0x5814),      /* T580-so-cr */
-       CH_DEVICE(0x5815),      /* T502-bt */
-       CH_DEVICE(0x5880),
-       CH_DEVICE(0x5881),
-       CH_DEVICE(0x5882),
-       CH_DEVICE(0x5883),
-       CH_DEVICE(0x5884),
-       CH_DEVICE(0x5885),
-       CH_DEVICE(0x5886),
-       CH_DEVICE(0x5887),
-       CH_DEVICE(0x5888),
-       { 0, }
-};
+/* Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+       static struct pci_device_id cxgb4vf_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION      0x8
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+               { PCI_VDEVICE(CHELSIO, (devid)), 0 }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
+
+#include "../cxgb4/t4_pci_id_tbl.h"
 
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_AUTHOR("Chelsio Communications");
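
cxgb4vf consumes the same header with CH_PCI_DEVICE_ID_FUNCTION = 0x8 and no FUNCTION2, so each FENTRY yields a single VF-function ID; e.g. CH_PCI_ID_TABLE_FENTRY(0x4001) becomes (illustration only):

{ PCI_VDEVICE(CHELSIO, (0x4001) | ((0x8) << 8)), 0 },	/* 0x4801: T420-cr VF */

matching the 0x4801 entry deleted from the old cxgb4vf table.
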
index 9070b98e15c4d34a7715f8a9b1752463a352466c..e0ab7673afe7a912a236c091f12fdc54b8fd160d 100644 (file)
@@ -4311,11 +4311,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
                return -EOPNOTSUPP;
 
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (!br_spec)
+               return -EINVAL;
 
        nla_for_each_nested(attr, br_spec, rem) {
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
                        continue;
 
+               if (nla_len(attr) < sizeof(mode))
+                       return -EINVAL;
+
                mode = nla_get_u16(attr);
                if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
                        return -EINVAL;
@@ -4362,7 +4367,8 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
                                       hsw_mode == PORT_FWD_TYPE_VEPA ?
-                                      BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
+                                      BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
+                                      0, 0);
 }
 
 #ifdef CONFIG_BE2NET_VXLAN
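
The two checks added to be_ndo_bridge_setlink() close a NULL dereference (IFLA_AF_SPEC absent) and an out-of-bounds read (IFLA_BRIDGE_MODE payload shorter than a u16). The general validate-then-read shape for nested netlink attributes, as a standalone kernel-style sketch (the helper name is illustrative):

#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>

static int parse_bridge_mode(struct nlmsghdr *nlh, u16 *mode)
{
	struct nlattr *br_spec, *attr;
	int rem;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)				/* attribute may be absent */
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		if (nla_len(attr) < sizeof(*mode))	/* reject short payloads */
			return -EINVAL;
		*mode = nla_get_u16(attr);
		return 0;
	}

	return -EINVAL;
}
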
index 7262077ad547f8f504a526f171fc2c001583cd79..9ae4270db0b332e61dd8d36de3fd7e2797d4a61e 100644 (file)
@@ -7511,7 +7511,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 
 #endif
 static int i40e_get_phys_port_id(struct net_device *netdev,
-                                struct netdev_phys_port_id *ppid)
+                                struct netdev_phys_item_id *ppid)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
@@ -7536,7 +7536,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
  */
 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                            struct net_device *dev,
-                           const unsigned char *addr,
+                           const unsigned char *addr, u16 vid,
                            u16 flags)
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
index b0e12e7c4a3d2e8df473caa218518af19c8c1fbc..3c0221620c9dc8abae3bf4edb06102475bd27df9 100644 (file)
@@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
        /* igb_get_stats64() might access the rings on this vector,
         * we must wait a grace period before freeing it.
         */
-       kfree_rcu(q_vector, rcu);
+       if (q_vector)
+               kfree_rcu(q_vector, rcu);
 }
 
 /**
@@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter)
        adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
 
        for (i = 0; i < adapter->num_q_vectors; i++) {
-               napi_synchronize(&(adapter->q_vector[i]->napi));
-               napi_disable(&(adapter->q_vector[i]->napi));
+               if (adapter->q_vector[i]) {
+                       napi_synchronize(&adapter->q_vector[i]->napi);
+                       napi_disable(&adapter->q_vector[i]->napi);
+               }
        }
 
 
@@ -3714,7 +3717,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               igb_free_tx_resources(adapter->tx_ring[i]);
+               if (adapter->tx_ring[i])
+                       igb_free_tx_resources(adapter->tx_ring[i]);
 }
 
 void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
@@ -3779,7 +3783,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               igb_clean_tx_ring(adapter->tx_ring[i]);
+               if (adapter->tx_ring[i])
+                       igb_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 /**
@@ -3816,7 +3821,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_free_rx_resources(adapter->rx_ring[i]);
+               if (adapter->rx_ring[i])
+                       igb_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -3871,7 +3877,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_clean_rx_ring(adapter->rx_ring[i]);
+               if (adapter->rx_ring[i])
+                       igb_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -7401,6 +7408,8 @@ static int igb_resume(struct device *dev)
        pci_restore_state(pdev);
        pci_save_state(pdev);
 
+       if (!pci_device_is_present(pdev))
+               return -ENODEV;
        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
index be2989e600099266846fe8bf78487f9b3de63309..35e6fa643c7ebc8bfc9905d01f95313fb7a2d2e1 100644 (file)
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
                               ixgbe_dcb_82599.o ixgbe_dcb_nl.o
index 5032a602d5c98265d63037d6aac1e1bfc3188c76..b6137be43920ddd47f27b2684250105a4b575e28 100644 (file)
@@ -300,16 +300,17 @@ enum ixgbe_ring_f_enum {
        RING_F_ARRAY_SIZE      /* must be last in enum set */
 };
 
-#define IXGBE_MAX_RSS_INDICES  16
-#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 63      /* based on q_vector limit */
-#define IXGBE_MAX_FCOE_INDICES  8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define IXGBE_MAX_L2A_QUEUES 4
-#define IXGBE_BAD_L2A_QUEUE 3
-#define IXGBE_MAX_MACVLANS     31
-#define IXGBE_MAX_DCBMACVLANS  8
+#define IXGBE_MAX_RSS_INDICES          16
+#define IXGBE_MAX_RSS_INDICES_X550     64
+#define IXGBE_MAX_VMDQ_INDICES         64
+#define IXGBE_MAX_FDIR_INDICES         63      /* based on q_vector limit */
+#define IXGBE_MAX_FCOE_INDICES         8
+#define MAX_RX_QUEUES                  (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES                  (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_L2A_QUEUES           4
+#define IXGBE_BAD_L2A_QUEUE            3
+#define IXGBE_MAX_MACVLANS             31
+#define IXGBE_MAX_DCBMACVLANS          8
 
 struct ixgbe_ring_feature {
        u16 limit;      /* upper limit on feature indices */
@@ -553,11 +554,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
 
-static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
-{
-       writel(value, ring->tail);
-}
-
 #define IXGBE_RX_DESC(R, i)        \
        (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 #define IXGBE_TX_DESC(R, i)        \
@@ -769,6 +765,21 @@ struct ixgbe_adapter {
        unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
 };
 
+static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
+{
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               return IXGBE_MAX_RSS_INDICES;
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               return IXGBE_MAX_RSS_INDICES_X550;
+       default:
+               return 0;
+       }
+}
+
 struct ixgbe_fdir_filter {
        struct hlist_node fdir_node;
        union ixgbe_atr_input filter;
@@ -804,11 +815,15 @@ enum ixgbe_boards {
        board_82598,
        board_82599,
        board_X540,
+       board_X550,
+       board_X550EM_x,
 };
 
 extern struct ixgbe_info ixgbe_82598_info;
 extern struct ixgbe_info ixgbe_82599_info;
 extern struct ixgbe_info ixgbe_X540_info;
+extern struct ixgbe_info ixgbe_X550_info;
+extern struct ixgbe_info ixgbe_X550EM_x_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
 #endif
index 0e754b4c4220c1cc6bebe4ba9c7e49dc5b42cd92..9c66babd4edd6116139575fe4fab3cf37d8470e9 100644 (file)
@@ -1625,7 +1625,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
  *  @hw: pointer to hardware structure
  **/
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
 {
        u16 i;
        u16 j;
@@ -1636,7 +1636,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
 
        /* Include 0x0-0x3F in the checksum */
        for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
-               if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+               if (hw->eeprom.ops.read(hw, i, &word)) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }
@@ -1645,24 +1645,35 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
 
        /* Include all data from pointers except for the fw pointer */
        for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
-               hw->eeprom.ops.read(hw, i, &pointer);
+               if (hw->eeprom.ops.read(hw, i, &pointer)) {
+                       hw_dbg(hw, "EEPROM read failed\n");
+                       return IXGBE_ERR_EEPROM;
+               }
+
+               /* If the pointer seems invalid, skip it */
+               if (pointer == 0xFFFF || pointer == 0)
+                       continue;
 
-               /* Make sure the pointer seems valid */
-               if (pointer != 0xFFFF && pointer != 0) {
-                       hw->eeprom.ops.read(hw, pointer, &length);
+               if (hw->eeprom.ops.read(hw, pointer, &length)) {
+                       hw_dbg(hw, "EEPROM read failed\n");
+                       return IXGBE_ERR_EEPROM;
+               }
+
+               if (length == 0xFFFF || length == 0)
+                       continue;
 
-                       if (length != 0xFFFF && length != 0) {
-                               for (j = pointer+1; j <= pointer+length; j++) {
-                                       hw->eeprom.ops.read(hw, j, &word);
-                                       checksum += word;
-                               }
+               for (j = pointer + 1; j <= pointer + length; j++) {
+                       if (hw->eeprom.ops.read(hw, j, &word)) {
+                               hw_dbg(hw, "EEPROM read failed\n");
+                               return IXGBE_ERR_EEPROM;
                        }
+                       checksum += word;
                }
        }
 
        checksum = (u16)IXGBE_EEPROM_SUM - checksum;
 
-       return checksum;
+       return (s32)checksum;
 }
 
 /**
@@ -1686,26 +1697,33 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
         * EEPROM read fails
         */
        status = hw->eeprom.ops.read(hw, 0, &checksum);
+       if (status) {
+               hw_dbg(hw, "EEPROM read failed\n");
+               return status;
+       }
 
-       if (status == 0) {
-               checksum = hw->eeprom.ops.calc_checksum(hw);
-
-               hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+       status = hw->eeprom.ops.calc_checksum(hw);
+       if (status < 0)
+               return status;
 
-               /*
-                * Verify read checksum from EEPROM is the same as
-                * calculated checksum
-                */
-               if (read_checksum != checksum)
-                       status = IXGBE_ERR_EEPROM_CHECKSUM;
+       checksum = (u16)(status & 0xffff);
 
-               /* If the user cares, return the calculated checksum */
-               if (checksum_val)
-                       *checksum_val = checksum;
-       } else {
+       status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+       if (status) {
                hw_dbg(hw, "EEPROM read failed\n");
+               return status;
        }
 
+       /* Verify read checksum from EEPROM is the same as
+        * calculated checksum
+        */
+       if (read_checksum != checksum)
+               status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+       /* If the user cares, return the calculated checksum */
+       if (checksum_val)
+               *checksum_val = checksum;
+
        return status;
 }
 
@@ -1724,15 +1742,19 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
         * EEPROM read fails
         */
        status = hw->eeprom.ops.read(hw, 0, &checksum);
-
-       if (status == 0) {
-               checksum = hw->eeprom.ops.calc_checksum(hw);
-               status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
-                                             checksum);
-       } else {
+       if (status) {
                hw_dbg(hw, "EEPROM read failed\n");
+               return status;
        }
 
+       status = hw->eeprom.ops.calc_checksum(hw);
+       if (status < 0)
+               return status;
+
+       checksum = (u16)(status & 0xffff);
+
+       status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+
        return status;
 }
 
@@ -2469,7 +2491,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
  *  Acquires the SWFW semaphore through the GSSR register for the specified
  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
 {
        u32 gssr = 0;
        u32 swmask = mask;
@@ -2514,7 +2536,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
  *  Releases the SWFW semaphore through the GSSR register for the specified
  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
 {
        u32 gssr;
        u32 swmask = mask;
@@ -3446,23 +3468,34 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
  *  @buffer: contains the command to write and where the return status will
  *           be placed
  *  @length: length of buffer, must be multiple of 4 bytes
+ *  @timeout: time in ms to wait for command completion
+ *  @return_data: read and return data from the buffer (true) or not (false)
+ *  Needed because FW structures are big endian and decoding of
+ *  these fields can be 8 bit or 16 bit based on command. Decoding
+ *  is not easily understood without making a table of commands.
+ *  So we will leave this up to the caller to read back the data
+ *  in these cases.
  *
  *  Communicates with the manageability block.  On success return 0
  *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
  **/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
-                                       u32 length)
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+                                u32 length, u32 timeout,
+                                bool return_data)
 {
-       u32 hicr, i, bi;
+       u32 hicr, i, bi, fwsts;
        u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
-       u8 buf_len, dword_len;
+       u16 buf_len, dword_len;
 
-       if (length == 0 || length & 0x3 ||
-           length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
-               hw_dbg(hw, "Buffer length failure.\n");
+       if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+               hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
                return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
 
+       /* Set bit 9 of FWSTS clearing FW reset indication */
+       fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
+       IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
+
        /* Check that the host interface is enabled. */
        hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
        if ((hicr & IXGBE_HICR_EN) == 0) {
@@ -3470,7 +3503,12 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
                return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
 
-       /* Calculate length in DWORDs */
+       /* Calculate length in DWORDs. We must be DWORD aligned */
+       if ((length % (sizeof(u32))) != 0) {
+               hw_dbg(hw, "Buffer length failure, not aligned to dword");
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
        dword_len = length >> 2;
 
        /*
@@ -3484,7 +3522,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
        /* Setting this bit tells the ARC that a new command is pending. */
        IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
 
-       for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+       for (i = 0; i < timeout; i++) {
                hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
                if (!(hicr & IXGBE_HICR_C))
                        break;
@@ -3492,12 +3530,15 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
        }
 
        /* Check command successful completion. */
-       if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+       if ((timeout != 0 && i == timeout) ||
            (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
                hw_dbg(hw, "Command has failed with no status valid.\n");
                return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
 
+       if (!return_data)
+               return 0;
+
        /* Calculate length in DWORDs */
        dword_len = hdr_size >> 2;
 
@@ -3568,7 +3609,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 
        for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
                ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
-                                                      sizeof(fw_cmd));
+                                                      sizeof(fw_cmd),
+                                                      IXGBE_HI_COMMAND_TIMEOUT,
+                                                      true);
                if (ret_val != 0)
                        continue;
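
The u16 -> s32 signature change for calc_checksum (see ixgbe_calc_eeprom_checksum_generic above) works because a u16 checksum always fits in a non-negative s32, leaving negative values free to carry error codes, which the reworked callers unpack with "(u16)(status & 0xffff)". A toy sketch of the convention (names and values illustrative):

#include <linux/errno.h>
#include <linux/types.h>

/* Stand-in for eeprom.ops.calc_checksum: negative errno on failure,
 * otherwise the u16 checksum in bits 15:0 of a non-negative s32.
 */
static s32 calc_checksum_or_err(bool fail)
{
	return fail ? -EIO : (s32)0xBEEF;
}

static s32 check_eeprom(bool fail)
{
	s32 status = calc_checksum_or_err(fail);
	u16 checksum;

	if (status < 0)
		return status;			/* propagate the error */

	checksum = (u16)(status & 0xffff);	/* recover the u16 value */
	/* a real caller would compare this against the stored word */
	return (s32)checksum;
}
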
 
index 2ae5d4b8fc93e318bba749c6042affb8eb1616a7..8cfadcb2676ed3a30386927b359e70408e692d9a 100644 (file)
@@ -64,7 +64,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                       u16 *data);
 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                              u16 words, u16 *data);
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
                                           u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
@@ -84,8 +84,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
 void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
@@ -110,6 +110,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
                                 u8 build, u8 ver);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+                                u32 length, u32 timeout, bool return_data);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
 bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
 
index 26fd85e2bca5b06b69947dd00dedbd9b328829ea..e5be0dd508deab592d5219d031ccaff8f2662e9b 100644 (file)
@@ -2927,7 +2927,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
                max_combined = IXGBE_MAX_FDIR_INDICES;
        } else {
                /* support up to 16 queues with RSS */
-               max_combined = IXGBE_MAX_RSS_INDICES;
+               max_combined = ixgbe_max_rss_indices(adapter);
        }
 
        return max_combined;
@@ -2975,6 +2975,7 @@ static int ixgbe_set_channels(struct net_device *dev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        unsigned int count = ch->combined_count;
+       u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
 
        /* verify they are not requesting separate vectors */
        if (!count || ch->rx_count || ch->tx_count)
@@ -2991,9 +2992,9 @@ static int ixgbe_set_channels(struct net_device *dev,
        /* update feature limits from largest to smallest supported values */
        adapter->ring_feature[RING_F_FDIR].limit = count;
 
-       /* cap RSS limit at 16 */
-       if (count > IXGBE_MAX_RSS_INDICES)
-               count = IXGBE_MAX_RSS_INDICES;
+       /* cap RSS limit */
+       if (count > max_rss_indices)
+               count = max_rss_indices;
        adapter->ring_feature[RING_F_RSS].limit = count;
 
 #ifdef IXGBE_FCOE
index 932f77961d664dad24d8e6acaf54026be81b00d9..82d418729dd48c182162d37de78a3b5899183cb8 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/slab.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
+#include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
 
+#ifdef CONFIG_OF
+#include <linux/of_net.h>
+#endif
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
 #include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
@@ -65,15 +75,17 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "3.19.1-k"
+#define DRV_VERSION "4.0.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2014 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
-       [board_82598] = &ixgbe_82598_info,
-       [board_82599] = &ixgbe_82599_info,
-       [board_X540] = &ixgbe_X540_info,
+       [board_82598]           = &ixgbe_82598_info,
+       [board_82599]           = &ixgbe_82599_info,
+       [board_X540]            = &ixgbe_X540_info,
+       [board_X550]            = &ixgbe_X550_info,
+       [board_X550EM_x]        = &ixgbe_X550EM_x_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -115,6 +127,9 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
        /* required last entry */
        {0, }
 };
@@ -1416,40 +1431,21 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
-{
-       rx_ring->next_to_use = val;
-
-       /* update next to alloc since we have filled the ring */
-       rx_ring->next_to_alloc = val;
-       /*
-        * Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
-        */
-       wmb();
-       ixgbe_write_tail(rx_ring, val);
-}
-
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
                                    struct ixgbe_rx_buffer *bi)
 {
        struct page *page = bi->page;
-       dma_addr_t dma = bi->dma;
+       dma_addr_t dma;
 
        /* since we are recycling buffers we should seldom need to alloc */
-       if (likely(dma))
+       if (likely(page))
                return true;
 
        /* alloc new page for storage */
-       if (likely(!page)) {
-               page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
-               if (unlikely(!page)) {
-                       rx_ring->rx_stats.alloc_rx_page_failed++;
-                       return false;
-               }
-               bi->page = page;
+       page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
+       if (unlikely(!page)) {
+               rx_ring->rx_stats.alloc_rx_page_failed++;
+               return false;
        }
 
        /* map page for use */
@@ -1462,13 +1458,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_pages(page, ixgbe_rx_pg_order(rx_ring));
-               bi->page = NULL;
 
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }
 
        bi->dma = dma;
+       bi->page = page;
        bi->page_offset = 0;
 
        return true;
@@ -1512,16 +1508,28 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
                        i -= rx_ring->count;
                }
 
-               /* clear the hdr_addr for the next_to_use descriptor */
-               rx_desc->read.hdr_addr = 0;
+               /* clear the status bits for the next_to_use descriptor */
+               rx_desc->wb.upper.status_error = 0;
 
                cleaned_count--;
        } while (cleaned_count);
 
        i += rx_ring->count;
 
-       if (rx_ring->next_to_use != i)
-               ixgbe_release_rx_desc(rx_ring, i);
+       if (rx_ring->next_to_use != i) {
+               rx_ring->next_to_use = i;
+
+               /* update next to alloc since we have filled the ring */
+               rx_ring->next_to_alloc = i;
+
+               /* Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.  (Only
+                * applicable for weak-ordered memory model archs,
+                * such as IA-64).
+                */
+               wmb();
+               writel(i, rx_ring->tail);
+       }
 }
 
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
@@ -1798,9 +1806,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
        /* transfer page from old buffer to new buffer */
-       new_buff->page = old_buff->page;
-       new_buff->dma = old_buff->dma;
-       new_buff->page_offset = old_buff->page_offset;
+       *new_buff = *old_buff;
 
        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
@@ -1809,6 +1815,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
                                         DMA_FROM_DEVICE);
 }
 
+static inline bool ixgbe_page_is_reserved(struct page *page)
+{
+       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 /**
  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -1844,12 +1855,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-               /* we can reuse buffer as-is, just make sure it is local */
-               if (likely(page_to_nid(page) == numa_node_id()))
+               /* page is not reserved, we can reuse buffer as-is */
+               if (likely(!ixgbe_page_is_reserved(page)))
                        return true;
 
                /* this page cannot be reused so discard it */
-               put_page(page);
+               __free_pages(page, ixgbe_rx_pg_order(rx_ring));
                return false;
        }
 
@@ -1857,7 +1868,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                        rx_buffer->page_offset, size, truesize);
 
        /* avoid re-using remote pages */
-       if (unlikely(page_to_nid(page) != numa_node_id()))
+       if (unlikely(ixgbe_page_is_reserved(page)))
                return false;
 
 #if (PAGE_SIZE < 8192)
@@ -1867,22 +1878,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
        /* flip page offset to other buffer */
        rx_buffer->page_offset ^= truesize;
-
-       /* Even if we own the page, we are not allowed to use atomic_set()
-        * This would break get_page_unless_zero() users.
-        */
-       atomic_inc(&page->_count);
 #else
        /* move offset up to the next cache line */
        rx_buffer->page_offset += truesize;
 
        if (rx_buffer->page_offset > last_offset)
                return false;
-
-       /* bump ref count on page before it is given to the stack */
-       get_page(page);
 #endif
 
+       /* Even if we own the page, we are not allowed to use atomic_set()
+        * This would break get_page_unless_zero() users.
+        */
+       atomic_inc(&page->_count);
+
        return true;
 }
 
@@ -1945,6 +1953,8 @@ dma_sync:
                                              rx_buffer->page_offset,
                                              ixgbe_rx_bufsz(rx_ring),
                                              DMA_FROM_DEVICE);
+
+               rx_buffer->skb = NULL;
        }
 
        /* pull page into skb */
@@ -1962,8 +1972,6 @@ dma_sync:
        }
 
        /* clear contents of buffer_info */
-       rx_buffer->skb = NULL;
-       rx_buffer->dma = 0;
        rx_buffer->page = NULL;
 
        return skb;
@@ -3214,7 +3222,9 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 reta = 0;
        int i, j;
+       int reta_entries = 128;
        u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+       int indices_multi;
 
        /*
         * Program table for at least 2 queues w/ SR-IOV so that VFs can
@@ -3228,22 +3238,67 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
 
+       /* Fill out the redirection table as follows:
+        * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices
+        * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index
+        * X550: 512 (8 bit wide) entries containing 6 bit RSS index
+        */
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+               indices_multi = 0x11;
+       else
+               indices_multi = 0x1;
+
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+                       reta_entries = 512;
+       default:
+               break;
+       }
+
        /* Fill out redirection table */
-       for (i = 0, j = 0; i < 128; i++, j++) {
+       for (i = 0, j = 0; i < reta_entries; i++, j++) {
                if (j == rss_i)
                        j = 0;
-               /* reta = 4-byte sliding window of
-                * 0x00..(indices-1)(indices-1)00..etc. */
-               reta = (reta << 8) | (j * 0x11);
+               reta = (reta << 8) | (j * indices_multi);
+               if ((i & 3) == 3) {
+                       if (i < 128)
+                               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+                       else
+                               IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
+                                               reta);
+               }
+       }
+}
+
+static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 vfreta = 0;
+       u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+       unsigned int pf_pool = adapter->num_vfs;
+       int i, j;
+
+       /* Fill out hash function seeds */
+       for (i = 0; i < 10; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]);
+
+       /* Fill out the redirection table */
+       for (i = 0, j = 0; i < 64; i++, j++) {
+               if (j == rss_i)
+                       j = 0;
+               vfreta = (vfreta << 8) | j;
                if ((i & 3) == 3)
-                       IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+                       IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+                                       vfreta);
        }
 }
 
 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 mrqc = 0, rss_field = 0;
+       u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
        u32 rss_key[10];
        u32 rxcsum;
 
@@ -3289,9 +3344,24 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
                rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
 
        netdev_rss_key_fill(rss_key, sizeof(rss_key));
-       ixgbe_setup_reta(adapter, rss_key);
-       mrqc |= rss_field;
-       IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+       if ((hw->mac.type >= ixgbe_mac_X550) &&
+           (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+               unsigned int pf_pool = adapter->num_vfs;
+
+               /* Enable VF RSS mode */
+               mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+               /* Setup RSS through the VF registers */
+               ixgbe_setup_vfreta(adapter, rss_key);
+               vfmrqc = IXGBE_MRQC_RSSEN;
+               vfmrqc |= rss_field;
+               IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+       } else {
+               ixgbe_setup_reta(adapter, rss_key);
+               mrqc |= rss_field;
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+       }
 }
 
 /**
@@ -3968,8 +4038,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                 * if SR-IOV and VMDQ are disabled - otherwise ensure
                 * that hardware VLAN filters remain enabled.
                 */
-               if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
-                                       IXGBE_FLAG_SRIOV_ENABLED)))
+               if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+                                     IXGBE_FLAG_SRIOV_ENABLED))
                        vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
@@ -4344,29 +4414,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
-               struct ixgbe_rx_buffer *rx_buffer;
+               struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
-               rx_buffer = &rx_ring->rx_buffer_info[i];
                if (rx_buffer->skb) {
                        struct sk_buff *skb = rx_buffer->skb;
-                       if (IXGBE_CB(skb)->page_released) {
+                       if (IXGBE_CB(skb)->page_released)
                                dma_unmap_page(dev,
                                               IXGBE_CB(skb)->dma,
                                               ixgbe_rx_bufsz(rx_ring),
                                               DMA_FROM_DEVICE);
-                               IXGBE_CB(skb)->page_released = false;
-                       }
                        dev_kfree_skb(skb);
                        rx_buffer->skb = NULL;
                }
-               if (rx_buffer->dma)
-                       dma_unmap_page(dev, rx_buffer->dma,
-                                      ixgbe_rx_pg_size(rx_ring),
-                                      DMA_FROM_DEVICE);
-               rx_buffer->dma = 0;
-               if (rx_buffer->page)
-                       __free_pages(rx_buffer->page,
-                                    ixgbe_rx_pg_order(rx_ring));
+
+               if (!rx_buffer->page)
+                       continue;
+
+               dma_unmap_page(dev, rx_buffer->dma,
+                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+               __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+
                rx_buffer->page = NULL;
        }
 
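Reassembled from the -/+ lines for readability, the cleanup loop now reads as below: the early continue keeps the page unmap/free path at a single indent level, and skbs whose page was already handed back still get their deferred unmap first.

        for (i = 0; i < rx_ring->count; i++) {
                struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

                if (rx_buffer->skb) {
                        struct sk_buff *skb = rx_buffer->skb;
                        if (IXGBE_CB(skb)->page_released)
                                dma_unmap_page(dev,
                                               IXGBE_CB(skb)->dma,
                                               ixgbe_rx_bufsz(rx_ring),
                                               DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                        rx_buffer->skb = NULL;
                }

                if (!rx_buffer->page)
                        continue;       /* nothing DMA-mapped for this slot */

                dma_unmap_page(dev, rx_buffer->dma,
                               ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
                __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));

                rx_buffer->page = NULL;
        }
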
@@ -5056,7 +5123,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        hw->subsystem_device_id = pdev->subsystem_device;
 
        /* Set common capability flags and settings */
-       rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+       rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
        adapter->ring_feature[RING_F_RSS].limit = rss;
        adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
        adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -6318,6 +6385,66 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
        }
 }
 
+#ifdef CONFIG_PCI_IOV
+static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
+                                     struct pci_dev *vfdev)
+{
+       if (!pci_wait_for_pending_transaction(vfdev))
+               e_dev_warn("Issuing VFLR with pending transactions\n");
+
+       e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
+       pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+
+       msleep(100);
+}
+
+static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *vfdev;
+       u32 gpc;
+       int pos;
+       unsigned short vf_id;
+
+       if (!netif_carrier_ok(adapter->netdev))
+               return;
+
+       gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
+       if (gpc) /* If incrementing then no need for the check below */
+               return;
+       /* Check to see if a bad DMA write target from an errant or
+        * malicious VF has caused a PCIe error.  If so then we can
+        * issue a VFLR to the offending VF(s) and then resume without
+        * requesting a full slot reset.
+        */
+
+       if (!pdev)
+               return;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+       if (!pos)
+               return;
+
+       /* get the device ID for the VF */
+       pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+
+       /* check status reg for all VFs owned by this PF */
+       vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+       while (vfdev) {
+               if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
+                       u16 status_reg;
+
+                       pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
+                       if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
+                               /* issue VFLR */
+                               ixgbe_issue_vf_flr(adapter, vfdev);
+               }
+
+               vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
+       }
+}
+
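The rewritten DMA-hang check walks the PF's VFs through the PCI core instead of polling the CIAA/CIAD debug registers directly. The load-bearing idiom is the pci_get_device() cursor: each call drops the reference held on the device passed in and takes a reference on the device returned, so the plain while loop (extracted from the function above) leaks no device references:

        /* vf_id was read from the PF's SR-IOV capability */
        vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
        while (vfdev) {
                if (vfdev->is_virtfn && vfdev->physfn == pdev) {
                        u16 status_reg;

                        pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
                        if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
                                ixgbe_issue_vf_flr(adapter, vfdev);
                }
                /* releases the old vfdev, references the next match */
                vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
        }
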
 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
 {
        u32 ssvpc;
@@ -6338,6 +6465,17 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
 
        e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
 }
+#else
+static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
+{
+}
+
+static void
+ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
+{
+}
+#endif /* CONFIG_PCI_IOV */
+
 
 /**
  * ixgbe_watchdog_subtask - check and bring link up
@@ -6358,6 +6496,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
        else
                ixgbe_watchdog_link_is_down(adapter);
 
+       ixgbe_check_for_bad_vf(adapter);
        ixgbe_spoof_check(adapter);
        ixgbe_update_stats(adapter);
 
@@ -6469,51 +6608,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
        clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 }
 
-#ifdef CONFIG_PCI_IOV
-static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
-{
-       int vf;
-       struct ixgbe_hw *hw = &adapter->hw;
-       struct net_device *netdev = adapter->netdev;
-       u32 gpc;
-       u32 ciaa, ciad;
-
-       gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
-       if (gpc) /* If incrementing then no need for the check below */
-               return;
-       /*
-        * Check to see if a bad DMA write target from an errant or
-        * malicious VF has caused a PCIe error.  If so then we can
-        * issue a VFLR to the offending VF(s) and then resume without
-        * requesting a full slot reset.
-        */
-
-       for (vf = 0; vf < adapter->num_vfs; vf++) {
-               ciaa = (vf << 16) | 0x80000000;
-               /* 32 bit read so align, we really want status at offset 6 */
-               ciaa |= PCI_COMMAND;
-               IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
-               ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_BY_MAC(hw));
-               ciaa &= 0x7FFFFFFF;
-               /* disable debug mode asap after reading data */
-               IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
-               /* Get the upper 16 bits which will be the PCI status reg */
-               ciad >>= 16;
-               if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
-                       netdev_err(netdev, "VF %d Hung DMA\n", vf);
-                       /* Issue VFLR */
-                       ciaa = (vf << 16) | 0x80000000;
-                       ciaa |= 0xA8;
-                       IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
-                       ciad = 0x00008000;  /* VFLR */
-                       IXGBE_WRITE_REG(hw, IXGBE_CIAD_BY_MAC(hw), ciad);
-                       ciaa &= 0x7FFFFFFF;
-                       IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
-               }
-       }
-}
-
-#endif
 /**
  * ixgbe_service_timer - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
@@ -6522,7 +6616,6 @@ static void ixgbe_service_timer(unsigned long data)
 {
        struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
        unsigned long next_event_offset;
-       bool ready = true;
 
        /* poll faster when waiting for link */
        if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
@@ -6530,32 +6623,10 @@ static void ixgbe_service_timer(unsigned long data)
        else
                next_event_offset = HZ * 2;
 
-#ifdef CONFIG_PCI_IOV
-       /*
-        * don't bother with SR-IOV VF DMA hang check if there are
-        * no VFs or the link is down
-        */
-       if (!adapter->num_vfs ||
-           (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
-               goto normal_timer_service;
-
-       /* If we have VFs allocated then we must check for DMA hangs */
-       ixgbe_check_for_bad_vf(adapter);
-       next_event_offset = HZ / 50;
-       adapter->timer_event_accumulator++;
-
-       if (adapter->timer_event_accumulator >= 100)
-               adapter->timer_event_accumulator = 0;
-       else
-               ready = false;
-
-normal_timer_service:
-#endif
        /* Reset the timer */
        mod_timer(&adapter->service_timer, next_event_offset + jiffies);
 
-       if (ready)
-               ixgbe_service_event_schedule(adapter);
+       ixgbe_service_event_schedule(adapter);
 }
 
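With the CIAA-based VF polling moved into the watchdog, the service timer reduces to choosing an interval and re-arming itself. A hedged minimal sketch in the timer API this file uses, where the callback receives its context as an unsigned long (example_adapter and its fields are hypothetical stand-ins, not driver code):

struct example_adapter {                /* hypothetical */
        struct timer_list service_timer;
        struct work_struct service_task;
        bool link_pending;
};

static void example_service_timer(unsigned long data)
{
        struct example_adapter *a = (struct example_adapter *)data;
        unsigned long next = a->link_pending ? HZ / 10 : HZ * 2;

        /* re-arm first, then kick the deferred work */
        mod_timer(&a->service_timer, jiffies + next);
        schedule_work(&a->service_task);
}
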
 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
@@ -6960,8 +7031,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
        ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
-               /* notify HW of packet */
-               ixgbe_write_tail(tx_ring, i);
+               writel(i, tx_ring->tail);
+
+               /* we need this if more than one processor can write to our tail
+                * at a time, it synchronizes IO on IA64/Altix systems
+                */
+               mmiowb();
        }
 
        return;
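
The tail write above only happens once the queue is stopped or xmit_more batching has ended: writel() posts the new tail, and mmiowb() (a no-op on most architectures) keeps MMIO writes from different CPUs ordered on platforms such as IA64/Altix. Isolated as a hedged sketch, the notify step is just:

/* hedged sketch of the notify step, not a full transmit path */
static void example_notify_hw(struct ixgbe_ring *tx_ring, u16 next_to_use)
{
        writel(next_to_use, tx_ring->tail);     /* post the new tail */
        mmiowb();                       /* order across CPUs on IA64/Altix */
}
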
@@ -7708,7 +7783,7 @@ static int ixgbe_set_features(struct net_device *netdev,
 
 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                             struct net_device *dev,
-                            const unsigned char *addr,
+                            const unsigned char *addr, u16 vid,
                             u16 flags)
 {
        /* guarantee we can provide a unique filter for the unicast address */
@@ -7717,7 +7792,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                        return -ENOMEM;
        }
 
-       return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
+       return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 }
 
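The new u16 vid parameter tracks a wider change to the ndo_fdb_add signature in net-next; drivers that defer to the core must forward it untouched. Reduced to its pass-through core, the callback is:

static int example_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                               struct net_device *dev,
                               const unsigned char *addr, u16 vid,
                               u16 flags)
{
        /* hand the VLAN id through to the default FDB handler */
        return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
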
 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
@@ -7731,6 +7806,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                return -EOPNOTSUPP;
 
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (!br_spec)
+               return -EINVAL;
 
        nla_for_each_nested(attr, br_spec, rem) {
                __u16 mode;
@@ -7739,6 +7816,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
                        continue;
 
+               if (nla_len(attr) < sizeof(mode))
+                       return -EINVAL;
+
                mode = nla_get_u16(attr);
                if (mode == BRIDGE_MODE_VEPA) {
                        reg = 0;
@@ -7773,7 +7853,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        else
                mode = BRIDGE_MODE_VEPA;
 
-       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0);
 }
 
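Both additions to ixgbe_ndo_bridge_setlink() are defensive netlink parsing: nlmsg_find_attr() returns NULL when the request carries no IFLA_AF_SPEC at all, and nla_len() must cover the payload before nla_get_u16() reads a fixed-size value. The validation skeleton, extracted from the function:

        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
        if (!br_spec)
                return -EINVAL;         /* attribute absent */

        nla_for_each_nested(attr, br_spec, rem) {
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
                        continue;
                if (nla_len(attr) < sizeof(mode))
                        return -EINVAL; /* truncated payload */
                mode = nla_get_u16(attr);       /* safe to read now */
                /* ... apply mode ... */
        }
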
 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
@@ -8021,6 +8101,29 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
        return is_wol_supported;
 }
 
+/**
+ * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
+ * @adapter: Pointer to adapter struct
+ */
+static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_OF
+       struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       const unsigned char *addr;
+
+       addr = of_get_mac_address(dp);
+       if (addr) {
+               ether_addr_copy(hw->mac.perm_addr, addr);
+               return;
+       }
+#endif /* CONFIG_OF */
+
+#ifdef CONFIG_SPARC
+       ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
+#endif /* CONFIG_SPARC */
+}
+
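ixgbe_get_platform_mac_addr() prefers a device-tree-provided MAC, falls back to the SPARC IDPROM, and otherwise leaves the EEPROM-derived perm_addr untouched. The same preference order in a generic, hedged form (example_set_mac and the random fallback are illustrative, not part of this patch):

static void example_set_mac(struct net_device *ndev, struct device_node *np)
{
        const unsigned char *addr = of_get_mac_address(np);

        if (addr && is_valid_ether_addr(addr))
                ether_addr_copy(ndev->dev_addr, addr);  /* firmware-provided */
        else
                eth_hw_addr_random(ndev);       /* illustrative fallback */
}
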
 /**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -8041,6 +8144,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int i, err, pci_using_dac, expected_gts;
        unsigned int indices = MAX_TX_QUEUES;
        u8 part_str[IXGBE_PBANUM_LENGTH];
+       bool disable_dev = false;
 #ifdef IXGBE_FCOE
        u16 device_caps;
 #endif
@@ -8102,7 +8206,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
        adapter = netdev_priv(netdev);
-       pci_set_drvdata(pdev, adapter);
 
        adapter->netdev = netdev;
        adapter->pdev = pdev;
@@ -8289,6 +8392,8 @@ skip_sriov:
                goto err_sw_init;
        }
 
+       ixgbe_get_platform_mac_addr(adapter);
+
        memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
 
        if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -8380,6 +8485,8 @@ skip_sriov:
        if (err)
                goto err_register;
 
+       pci_set_drvdata(pdev, adapter);
+
        /* power down the optics for 82599 SFP+ fiber */
        if (hw->mac.ops.disable_tx_laser)
                hw->mac.ops.disable_tx_laser(hw);
@@ -8435,13 +8542,14 @@ err_sw_init:
        iounmap(adapter->io_addr);
        kfree(adapter->mac_table);
 err_ioremap:
+       disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
 err_alloc_etherdev:
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
-       if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+       if (!adapter || disable_dev)
                pci_disable_device(pdev);
        return err;
 }
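
The disable_dev flag fixes an ordering hazard: the adapter structure lives inside the netdev allocation, so adapter->state must not be touched after free_netdev(). Caching the bit first makes the teardown safe; ixgbe_remove() below applies the same pattern:

        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);            /* adapter memory is gone after this */

        if (disable_dev)
                pci_disable_device(pdev);       /* decided before the free */
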
@@ -8458,8 +8566,14 @@ err_dma:
 static void ixgbe_remove(struct pci_dev *pdev)
 {
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev = adapter->netdev;
+       struct net_device *netdev;
+       bool disable_dev;
 
+       /* if !adapter then we already cleaned up in probe */
+       if (!adapter)
+               return;
+
+       netdev  = adapter->netdev;
        ixgbe_dbg_adapter_exit(adapter);
 
        set_bit(__IXGBE_REMOVING, &adapter->state);
@@ -8508,11 +8622,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
        e_dev_info("complete\n");
 
        kfree(adapter->mac_table);
+       disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
 
        pci_disable_pcie_error_reporting(pdev);
 
-       if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+       if (disable_dev)
                pci_disable_device(pdev);
 }
 
@@ -8605,8 +8720,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                 * VFLR.  Just clean up the AER in that case.
                 */
                if (vfdev) {
-                       e_dev_err("Issuing VFLR to VF %d\n", vf);
-                       pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+                       ixgbe_issue_vf_flr(adapter, vfdev);
                        /* Free device reference count */
                        pci_dev_put(vfdev);
                }
index acafe391f0a3dcff0c9a64ea7f5b4abcddfd040e..8a2be444113dd65044d30f7831a81ba1d118f58f 100644 (file)
@@ -49,6 +49,188 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
 static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
 
+/**
+ *  ixgbe_out_i2c_byte_ack - Send I2C byte with ack
+ *  @hw: pointer to the hardware structure
+ *  @byte: byte to send
+ *
+ *  Returns an error code on error.
+ **/
+static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+{
+       s32 status;
+
+       status = ixgbe_clock_out_i2c_byte(hw, byte);
+       if (status)
+               return status;
+       return ixgbe_get_i2c_ack(hw);
+}
+
+/**
+ *  ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
+ *  @hw: pointer to the hardware structure
+ *  @byte: pointer to a u8 to receive the byte
+ *
+ *  Returns an error code on error.
+ **/
+static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+{
+       s32 status;
+
+       status = ixgbe_clock_in_i2c_byte(hw, byte);
+       if (status)
+               return status;
+       /* ACK */
+       return ixgbe_clock_out_i2c_bit(hw, false);
+}
+
+/**
+ *  ixgbe_ones_comp_byte_add - Perform one's complement addition
+ *  @add1: addend 1
+ *  @add2: addend 2
+ *
+ *  Returns one's complement 8-bit sum.
+ **/
+static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
+{
+       u16 sum = add1 + add2;
+
+       sum = (sum & 0xFF) + (sum >> 8);
+       return sum & 0xFF;
+}
+
+/**
+ *  ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ *  @hw: pointer to the hardware structure
+ *  @addr: I2C bus address to read from
+ *  @reg: I2C device register to read from
+ *  @val: pointer to location to receive read value
+ *
+ *  Returns an error code on error.
+ **/
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+                                   u16 reg, u16 *val)
+{
+       u32 swfw_mask = hw->phy.phy_semaphore_mask;
+       int max_retry = 10;
+       int retry = 0;
+       u8 csum_byte;
+       u8 high_bits;
+       u8 low_bits;
+       u8 reg_high;
+       u8 csum;
+
+       reg_high = ((reg >> 7) & 0xFE) | 1;     /* Indicate read combined */
+       csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+       csum = ~csum;
+       do {
+               if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+                       return IXGBE_ERR_SWFW_SYNC;
+               ixgbe_i2c_start(hw);
+               /* Device Address and write indication */
+               if (ixgbe_out_i2c_byte_ack(hw, addr))
+                       goto fail;
+               /* Write bits 14:8 */
+               if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+                       goto fail;
+               /* Write bits 7:0 */
+               if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+                       goto fail;
+               /* Write csum */
+               if (ixgbe_out_i2c_byte_ack(hw, csum))
+                       goto fail;
+               /* Re-start condition */
+               ixgbe_i2c_start(hw);
+               /* Device Address and read indication */
+               if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
+                       goto fail;
+               /* Get upper bits */
+               if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
+                       goto fail;
+               /* Get low bits */
+               if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
+                       goto fail;
+               /* Get csum */
+               if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
+                       goto fail;
+               /* NACK */
+               if (ixgbe_clock_out_i2c_bit(hw, false))
+                       goto fail;
+               ixgbe_i2c_stop(hw);
+               hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+               *val = (high_bits << 8) | low_bits;
+               return 0;
+
+fail:
+               ixgbe_i2c_bus_clear(hw);
+               hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+               retry++;
+               if (retry < max_retry)
+                       hw_dbg(hw, "I2C byte read combined error - Retry.\n");
+               else
+                       hw_dbg(hw, "I2C byte read combined error.\n");
+       } while (retry < max_retry);
+
+       return IXGBE_ERR_I2C;
+}
+
+/**
+ *  ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ *  @hw: pointer to the hardware structure
+ *  @addr: I2C bus address to write to
+ *  @reg: I2C device register to write to
+ *  @val: value to write
+ *
+ *  Returns an error code on error.
+ **/
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+                                    u8 addr, u16 reg, u16 val)
+{
+       int max_retry = 1;
+       int retry = 0;
+       u8 reg_high;
+       u8 csum;
+
+       reg_high = (reg >> 7) & 0xFE;   /* Indicate write combined */
+       csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+       csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
+       csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
+       csum = ~csum;
+       do {
+               ixgbe_i2c_start(hw);
+               /* Device Address and write indication */
+               if (ixgbe_out_i2c_byte_ack(hw, addr))
+                       goto fail;
+               /* Write bits 14:8 */
+               if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+                       goto fail;
+               /* Write bits 7:0 */
+               if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+                       goto fail;
+               /* Write data 15:8 */
+               if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
+                       goto fail;
+               /* Write data 7:0 */
+               if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
+                       goto fail;
+               /* Write csum */
+               if (ixgbe_out_i2c_byte_ack(hw, csum))
+                       goto fail;
+               ixgbe_i2c_stop(hw);
+               return 0;
+
+fail:
+               ixgbe_i2c_bus_clear(hw);
+               retry++;
+               if (retry < max_retry)
+                       hw_dbg(hw, "I2C byte write combined error - Retry.\n");
+               else
+                       hw_dbg(hw, "I2C byte write combined error.\n");
+       } while (retry < max_retry);
+
+       return IXGBE_ERR_I2C;
+}
+
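Every byte of the combined transaction is covered by an 8-bit one's-complement checksum: ixgbe_ones_comp_byte_add() folds any carry back into the low byte, and the sender transmits the inverted sum so that the receiver's running total over all bytes, checksum included, comes out to 0xFF. A runnable userspace demonstration (register 0x12B0 is an arbitrary sample value):

#include <stdio.h>
#include <stdint.h>

/* same fold as ixgbe_ones_comp_byte_add(): add, then wrap the carry */
static uint8_t ones_comp_add(uint8_t a, uint8_t b)
{
        uint16_t sum = a + b;

        sum = (sum & 0xFF) + (sum >> 8);
        return (uint8_t)sum;
}

int main(void)
{
        uint16_t reg = 0x12B0;                  /* sample register */
        uint8_t reg_high = ((reg >> 7) & 0xFE) | 1; /* read indication */
        uint8_t csum = ones_comp_add(reg_high, reg & 0xFF);
        uint8_t check;

        csum = ~csum;

        /* receiver re-adds every byte including the checksum */
        check = ones_comp_add(ones_comp_add(reg_high, reg & 0xFF), csum);
        printf("csum=0x%02x, receiver sum=0x%02x (0xFF == OK)\n", csum, check);
        return 0;
}
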
 /**
  *  ixgbe_identify_phy_generic - Get physical layer module
  *  @hw: pointer to hardware structure
@@ -60,6 +242,15 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
        u32 phy_addr;
        u16 ext_ability = 0;
 
+       if (!hw->phy.phy_semaphore_mask) {
+               hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
+                                IXGBE_STATUS_LAN_ID_1;
+               if (hw->phy.lan_id)
+                       hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+               else
+                       hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+       }
+
        if (hw->phy.type == ixgbe_phy_unknown) {
                for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
                        hw->phy.mdio.prtad = phy_addr;
@@ -315,12 +506,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                               u32 device_type, u16 *phy_data)
 {
        s32 status;
-       u16 gssr;
-
-       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-               gssr = IXGBE_GSSR_PHY1_SM;
-       else
-               gssr = IXGBE_GSSR_PHY0_SM;
+       u32 gssr = hw->phy.phy_semaphore_mask;
 
        if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
                status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
@@ -418,7 +604,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                                u32 device_type, u16 phy_data)
 {
        s32 status;
-       u16 gssr;
+       u32 gssr;
 
        if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
                gssr = IXGBE_GSSR_PHY1_SM;
@@ -1469,15 +1655,10 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
        s32 status;
        u32 max_retry = 10;
        u32 retry = 0;
-       u16 swfw_mask = 0;
+       u32 swfw_mask = hw->phy.phy_semaphore_mask;
        bool nack = true;
        *data = 0;
 
-       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-               swfw_mask = IXGBE_GSSR_PHY1_SM;
-       else
-               swfw_mask = IXGBE_GSSR_PHY0_SM;
-
        do {
                if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
                        return IXGBE_ERR_SWFW_SYNC;
@@ -1555,12 +1736,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
        s32 status;
        u32 max_retry = 1;
        u32 retry = 0;
-       u16 swfw_mask = 0;
-
-       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-               swfw_mask = IXGBE_GSSR_PHY1_SM;
-       else
-               swfw_mask = IXGBE_GSSR_PHY0_SM;
+       u32 swfw_mask = hw->phy.phy_semaphore_mask;
 
        if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
                return IXGBE_ERR_SWFW_SYNC;
index 54071ed17e3b3d589d8250f4536f622e796049dc..43464388128787116f4c1f6a37943f946fd9cdab 100644 (file)
 #define IXGBE_I2C_EEPROM_STATUS_PASS           0x1
 #define IXGBE_I2C_EEPROM_STATUS_FAIL           0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS    0x3
+#define IXGBE_CS4227                           0xBE    /* CS4227 address */
+#define IXGBE_CS4227_SPARE24_LSB               0x12B0  /* Reg to program EDC */
+#define IXGBE_CS4227_EDC_MODE_CX1              0x0002
+#define IXGBE_CS4227_EDC_MODE_SR               0x0004
+
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE                  0x400
 #define IXGBE_TAF_ASM_PAUSE                  0x800
 /* SFP+ SFF-8472 Compliance code */
 #define IXGBE_SFF_SFF_8472_UNSUP      0x00
 
-s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -157,4 +161,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                   u8 *sff8472_data);
 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                   u8 eeprom_data);
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+                                   u16 reg, u16 *val);
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+                                    u16 reg, u16 val);
 #endif /* _IXGBE_PHY_H_ */
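
The two new combined-transaction prototypes are what the X550 code uses to program the CS4227 companion PHY through the defines added above. A hedged usage sketch (the exact read-modify sequence the driver performs is not shown in this patch; register semantics are assumed for illustration):

static s32 example_set_edc_mode_sr(struct ixgbe_hw *hw)
{
        /* select SR EDC mode on the CS4227 via a combined write */
        return ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227,
                                                IXGBE_CS4227_SPARE24_LSB,
                                                IXGBE_CS4227_EDC_MODE_SR);
}
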
index 04eee7c7b65350e31fbba069502db00cccd8330a..c76ba90ecc6ecc44a6899fa487eda911b5a52d5a 100644 (file)
@@ -221,7 +221,8 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
        if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
                adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
                adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
-               rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+               rss = min_t(int, ixgbe_max_rss_indices(adapter),
+                           num_online_cpus());
        } else {
                rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
        }
index 64de20d1de569930ff97c42d9f4a91a18cc7d25a..d101b25dc4b6fdc88abff20fc761ba56a3126385 100644 (file)
 #define IXGBE_DEV_ID_82599_QSFP_SF_QP    0x1558
 #define IXGBE_DEV_ID_X540T1              0x1560
 
+#define IXGBE_DEV_ID_X550T             0x1563
+#define IXGBE_DEV_ID_X550EM_X_KX4      0x15AA
+#define IXGBE_DEV_ID_X550EM_X_KR       0x15AB
+#define IXGBE_DEV_ID_X550EM_X_SFP      0x15AC
+#define IXGBE_DEV_ID_X550EM_X_10G_T    0x15AD
+#define IXGBE_DEV_ID_X550EM_X_1G_T     0x15AE
+#define IXGBE_DEV_ID_X550_VF_HV        0x1564
+#define IXGBE_DEV_ID_X550_VF           0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF       0x15A8
+#define IXGBE_DEV_ID_X550EM_X_VF_HV    0x15A9
+
 /* VF Device IDs */
 #define IXGBE_DEV_ID_82599_VF           0x10ED
 #define IXGBE_DEV_ID_X540_VF            0x1515
@@ -297,6 +308,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_IMIRVP    0x05AC0
 #define IXGBE_VMD_CTL   0x0581C
 #define IXGBE_RETA(_i)  (0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
+#define IXGBE_ERETA(_i)        (0x0EE80 + ((_i) * 4))  /* 96 of these (0-95) */
 #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
 
 /* Registers for setting up RSS on X550 with SRIOV
@@ -740,6 +752,24 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_LDPCECL   0x0E820
 #define IXGBE_LDPCECH   0x0E821
 
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE  0x0800
+
+#define IXGBE_MDIO_XENPAK_LASI_STATUS  0x9005 /* XENPAK LASI Status register */
+#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
+
+#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS        0x4 /* Indicates if link is up */
+
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+
 /* Management */
 #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
 #define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -1141,6 +1171,13 @@ struct ixgbe_thermal_sensor_data {
 
 /* MDIO definitions */
 
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE            0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE                0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE             0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE           0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE  0x1E   /* Device 30 */
+#define IXGBE_TWINAX_DEV                       1
+
 #define IXGBE_MDIO_COMMAND_TIMEOUT     100 /* PHY Timeout for 1 GB mode */
 
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL      0x0    /* VS1 Control Reg */
@@ -1150,9 +1187,23 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED    0x0018
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED     0x0010
 
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR     0xC30A /* PHY_XS SDA/SCL Addr Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA     0xC30B /* PHY_XS SDA/SCL Data Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT     0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_CONTROL    0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS     0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT        0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT       0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP         0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT   0x3C /* AUTO_NEG EEE Advt Reg */
+
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3  0xCC02 /* Vendor Alarms 3 Reg */
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
+#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
+#define IXGBE_MDIO_POWER_UP_STALL      0x8000 /* Power Up Stall */
+
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR        0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA        0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT        0xC30C /* PHY_XS SDA/SCL Stat Reg */
+#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR     0x9 /* Standard Tx Dis Reg */
+#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE       0x0001 /* PMD Global Tx Dis */
 
 /* MII clause 22/28 definitions */
 #define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
@@ -1696,12 +1747,14 @@ enum {
 #define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
 
 /* SW_FW_SYNC/GSSR definitions */
-#define IXGBE_GSSR_EEP_SM     0x0001
-#define IXGBE_GSSR_PHY0_SM    0x0002
-#define IXGBE_GSSR_PHY1_SM    0x0004
-#define IXGBE_GSSR_MAC_CSR_SM 0x0008
-#define IXGBE_GSSR_FLASH_SM   0x0010
-#define IXGBE_GSSR_SW_MNG_SM  0x0400
+#define IXGBE_GSSR_EEP_SM              0x0001
+#define IXGBE_GSSR_PHY0_SM             0x0002
+#define IXGBE_GSSR_PHY1_SM             0x0004
+#define IXGBE_GSSR_MAC_CSR_SM          0x0008
+#define IXGBE_GSSR_FLASH_SM            0x0010
+#define IXGBE_GSSR_SW_MNG_SM           0x0400
+#define IXGBE_GSSR_SHARED_I2C_SM       0x1806 /* Wait for both phys & I2Cs */
+#define IXGBE_GSSR_I2C_MASK            0x1800
 
 /* FW Status register bitmask */
 #define IXGBE_FWSTS_FWRI    0x00000200 /* Firmware Reset Indication */
@@ -1735,27 +1788,32 @@ enum {
 #define IXGBE_PBANUM_LENGTH 11
 
 /* Checksum and EEPROM pointers */
-#define IXGBE_PBANUM_PTR_GUARD  0xFAFA
-#define IXGBE_EEPROM_CHECKSUM   0x3F
-#define IXGBE_EEPROM_SUM        0xBABA
-#define IXGBE_PCIE_ANALOG_PTR   0x03
-#define IXGBE_ATLAS0_CONFIG_PTR 0x04
-#define IXGBE_PHY_PTR           0x04
-#define IXGBE_ATLAS1_CONFIG_PTR 0x05
-#define IXGBE_OPTION_ROM_PTR    0x05
-#define IXGBE_PCIE_GENERAL_PTR  0x06
-#define IXGBE_PCIE_CONFIG0_PTR  0x07
-#define IXGBE_PCIE_CONFIG1_PTR  0x08
-#define IXGBE_CORE0_PTR         0x09
-#define IXGBE_CORE1_PTR         0x0A
-#define IXGBE_MAC0_PTR          0x0B
-#define IXGBE_MAC1_PTR          0x0C
-#define IXGBE_CSR0_CONFIG_PTR   0x0D
-#define IXGBE_CSR1_CONFIG_PTR   0x0E
-#define IXGBE_FW_PTR            0x0F
-#define IXGBE_PBANUM0_PTR       0x15
-#define IXGBE_PBANUM1_PTR       0x16
-#define IXGBE_FREE_SPACE_PTR    0X3E
+#define IXGBE_PBANUM_PTR_GUARD         0xFAFA
+#define IXGBE_EEPROM_CHECKSUM          0x3F
+#define IXGBE_EEPROM_SUM               0xBABA
+#define IXGBE_PCIE_ANALOG_PTR          0x03
+#define IXGBE_ATLAS0_CONFIG_PTR                0x04
+#define IXGBE_PHY_PTR                  0x04
+#define IXGBE_ATLAS1_CONFIG_PTR                0x05
+#define IXGBE_OPTION_ROM_PTR           0x05
+#define IXGBE_PCIE_GENERAL_PTR         0x06
+#define IXGBE_PCIE_CONFIG0_PTR         0x07
+#define IXGBE_PCIE_CONFIG1_PTR         0x08
+#define IXGBE_CORE0_PTR                        0x09
+#define IXGBE_CORE1_PTR                        0x0A
+#define IXGBE_MAC0_PTR                 0x0B
+#define IXGBE_MAC1_PTR                 0x0C
+#define IXGBE_CSR0_CONFIG_PTR          0x0D
+#define IXGBE_CSR1_CONFIG_PTR          0x0E
+#define IXGBE_PCIE_ANALOG_PTR_X550     0x02
+#define IXGBE_SHADOW_RAM_SIZE_X550     0x4000
+#define IXGBE_IXGBE_PCIE_GENERAL_SIZE  0x24
+#define IXGBE_PCIE_CONFIG_SIZE         0x08
+#define IXGBE_EEPROM_LAST_WORD         0x41
+#define IXGBE_FW_PTR                   0x0F
+#define IXGBE_PBANUM0_PTR              0x15
+#define IXGBE_PBANUM1_PTR              0x16
+#define IXGBE_FREE_SPACE_PTR           0x3E
 
 /* External Thermal Sensor Config */
 #define IXGBE_ETS_CFG                   0x26
@@ -2016,6 +2074,7 @@ enum {
 #define IXGBE_MRQC_RSS_FIELD_IPV4_UDP    0x00400000
 #define IXGBE_MRQC_RSS_FIELD_IPV6_UDP    0x00800000
 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_MULTIPLE_RSS          0x00002000
 #define IXGBE_MRQC_L3L4TXSWEN            0x00008000
 
 #define IXGBE_FWSM_TS_ENABLED  0x1
@@ -2312,18 +2371,32 @@ enum ixgbe_fdir_pballoc_type {
 #define IXGBE_FDIR_DROP_QUEUE                   127
 
 /* Manageability Host Interface defines */
-#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */
-#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */
-#define IXGBE_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH        448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT       500 /* Process HI command limit */
+#define IXGBE_HI_FLASH_ERASE_TIMEOUT   1000 /* Process Erase command limit */
+#define IXGBE_HI_FLASH_UPDATE_TIMEOUT  5000 /* Process Update command limit */
+#define IXGBE_HI_FLASH_APPLY_TIMEOUT   0 /* Process Apply command limit */
 
 /* CEM Support */
-#define FW_CEM_HDR_LEN                0x4
-#define FW_CEM_CMD_DRIVER_INFO        0xDD
-#define FW_CEM_CMD_DRIVER_INFO_LEN    0x5
-#define FW_CEM_CMD_RESERVED           0x0
-#define FW_CEM_UNUSED_VER             0x0
-#define FW_CEM_MAX_RETRIES            3
-#define FW_CEM_RESP_STATUS_SUCCESS    0x1
+#define FW_CEM_HDR_LEN                 0x4
+#define FW_CEM_CMD_DRIVER_INFO         0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN     0x5
+#define FW_CEM_CMD_RESERVED            0x0
+#define FW_CEM_UNUSED_VER              0x0
+#define FW_CEM_MAX_RETRIES             3
+#define FW_CEM_RESP_STATUS_SUCCESS     0x1
+#define FW_READ_SHADOW_RAM_CMD         0x31
+#define FW_READ_SHADOW_RAM_LEN         0x6
+#define FW_WRITE_SHADOW_RAM_CMD                0x33
+#define FW_WRITE_SHADOW_RAM_LEN                0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD         0x36
+#define FW_SHADOW_RAM_DUMP_LEN         0
+#define FW_DEFAULT_CHECKSUM            0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET             3
+#define FW_MAX_READ_BUFFER_SIZE                1024
+#define FW_DISABLE_RXEN_CMD            0xDE
+#define FW_DISABLE_RXEN_LEN            0x1
 
 /* Host Interface Command Structures */
 struct ixgbe_hic_hdr {
@@ -2336,6 +2409,25 @@ struct ixgbe_hic_hdr {
        u8 checksum;
 };
 
+struct ixgbe_hic_hdr2_req {
+       u8 cmd;
+       u8 buf_lenh;
+       u8 buf_lenl;
+       u8 checksum;
+};
+
+struct ixgbe_hic_hdr2_rsp {
+       u8 cmd;
+       u8 buf_lenl;
+       u8 buf_lenh_status;     /* 7-5: high bits of buf_len, 4-0: status */
+       u8 checksum;
+};
+
+union ixgbe_hic_hdr2 {
+       struct ixgbe_hic_hdr2_req req;
+       struct ixgbe_hic_hdr2_rsp rsp;
+};
+
 struct ixgbe_hic_drv_info {
        struct ixgbe_hic_hdr hdr;
        u8 port_num;
@@ -2347,6 +2439,32 @@ struct ixgbe_hic_drv_info {
        u16 pad2; /* end spacing to ensure length is mult. of dword2 */
 };
 
+/* These need to be dword aligned */
+struct ixgbe_hic_read_shadow_ram {
+       union ixgbe_hic_hdr2 hdr;
+       u32 address;
+       u16 length;
+       u16 pad2;
+       u16 data;
+       u16 pad3;
+};
+
+struct ixgbe_hic_write_shadow_ram {
+       union ixgbe_hic_hdr2 hdr;
+       u32 address;
+       u16 length;
+       u16 pad2;
+       u16 data;
+       u16 pad3;
+};
+
+struct ixgbe_hic_disable_rxen {
+       struct ixgbe_hic_hdr hdr;
+       u8  port_number;
+       u8  pad2;
+       u16 pad3;
+};
+
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
        struct {
@@ -2623,6 +2741,9 @@ enum ixgbe_phy_type {
        ixgbe_phy_none,
        ixgbe_phy_tn,
        ixgbe_phy_aq,
+       ixgbe_phy_x550em_kr,
+       ixgbe_phy_x550em_kx4,
+       ixgbe_phy_x550em_ext_t,
        ixgbe_phy_cu_unknown,
        ixgbe_phy_qt,
        ixgbe_phy_xaui,
@@ -2866,7 +2987,7 @@ struct ixgbe_eeprom_operations {
        s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
        s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
        s32 (*update_checksum)(struct ixgbe_hw *);
-       u16 (*calc_checksum)(struct ixgbe_hw *);
+       s32 (*calc_checksum)(struct ixgbe_hw *);
 };
 
 struct ixgbe_mac_operations {
@@ -2888,8 +3009,8 @@ struct ixgbe_mac_operations {
        s32 (*disable_rx_buff)(struct ixgbe_hw *);
        s32 (*enable_rx_buff)(struct ixgbe_hw *);
        s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
-       s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
-       void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+       s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+       void (*release_swfw_sync)(struct ixgbe_hw *, u32);
        s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
        s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
 
@@ -2935,6 +3056,11 @@ struct ixgbe_mac_operations {
        s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
        s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
        s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+
+       /* DMA Coalescing */
+       s32 (*dmac_config)(struct ixgbe_hw *hw);
+       s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
+       s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
 };
 
 struct ixgbe_phy_operations {
@@ -2947,6 +3073,7 @@ struct ixgbe_phy_operations {
        s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
        s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
        s32 (*setup_link)(struct ixgbe_hw *);
+       s32 (*setup_internal_link)(struct ixgbe_hw *);
        s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
        s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
        s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
@@ -2955,6 +3082,8 @@ struct ixgbe_phy_operations {
        s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
        s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
        s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+       s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+       s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
        s32 (*check_overtemp)(struct ixgbe_hw *);
 };
 
@@ -3007,6 +3136,8 @@ struct ixgbe_phy_info {
        bool                            sfp_setup_needed;
        u32                             revision;
        enum ixgbe_media_type           media_type;
+       u8                              lan_id;
+       u32                             phy_semaphore_mask;
        bool                            reset_disable;
        ixgbe_autoneg_advertised        autoneg_advertised;
        enum ixgbe_smart_speed          smart_speed;
@@ -3113,4 +3244,71 @@ struct ixgbe_info {
 #define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
+#define IXGBE_KRM_LINK_CTRL_1(P)       ((P == 0) ? (0x420C) : (0x820C))
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)        ((P == 0) ? (0x4B00) : (0x8B00))
+#define IXGBE_KRM_PMD_DFX_BURNIN(P)    ((P == 0) ? (0x4E00) : (0x8E00))
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P)   ((P == 0) ? (0x5520) : (0x9520))
+#define IXGBE_KRM_RX_ANA_CTL(P)                ((P == 0) ? (0x5A00) : (0x9A00))
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B           (1 << 9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS         (1 << 11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK    (0x7 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G      (2 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G     (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ          (1 << 14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC          (1 << 15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX           (1 << 16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR           (1 << 18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX          (1 << 24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR          (1 << 26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE           (1 << 29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART          (1 << 31)
+
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN                        (1 << 6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN           (1 << 15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN          (1 << 16)
+
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL  (1 << 4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS   (1 << 2)
+
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK      (0x3 << 16)
+
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN     (1 << 1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN      (1 << 2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN             (1 << 3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN             (1 << 31)
+
+#define IXGBE_KX4_LINK_CNTL_1                          0x4C
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX           (1 << 16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4          (1 << 17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX          (1 << 24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4         (1 << 25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE           (1 << 29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP       (1 << 30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART          (1 << 31)
+
+#define IXGBE_SB_IOSF_INDIRECT_CTRL            0x00011144
+#define IXGBE_SB_IOSF_INDIRECT_DATA            0x00011148
+
+#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT          0
+#define IXGBE_SB_IOSF_CTRL_ADDR_MASK           0xFF
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT     18
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \
+                               (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT      20
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \
+                               (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK  0x7
+#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT          31
+#define IXGBE_SB_IOSF_CTRL_BUSY                (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_TARGET_KR_PHY    0
+#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY        1
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS0  2
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS1  3
+
 #endif /* _IXGBE_TYPE_H_ */
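
In the version-2 host interface header the response re-uses a single byte for two fields, per the struct comment above: bits 7-5 extend the buffer length and bits 4-0 carry the status code. A runnable decode follows; treating the three high bits as bits 10-8 of an 11-bit length is an assumption consistent with that comment:

#include <stdio.h>
#include <stdint.h>

/* mirror of ixgbe_hic_hdr2_rsp */
struct hic_hdr2_rsp {
        uint8_t cmd;
        uint8_t buf_lenl;
        uint8_t buf_lenh_status;        /* 7-5: high buf_len bits, 4-0: status */
        uint8_t checksum;
};

int main(void)
{
        /* sample bytes; 0x31 is FW_READ_SHADOW_RAM_CMD */
        struct hic_hdr2_rsp rsp = { 0x31, 0x34, 0x45, 0x00 };
        unsigned int buf_len = ((rsp.buf_lenh_status & 0xE0) << 3) | rsp.buf_lenl;
        unsigned int status = rsp.buf_lenh_status & 0x1F;

        printf("buf_len=%u status=0x%02x\n", buf_len, status);
        return 0;       /* prints buf_len=564 status=0x05 */
}
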
index e88305d5d18dc20bae9eb66dd3ef5e41128d7411..ba54ff07b438cd1e42c33768f060334b7f310834 100644 (file)
@@ -32,6 +32,7 @@
 
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
+#include "ixgbe_x540.h"
 
 #define IXGBE_X540_MAX_TX_QUEUES       128
 #define IXGBE_X540_MAX_RX_QUEUES       128
 
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
 static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
 static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
 
-static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
 {
        return ixgbe_media_type_copper;
 }
 
-static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
 
@@ -76,9 +75,8 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
  *  @speed: new link speed
  *  @autoneg_wait_to_complete: true when waiting for completion is needed
  **/
-static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
-                                    ixgbe_link_speed speed,
-                                    bool autoneg_wait_to_complete)
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                             bool autoneg_wait_to_complete)
 {
        return hw->phy.ops.setup_link_speed(hw, speed,
                                            autoneg_wait_to_complete);
@@ -92,7 +90,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
  *  reset.
  **/
-static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
 {
        s32 status;
        u32 ctrl, i;
@@ -179,7 +177,7 @@ mac_reset_top:
  *  and the generation start_hw function.
  *  Then performs revision-specific operations, if any.
  **/
-static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
 {
        s32 ret_val;
 
@@ -197,7 +195,7 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
  *  ixgbe_hw struct in order to set up EEPROM access.
  **/
-static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
 {
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
        u32 eec;
@@ -316,7 +314,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
  *
  *  @hw: pointer to hardware structure
  **/
-static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
 {
        u16 i;
        u16 j;
@@ -324,6 +322,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
        u16 length = 0;
        u16 pointer = 0;
        u16 word = 0;
+       u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
+       u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
 
        /*
         * Do not use hw->eeprom.ops.read because we do not want to take
@@ -332,10 +332,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
         */
 
        /* Include 0x0-0x3F in the checksum */
-       for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
-               if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
+       for (i = 0; i < checksum_last_word; i++) {
+               if (ixgbe_read_eerd_generic(hw, i, &word)) {
                        hw_dbg(hw, "EEPROM read failed\n");
-                       break;
+                       return IXGBE_ERR_EEPROM;
                }
                checksum += word;
        }
@@ -344,11 +344,11 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
         * Include all data from pointers 0x3, 0x6-0xE.  This excludes the
         * FW, PHY module, and PCIe Expansion/Option ROM pointers.
         */
-       for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+       for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
                if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
                        continue;
 
-               if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
+               if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }
@@ -358,8 +358,9 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
                    pointer >= hw->eeprom.word_size)
                        continue;
 
-               if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
+               if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
                        hw_dbg(hw, "EEPROM read failed\n");
+                       return IXGBE_ERR_EEPROM;
                        break;
                }
 
@@ -368,10 +369,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
                    (pointer + length) >= hw->eeprom.word_size)
                        continue;
 
-               for (j = pointer+1; j <= pointer+length; j++) {
-                       if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
+               for (j = pointer + 1; j <= pointer + length; j++) {
+                       if (ixgbe_read_eerd_generic(hw, j, &word)) {
                                hw_dbg(hw, "EEPROM read failed\n");
-                               break;
+                               return IXGBE_ERR_EEPROM;
                        }
                        checksum += word;
                }
@@ -379,7 +380,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
 
        checksum = (u16)IXGBE_EEPROM_SUM - checksum;
 
-       return checksum;
+       return (s32)checksum;
 }
 
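Changing calc_checksum from u16 to s32 is what lets EEPROM read failures propagate: a negative return is an IXGBE_ERR_* code, while a non-negative one carries the 16-bit checksum in its low half, which is why the callers below unpack it with (u16)(status & 0xffff). (Note the now-unreachable break left behind after the second return IXGBE_ERR_EEPROM above; it is dead but harmless.) A runnable illustration of the convention:

#include <stdio.h>
#include <stdint.h>

#define ERR_EEPROM (-1)         /* stand-in for IXGBE_ERR_EEPROM */

/* toy model of the new convention: error code or checksum in one s32 */
static int32_t example_calc_checksum(int fail)
{
        uint16_t checksum = 0xBABA - 0x1234;    /* made-up word sum */

        if (fail)
                return ERR_EEPROM;
        return (int32_t)checksum;
}

int main(void)
{
        int32_t status = example_calc_checksum(0);

        if (status < 0)
                printf("EEPROM read failed: %d\n", (int)status);
        else
                printf("checksum = 0x%04x\n", (unsigned)(status & 0xffff));
        return 0;
}
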
 /**
@@ -410,23 +411,34 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
        if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
                return IXGBE_ERR_SWFW_SYNC;
 
-       checksum = hw->eeprom.ops.calc_checksum(hw);
+       status = hw->eeprom.ops.calc_checksum(hw);
+       if (status < 0)
+               goto out;
+
+       checksum = (u16)(status & 0xffff);
 
        /* Do not use hw->eeprom.ops.read because we do not want to take
         * the synchronization semaphores twice here.
         */
        status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
                                         &read_checksum);
+       if (status)
+               goto out;
 
-       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       /* Verify read checksum from EEPROM is the same as
+        * calculated checksum
+        */
+       if (read_checksum != checksum) {
+               hw_dbg(hw, "Invalid EEPROM checksum");
+               status = IXGBE_ERR_EEPROM_CHECKSUM;
+       }
 
        /* If the user cares, return the calculated checksum */
        if (checksum_val)
                *checksum_val = checksum;
 
-       /* Verify read and calculated checksums are the same */
-       if (read_checksum != checksum)
-               return IXGBE_ERR_EEPROM_CHECKSUM;
+out:
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 
        return status;
 }
@@ -457,15 +469,22 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
        if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
                return  IXGBE_ERR_SWFW_SYNC;
 
-       checksum = hw->eeprom.ops.calc_checksum(hw);
+       status = hw->eeprom.ops.calc_checksum(hw);
+       if (status < 0)
+               goto out;
+
+       checksum = (u16)(status & 0xffff);
 
        /* Do not use hw->eeprom.ops.write because we do not want to
         * take the synchronization semaphores twice here.
         */
        status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
-       if (!status)
-               status = ixgbe_update_flash_X540(hw);
+       if (status)
+               goto out;
+
+       status = ixgbe_update_flash_X540(hw);
 
+out:
        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
        return status;
 }
@@ -544,7 +563,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
  * Acquires the SWFW semaphore through the SW_FW_SYNC register for
  * the specified function (CSR, PHY0, PHY1, NVM, Flash)
  **/
-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
 {
        u32 swfw_sync;
        u32 swmask = mask;
@@ -612,7 +631,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
  * Releases the SWFW semaphore through the SW_FW_SYNC register
  * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
  **/
-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
 {
        u32 swfw_sync;
        u32 swmask = mask;
@@ -699,7 +718,7 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
  * Devices that implement the version 2 interface:
  *   X540
  **/
-static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
 {
        u32 macc_reg;
        u32 ledctl_reg;
@@ -735,7 +754,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
  * Devices that implement the version 2 interface:
  *   X540
  **/
-static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
 {
        u32 macc_reg;
        u32 ledctl_reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
new file mode 100644 (file)
index 0000000..a1468b1
--- /dev/null
@@ -0,0 +1,37 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ *  Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ *  Contact Information:
+ *  Linux NICS <linux.nics@intel.com>
+ *  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                             bool autoneg_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
new file mode 100644 (file)
index 0000000..ffdd123
--- /dev/null
@@ -0,0 +1,1432 @@
+/*******************************************************************************
+ *
+ *  Intel 10 Gigabit PCI Express Linux driver
+ *  Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ *  Contact Information:
+ *  Linux NICS <linux.nics@intel.com>
+ *  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+/** ixgbe_identify_phy_x550em - Get PHY type based on device id
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns error code
+ */
+static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+{
+       u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_X550EM_X_SFP:
+               /* set up for CS4227 usage */
+               hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+               if (hw->bus.lan_id) {
+                       esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+                       esdp |= IXGBE_ESDP_SDP1_DIR;
+               }
+               esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+
+               return ixgbe_identify_module_generic(hw);
+       case IXGBE_DEV_ID_X550EM_X_KX4:
+               hw->phy.type = ixgbe_phy_x550em_kx4;
+               break;
+       case IXGBE_DEV_ID_X550EM_X_KR:
+               hw->phy.type = ixgbe_phy_x550em_kr;
+               break;
+       case IXGBE_DEV_ID_X550EM_X_1G_T:
+       case IXGBE_DEV_ID_X550EM_X_10G_T:
+               return ixgbe_identify_phy_generic(hw);
+       default:
+               break;
+       }
+       return 0;
+}
+
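+/* MDIO register access is not implemented yet for the x550em internal
+ * PHYs; the stubs below keep the phy_ops table populated and simply
+ * return IXGBE_NOT_IMPLEMENTED.
+ */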
+static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+                                    u32 device_type, u16 *phy_data)
+{
+       return IXGBE_NOT_IMPLEMENTED;
+}
+
+static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+                                     u32 device_type, u16 phy_data)
+{
+       return IXGBE_NOT_IMPLEMENTED;
+}
+
+/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+{
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       u32 eec;
+       u16 eeprom_size;
+
+       if (eeprom->type == ixgbe_eeprom_uninitialized) {
+               eeprom->semaphore_delay = 10;
+               eeprom->type = ixgbe_flash;
+
+               eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+               eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+                                   IXGBE_EEC_SIZE_SHIFT);
+               eeprom->word_size = 1 << (eeprom_size +
+                                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+               hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+                      eeprom->type, eeprom->word_size);
+       }
+
+       return 0;
+}
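+
+/* Worked example for the size computation above (a sketch, assuming
+ * IXGBE_EEPROM_WORD_SIZE_SHIFT == 6): an EEC size field of 2 yields
+ * word_size = 1 << (2 + 6) = 256 words.
+ */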
+
+/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register
+ *  of the IOSF device
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to read
+ *  @device_type: 3 bit device type
+ *  @data: pointer to the data read from the register
+ **/
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+                               u32 device_type, u32 *data)
+{
+       u32 i, command, error;
+
+       command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+                  (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+       /* Write IOSF control register */
+       IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+       /* Check every 10 usec to see if the address cycle completed.
+        * The SB IOSF BUSY bit will clear when the operation is
+        * complete
+        */
+       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+               usleep_range(10, 20);
+
+               command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+               if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+                       break;
+       }
+
+       if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+               error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+                        IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+               hw_dbg(hw, "Failed to read, error %x\n", error);
+               return IXGBE_ERR_PHY;
+       }
+
+       if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+               hw_dbg(hw, "Read timed out\n");
+               return IXGBE_ERR_PHY;
+       }
+
+       *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+
+       return 0;
+}
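+
+/* Usage sketch (illustrative only): reading one KR PHY register through
+ * the IOSF sideband with the helper above:
+ *
+ *     u32 reg_val;
+ *     s32 err = ixgbe_read_iosf_sb_reg_x550(hw,
+ *                     IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ *                     IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ */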
+
+/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
+ *  command assuming that the semaphore is already obtained.
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+       s32 status;
+       struct ixgbe_hic_read_shadow_ram buffer;
+
+       buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+       buffer.hdr.req.buf_lenh = 0;
+       buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+       buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+       /* convert offset from words to bytes */
+       buffer.address = cpu_to_be32(offset * 2);
+       /* one word */
+       buffer.length = cpu_to_be16(sizeof(u16));
+
+       status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+                                             sizeof(buffer),
+                                             IXGBE_HI_COMMAND_TIMEOUT, false);
+       if (status)
+               return status;
+
+       *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+                                         FW_NVM_DATA_OFFSET);
+
+       return 0;
+}
+
+/** ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words
+ *  @data: word(s) read from the EEPROM
+ *
+ *  Reads one or more 16 bit words from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+                                    u16 offset, u16 words, u16 *data)
+{
+       struct ixgbe_hic_read_shadow_ram buffer;
+       u32 current_word = 0;
+       u16 words_to_read;
+       s32 status;
+       u32 i;
+
+       /* Take semaphore for the entire operation. */
+       status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       if (status) {
+               hw_dbg(hw, "EEPROM read buffer - semaphore failed\n");
+               return status;
+       }
+
+       while (words) {
+               if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+                       words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+               else
+                       words_to_read = words;
+
+               buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+               buffer.hdr.req.buf_lenh = 0;
+               buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+               buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+               /* convert offset from words to bytes */
+               buffer.address = cpu_to_be32((offset + current_word) * 2);
+               buffer.length = cpu_to_be16(words_to_read * 2);
+
+               status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+                                                     sizeof(buffer),
+                                                     IXGBE_HI_COMMAND_TIMEOUT,
+                                                     false);
+               if (status) {
+                       hw_dbg(hw, "Host interface command failed\n");
+                       goto out;
+               }
+
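+               /* Each 32-bit word returned through FLEX_MNG packs two
+                * 16-bit EEPROM words: unpack the low half first, then
+                * the high half if more words remain.
+                */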
+               for (i = 0; i < words_to_read; i++) {
+                       u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
+                                 2 * i;
+                       u32 value = IXGBE_READ_REG(hw, reg);
+
+                       data[current_word] = (u16)(value & 0xffff);
+                       current_word++;
+                       i++;
+                       if (i < words_to_read) {
+                               value >>= 16;
+                               data[current_word] = (u16)(value & 0xffff);
+                               current_word++;
+                       }
+               }
+               words -= words_to_read;
+       }
+
+out:
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       return status;
+}
+
+/** ixgbe_checksum_ptr_x550 - Checksum one pointer region
+ *  @hw: pointer to hardware structure
+ *  @ptr: pointer offset in eeprom
+ *  @size: size of section pointed to by ptr, if 0 first word will be used as size
+ *  @csum: address of checksum to update
+ *  @buffer: pointer to buffer containing EEPROM data, or NULL to read it
+ *  @buffer_size: size of buffer
+ *
+ *  Returns error status for any failure
+ **/
+static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+                                  u16 size, u16 *csum, u16 *buffer,
+                                  u32 buffer_size)
+{
+       u16 buf[256];
+       s32 status;
+       u16 length, bufsz, i, start;
+       u16 *local_buffer;
+
+       bufsz = sizeof(buf) / sizeof(buf[0]);
+
+       /* Read a chunk at the pointer location */
+       if (!buffer) {
+               status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
+               if (status) {
+                       hw_dbg(hw, "Failed to read EEPROM image\n");
+                       return status;
+               }
+               local_buffer = buf;
+       } else {
+               if (buffer_size < ptr)
+                       return IXGBE_ERR_PARAM;
+               local_buffer = &buffer[ptr];
+       }
+
+       if (size) {
+               start = 0;
+               length = size;
+       } else {
+               start = 1;
+               length = local_buffer[0];
+
+               /* Skip pointer section if length is invalid. */
+               if (length == 0xFFFF || length == 0 ||
+                   (ptr + length) >= hw->eeprom.word_size)
+                       return 0;
+       }
+
+       if (buffer && ((u32)start + (u32)length > buffer_size))
+               return IXGBE_ERR_PARAM;
+
+       for (i = start; length; i++, length--) {
+               if (i == bufsz && !buffer) {
+                       ptr += bufsz;
+                       i = 0;
+                       if (length < bufsz)
+                               bufsz = length;
+
+                       /* Read a chunk at the pointer location */
+                       status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
+                                                                 bufsz, buf);
+                       if (status) {
+                               hw_dbg(hw, "Failed to read EEPROM image\n");
+                               return status;
+                       }
+               }
+               *csum += local_buffer[i];
+       }
+       return 0;
+}
+
+/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
+ *  @buffer: pointer to buffer containing EEPROM data, or NULL to read it
+ *  @buffer_size: size of buffer
+ *
+ *  Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+{
+       u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+       u16 *local_buffer;
+       s32 status;
+       u16 checksum = 0;
+       u16 pointer, i, size;
+
+       hw->eeprom.ops.init_params(hw);
+
+       if (!buffer) {
+               /* Read pointer area */
+               status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
+                                               IXGBE_EEPROM_LAST_WORD + 1,
+                                               eeprom_ptrs);
+               if (status) {
+                       hw_dbg(hw, "Failed to read EEPROM image\n");
+                       return status;
+               }
+               local_buffer = eeprom_ptrs;
+       } else {
+               if (buffer_size < IXGBE_EEPROM_LAST_WORD + 1)
+                       return IXGBE_ERR_PARAM;
+               local_buffer = buffer;
+       }
+
+       /* For X550 hardware include 0x0-0x41 in the checksum, skip the
+        * checksum word itself
+        */
+       for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
+               if (i != IXGBE_EEPROM_CHECKSUM)
+                       checksum += local_buffer[i];
+
+       /* Include all data from pointers 0x3, 0x6-0xE.  This excludes the
+        * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+        */
+       for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
+               if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+                       continue;
+
+               pointer = local_buffer[i];
+
+               /* Skip pointer section if the pointer is invalid. */
+               if (pointer == 0xFFFF || pointer == 0 ||
+                   pointer >= hw->eeprom.word_size)
+                       continue;
+
+               switch (i) {
+               case IXGBE_PCIE_GENERAL_PTR:
+                       size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
+                       break;
+               case IXGBE_PCIE_CONFIG0_PTR:
+               case IXGBE_PCIE_CONFIG1_PTR:
+                       size = IXGBE_PCIE_CONFIG_SIZE;
+                       break;
+               default:
+                       size = 0;
+                       break;
+               }
+
+               status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
+                                                buffer, buffer_size);
+               if (status)
+                       return status;
+       }
+
+       checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+       return (s32)checksum;
+}
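+
+/* Property of the scheme above (sketch, modulo 16-bit arithmetic): since
+ * the stored checksum word is IXGBE_EEPROM_SUM minus the sum of the
+ * covered words, a valid image satisfies
+ *
+ *     sum(covered words) + stored checksum == IXGBE_EEPROM_SUM
+ *
+ * ixgbe_validate_eeprom_checksum_X550() below checks the equivalent
+ * condition by recomputing the checksum and comparing it with the
+ * stored word.
+ */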
+
+/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+       return ixgbe_calc_checksum_X550(hw, NULL, 0);
+}
+
+/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+       s32 status = 0;
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+               status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
+               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       } else {
+               status = IXGBE_ERR_SWFW_SYNC;
+       }
+
+       return status;
+}
+
+/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum.  If the
+ *  caller does not need checksum_val, the pointer may be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+       s32 status;
+       u16 checksum;
+       u16 read_checksum = 0;
+
+       /* Read the first word from the EEPROM. If this times out or fails, do
+        * not continue or we could be in for a very long wait while every
+        * EEPROM read fails
+        */
+       status = hw->eeprom.ops.read(hw, 0, &checksum);
+       if (status) {
+               hw_dbg(hw, "EEPROM read failed\n");
+               return status;
+       }
+
+       status = hw->eeprom.ops.calc_checksum(hw);
+       if (status < 0)
+               return status;
+
+       checksum = (u16)(status & 0xffff);
+
+       status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+                                          &read_checksum);
+       if (status)
+               return status;
+
+       /* Verify read checksum from EEPROM is the same as
+        * calculated checksum
+        */
+       if (read_checksum != checksum) {
+               status = IXGBE_ERR_EEPROM_CHECKSUM;
+               hw_dbg(hw, "Invalid EEPROM checksum");
+       }
+
+       /* If the user cares, return the calculated checksum */
+       if (checksum_val)
+               *checksum_val = checksum;
+
+       return status;
+}
+
+/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @data: word to write to the EEPROM
+ *
+ *  Writes a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+       s32 status;
+       struct ixgbe_hic_write_shadow_ram buffer;
+
+       buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
+       buffer.hdr.req.buf_lenh = 0;
+       buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
+       buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+       /* one word */
+       buffer.length = cpu_to_be16(sizeof(u16));
+       buffer.data = data;
+       buffer.address = cpu_to_be32(offset * 2);
+
+       status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+                                             sizeof(buffer),
+                                             IXGBE_HI_COMMAND_TIMEOUT, false);
+       return status;
+}
+
+/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @data: word to write to the EEPROM
+ *
+ *  Writes a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+       s32 status = 0;
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+               status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
+               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       } else {
+               hw_dbg(hw, "write ee hostif failed to get semaphore");
+               status = IXGBE_ERR_SWFW_SYNC;
+       }
+
+       return status;
+}
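+
+/* Caller flow sketch (illustrative only, offset/data being the caller's
+ * word address and value): after writing shadow RAM words, the image
+ * checksum must be refreshed and pushed to flash, e.g.
+ *
+ *     s32 err = ixgbe_write_ee_hostif_X550(hw, offset, data);
+ *     if (!err)
+ *             err = ixgbe_update_eeprom_checksum_X550(hw);
+ *
+ * ixgbe_update_eeprom_checksum_X550() below recalculates the checksum,
+ * writes it back and triggers the flash update.
+ */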
+
+/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
+ *  @hw: pointer to hardware structure
+ *
+ *  Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
+ **/
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+{
+       s32 status = 0;
+       union ixgbe_hic_hdr2 buffer;
+
+       buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
+       buffer.req.buf_lenh = 0;
+       buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
+       buffer.req.checksum = FW_DEFAULT_CHECKSUM;
+
+       status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+                                             sizeof(buffer),
+                                             IXGBE_HI_COMMAND_TIMEOUT, false);
+       return status;
+}
+
+/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
+ *  @hw: pointer to hardware structure
+ *
+ *  After writing EEPROM to shadow RAM using EEWR register, software calculates
+ *  checksum and updates the EEPROM and instructs the hardware to update
+ *  the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+       s32 status;
+       u16 checksum = 0;
+
+       /* Read the first word from the EEPROM. If this times out or fails, do
+        * not continue or we could be in for a very long wait while every
+        * EEPROM read fails
+        */
+       status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
+       if (status) {
+               hw_dbg(hw, "EEPROM read failed\n");
+               return status;
+       }
+
+       status = ixgbe_calc_eeprom_checksum_X550(hw);
+       if (status < 0)
+               return status;
+
+       checksum = (u16)(status & 0xffff);
+
+       status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+                                           checksum);
+       if (status)
+               return status;
+
+       status = ixgbe_update_flash_X550(hw);
+
+       return status;
+}
+
+/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @words: number of words
+ *  @data: word(s) to write to the EEPROM
+ *
+ *  Writes one or more 16 bit words to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+                                     u16 offset, u16 words, u16 *data)
+{
+       s32 status = 0;
+       u32 i = 0;
+
+       /* Take semaphore for the entire operation. */
+       status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       if (status) {
+               hw_dbg(hw, "EEPROM write buffer - semaphore failed\n");
+               return status;
+       }
+
+       for (i = 0; i < words; i++) {
+               status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
+                                                        data[i]);
+               if (status) {
+                       hw_dbg(hw, "Eeprom buffered write failed\n");
+                       break;
+               }
+       }
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+       return status;
+}
+
+/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
+ *  @hw: pointer to hardware structure
+ **/
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+
+       /* CS4227 does not support autoneg, so disable the laser control
+        * functions for SFP+ fiber
+        */
+       if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+               mac->ops.disable_tx_laser = NULL;
+               mac->ops.enable_tx_laser = NULL;
+               mac->ops.flap_tx_laser = NULL;
+       }
+}
+
+/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
+ * @hw: pointer to hardware structure
+ */
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+{
+       bool setup_linear;
+       u16 reg_slice, edc_mode;
+       s32 ret_val;
+
+       switch (hw->phy.sfp_type) {
+       case ixgbe_sfp_type_unknown:
+               return 0;
+       case ixgbe_sfp_type_not_present:
+               return IXGBE_ERR_SFP_NOT_PRESENT;
+       case ixgbe_sfp_type_da_cu_core0:
+       case ixgbe_sfp_type_da_cu_core1:
+               setup_linear = true;
+               break;
+       case ixgbe_sfp_type_srlr_core0:
+       case ixgbe_sfp_type_srlr_core1:
+       case ixgbe_sfp_type_da_act_lmt_core0:
+       case ixgbe_sfp_type_da_act_lmt_core1:
+       case ixgbe_sfp_type_1g_sx_core0:
+       case ixgbe_sfp_type_1g_sx_core1:
+               setup_linear = false;
+               break;
+       default:
+               return IXGBE_ERR_SFP_NOT_SUPPORTED;
+       }
+
+       ixgbe_init_mac_link_ops_X550em(hw);
+       hw->phy.ops.reset = NULL;
+
+       /* The CS4227 slice address is the base address + the port-pair reg
+        * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
+        */
+       reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
+
+       if (setup_linear)
+               edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+       else
+               edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+
+       /* Configure CS4227 for connection type. */
+       ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+                                                edc_mode);
+
+       if (ret_val)
+               ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice,
+                                                        edc_mode);
+
+       return ret_val;
+}
+
+/** ixgbe_get_link_capabilities_X550em - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ **/
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+                                      ixgbe_link_speed *speed,
+                                      bool *autoneg)
+{
+       /* SFP */
+       if (hw->phy.media_type == ixgbe_media_type_fiber) {
+               /* CS4227 SFP must not enable auto-negotiation */
+               *autoneg = false;
+
+               if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+                   hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+                       *speed = IXGBE_LINK_SPEED_1GB_FULL;
+                       return 0;
+               }
+
+               /* Link capabilities are based on SFP */
+               if (hw->phy.multispeed_fiber)
+                       *speed = IXGBE_LINK_SPEED_10GB_FULL |
+                                IXGBE_LINK_SPEED_1GB_FULL;
+               else
+                       *speed = IXGBE_LINK_SPEED_10GB_FULL;
+       } else {
+               *speed = IXGBE_LINK_SPEED_10GB_FULL |
+                        IXGBE_LINK_SPEED_1GB_FULL;
+               *autoneg = true;
+       }
+       return 0;
+}
+
+/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
+ *  IOSF device
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 3 bit device type
+ *  @data: Data to write to the register
+ **/
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+                                u32 device_type, u32 data)
+{
+       u32 i, command, error;
+
+       command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+                  (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+       /* Write IOSF control register */
+       IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+       /* Write IOSF data register */
+       IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+
+       /* Check every 10 usec to see if the address cycle completed.
+        * The SB IOSF BUSY bit will clear when the operation is
+        * complete
+        */
+       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+               usleep_range(10, 20);
+
+               command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+               if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+                       break;
+       }
+
+       if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+               error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+                        IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+               hw_dbg(hw, "Failed to write, error %x\n", error);
+               return IXGBE_ERR_PHY;
+       }
+
+       if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+               hw_dbg(hw, "Write timed out\n");
+               return IXGBE_ERR_PHY;
+       }
+
+       return 0;
+}
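+
+/* Together with ixgbe_read_iosf_sb_reg_x550() above, this helper is
+ * normally used as a read-modify-write pair, as in the KR/iXFI link
+ * setup paths below.
+ */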
+
+/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ *  @hw: pointer to hardware structure
+ *  @speed: the link speed to force
+ *
+ *  Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ *  internal and external PHY at a specific speed, without autonegotiation.
+ **/
+static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+       s32 status;
+       u32 reg_val;
+
+       /* Disable AN and force speed to 10G Serial. */
+       status = ixgbe_read_iosf_sb_reg_x550(hw,
+                                       IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+                                       IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+       if (status)
+               return status;
+
+       reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+       reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+       /* Select forced link speed for internal PHY. */
+       switch (*speed) {
+       case IXGBE_LINK_SPEED_10GB_FULL:
+               reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+               break;
+       case IXGBE_LINK_SPEED_1GB_FULL:
+               reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+               break;
+       default:
+               /* Other link speeds are not supported by internal KR PHY. */
+               return IXGBE_ERR_LINK_SETUP;
+       }
+
+       status = ixgbe_write_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+       if (status)
+               return status;
+
+       /* Disable training protocol FSM. */
+       status = ixgbe_read_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+       if (status)
+               return status;
+
+       reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+       status = ixgbe_write_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+       if (status)
+               return status;
+
+       /* Disable Flex from training TXFFE. */
+       status = ixgbe_read_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+       if (status)
+               return status;
+
+       reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+       reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+       reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+       status = ixgbe_write_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+       if (status)
+               return status;
+
+       status = ixgbe_read_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+       if (status)
+               return status;
+
+       reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+       reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+       reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+       status = ixgbe_write_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+       if (status)
+               return status;
+
+       /* Enable override for coefficients. */
+       status = ixgbe_read_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+       if (status)
+               return status;
+
+       reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+       reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+       reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+       reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+       status = ixgbe_write_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+       if (status)
+               return status;
+
+       /* Toggle port SW reset by AN reset. */
+       status = ixgbe_read_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+       if (status)
+               return status;
+
+       reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+       status = ixgbe_write_iosf_sb_reg_x550(hw,
+                               IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+                               IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+       return status;
+}
+
+/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
+ *  @hw: pointer to hardware structure
+ *
+ *   Configures the integrated KX4 PHY.
+ **/
+s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
+{
+       s32 status;
+       u32 reg_val;
+
+       status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+                                            IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+                                            hw->bus.lan_id, &reg_val);
+       if (status)
+               return status;
+
+       reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
+                    IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
+
+       reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
+
+       /* Advertise 10G support. */
+       if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+               reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
+
+       /* Advertise 1G support. */
+       if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+               reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
+
+       /* Restart auto-negotiation. */
+       reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
+       status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+                                             IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+                                             hw->bus.lan_id, reg_val);
+
+       return status;
+}
+
+/**  ixgbe_setup_kr_x550em - Configure the KR PHY.
+ *   @hw: pointer to hardware structure
+ *
+ *   Configures the integrated KR PHY.
+ **/
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+{
+       s32 status;
+       u32 reg_val;
+
+       status = ixgbe_read_iosf_sb_reg_x550(hw,
+                                       IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+                                       IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+       if (status)
+               return status;
+
+       reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+       reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
+       reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+       reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+                    IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+       /* Advertise 10G support. */
+       if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+               reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+       /* Advertise 1G support. */
+       if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+               reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+       /* Restart auto-negotiation. */
+       reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+       status = ixgbe_write_iosf_sb_reg_x550(hw,
+                                       IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+                                       IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+       return status;
+}
+
+/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures the integrated KR PHY to talk to the external PHY. The base
+ *  driver will call this function when it gets notification via interrupt from
+ *  the external PHY. This function forces the internal PHY into iXFI mode at
+ *  the correct speed.
+ *
+ *  A return of a non-zero value indicates an error, and the base driver should
+ *  not report link up.
+ **/
+s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+{
+       s32 status;
+       u16 lasi, autoneg_status, speed;
+       ixgbe_link_speed force_speed;
+
+       /* Verify that the external link status has changed */
+       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi);
+       if (status)
+               return status;
+
+       /* If there was no change in link status, we can just exit */
+       if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
+               return 0;
+
+       /* Read this twice back to back so the value reflects current status */
+       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                     &autoneg_status);
+       if (status)
+               return status;
+
+       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                     &autoneg_status);
+       if (status)
+               return status;
+
+       /* If link is not up, return an error so the link is treated as down */
+       if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+               return IXGBE_ERR_INVALID_LINK_SETTINGS;
+
+       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                     &speed);
+       if (status)
+               return status;
+
+       /* clear everything but the speed and duplex bits */
+       speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
+
+       switch (speed) {
+       case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
+               force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+               break;
+       case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
+               force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+               break;
+       default:
+               /* Internal PHY does not support anything else */
+               return IXGBE_ERR_INVALID_LINK_SETTINGS;
+       }
+
+       return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+}
+
+/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during init_shared_code because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ **/
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+{
+       struct ixgbe_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u32 esdp;
+
+       if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+               esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+
+               if (hw->bus.lan_id) {
+                       esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+                       esdp |= IXGBE_ESDP_SDP1_DIR;
+               }
+               esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+       }
+
+       /* Identify the PHY or SFP module */
+       ret_val = phy->ops.identify(hw);
+
+       /* Setup function pointers based on detected SFP module and speeds */
+       ixgbe_init_mac_link_ops_X550em(hw);
+       if (phy->sfp_type != ixgbe_sfp_type_unknown)
+               phy->ops.reset = NULL;
+
+       /* Set function pointers based on phy type */
+       switch (hw->phy.type) {
+       case ixgbe_phy_x550em_kx4:
+               phy->ops.setup_link = ixgbe_setup_kx4_x550em;
+               phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+               phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+               break;
+       case ixgbe_phy_x550em_kr:
+               phy->ops.setup_link = ixgbe_setup_kr_x550em;
+               phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+               phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+               break;
+       case ixgbe_phy_x550em_ext_t:
+               phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
+               break;
+       default:
+               break;
+       }
+       return ret_val;
+}
+
+/** ixgbe_get_media_type_X550em - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+{
+       enum ixgbe_media_type media_type;
+
+       /* Detect if there is a copper PHY attached. */
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_X550EM_X_KR:
+       case IXGBE_DEV_ID_X550EM_X_KX4:
+               media_type = ixgbe_media_type_backplane;
+               break;
+       case IXGBE_DEV_ID_X550EM_X_SFP:
+               media_type = ixgbe_media_type_fiber;
+               break;
+       case IXGBE_DEV_ID_X550EM_X_1G_T:
+       case IXGBE_DEV_ID_X550EM_X_10G_T:
+               media_type = ixgbe_media_type_copper;
+               break;
+       default:
+               media_type = ixgbe_media_type_unknown;
+               break;
+       }
+       return media_type;
+}
+
+/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+ ** @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+{
+       s32 status;
+       u16 reg;
+       u32 retries = 2;
+
+       do {
+               /* decrement retries counter and exit if we hit 0 */
+               if (retries < 1) {
+                       hw_dbg(hw, "External PHY not yet finished resetting.");
+                       return IXGBE_ERR_PHY;
+               }
+               retries--;
+
+               status = hw->phy.ops.read_reg(hw,
+                                             IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+                                             IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                             &reg);
+               if (status)
+                       return status;
+
+               /* Verify PHY FW reset has completed */
+       } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
+
+       /* Set port to low power mode */
+       status = hw->phy.ops.read_reg(hw,
+                                     IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+                                     IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+                                     &reg);
+       if (status)
+               return status;
+
+       /* Enable the transmitter */
+       status = hw->phy.ops.read_reg(hw,
+                                     IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                     &reg);
+       if (status)
+               return status;
+
+       reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
+
+       status = hw->phy.ops.write_reg(hw,
+                                      IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+                                      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                      reg);
+       if (status)
+               return status;
+
+       /* Un-stall the PHY FW */
+       status = hw->phy.ops.read_reg(hw,
+                                     IXGBE_MDIO_GLOBAL_RES_PR_10,
+                                     IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+                                     &reg);
+       if (status)
+               return status;
+
+       reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+
+       status = hw->phy.ops.write_reg(hw,
+                                      IXGBE_MDIO_GLOBAL_RES_PR_10,
+                                      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+                                      reg);
+       return status;
+}
+
+/**  ixgbe_reset_hw_X550em - Perform hardware reset
+ **  @hw: pointer to hardware structure
+ **
+ **  Resets the hardware by resetting the transmit and receive units, masks
+ **  and clears all interrupts, performs a PHY reset, and performs a link
+ **  (MAC) reset.
+ **/
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+{
+       ixgbe_link_speed link_speed;
+       s32 status;
+       u32 ctrl = 0;
+       u32 i;
+       bool link_up = false;
+
+       /* Call adapter stop to disable Tx/Rx and clear interrupts */
+       status = hw->mac.ops.stop_adapter(hw);
+       if (status)
+               return status;
+
+       /* flush pending Tx transactions */
+       ixgbe_clear_tx_pending(hw);
+
+       /* PHY ops must be identified and initialized prior to reset */
+
+       /* Identify PHY and related function pointers */
+       status = hw->phy.ops.init(hw);
+
+       /* start the external PHY */
+       if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+               status = ixgbe_init_ext_t_x550em(hw);
+               if (status)
+                       return status;
+       }
+
+       /* Setup SFP module if there is one present. */
+       if (hw->phy.sfp_setup_needed) {
+               status = hw->mac.ops.setup_sfp(hw);
+               hw->phy.sfp_setup_needed = false;
+       }
+
+       /* Reset PHY */
+       if (!hw->phy.reset_disable && hw->phy.ops.reset)
+               hw->phy.ops.reset(hw);
+
+mac_reset_top:
+       /* Issue global reset to the MAC.  Needs to be SW reset if link is up.
+        * If link reset is used when link is up, it might reset the PHY when
+        * mng is using it.  If link is down or the flag to force full link
+        * reset is set, then perform link reset.
+        */
+       ctrl = IXGBE_CTRL_LNK_RST;
+
+       if (!hw->force_full_reset) {
+               hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+               if (link_up)
+                       ctrl = IXGBE_CTRL_RST;
+       }
+
+       ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Poll for reset bit to self-clear meaning reset is complete */
+       for (i = 0; i < 10; i++) {
+               udelay(1);
+               ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+               if (!(ctrl & IXGBE_CTRL_RST_MASK))
+                       break;
+       }
+
+       if (ctrl & IXGBE_CTRL_RST_MASK) {
+               status = IXGBE_ERR_RESET_FAILED;
+               hw_dbg(hw, "Reset polling failed to complete.\n");
+       }
+
+       msleep(50);
+
+       /* Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow
+        * time for any pending HW events to complete.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               goto mac_reset_top;
+       }
+
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+       /* Store MAC address from RAR0, clear receive address registers, and
+        * clear the multicast table.  Also reset num_rar_entries to 128,
+        * since we modify this value when programming the SAN MAC address.
+        */
+       hw->mac.num_rar_entries = 128;
+       hw->mac.ops.init_rx_addrs(hw);
+
+       return status;
+}
+
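+/* MAC operations shared verbatim by X550 and X550EM_x; the per-device
+ * tables below fill in the entries that differ (reset, media type, link
+ * setup and SFP handling).
+ */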
+#define X550_COMMON_MAC \
+       .init_hw                        = &ixgbe_init_hw_generic, \
+       .start_hw                       = &ixgbe_start_hw_X540, \
+       .clear_hw_cntrs                 = &ixgbe_clear_hw_cntrs_generic, \
+       .enable_rx_dma                  = &ixgbe_enable_rx_dma_generic, \
+       .get_mac_addr                   = &ixgbe_get_mac_addr_generic, \
+       .get_device_caps                = &ixgbe_get_device_caps_generic, \
+       .stop_adapter                   = &ixgbe_stop_adapter_generic, \
+       .get_bus_info                   = &ixgbe_get_bus_info_generic, \
+       .set_lan_id                     = &ixgbe_set_lan_id_multi_port_pcie, \
+       .read_analog_reg8               = NULL, \
+       .write_analog_reg8              = NULL, \
+       .set_rxpba                      = &ixgbe_set_rxpba_generic, \
+       .check_link                     = &ixgbe_check_mac_link_generic, \
+       .led_on                         = &ixgbe_led_on_generic, \
+       .led_off                        = &ixgbe_led_off_generic, \
+       .blink_led_start                = &ixgbe_blink_led_start_X540, \
+       .blink_led_stop                 = &ixgbe_blink_led_stop_X540, \
+       .set_rar                        = &ixgbe_set_rar_generic, \
+       .clear_rar                      = &ixgbe_clear_rar_generic, \
+       .set_vmdq                       = &ixgbe_set_vmdq_generic, \
+       .set_vmdq_san_mac               = &ixgbe_set_vmdq_san_mac_generic, \
+       .clear_vmdq                     = &ixgbe_clear_vmdq_generic, \
+       .init_rx_addrs                  = &ixgbe_init_rx_addrs_generic, \
+       .update_mc_addr_list            = &ixgbe_update_mc_addr_list_generic, \
+       .enable_mc                      = &ixgbe_enable_mc_generic, \
+       .disable_mc                     = &ixgbe_disable_mc_generic, \
+       .clear_vfta                     = &ixgbe_clear_vfta_generic, \
+       .set_vfta                       = &ixgbe_set_vfta_generic, \
+       .fc_enable                      = &ixgbe_fc_enable_generic, \
+       .set_fw_drv_ver                 = &ixgbe_set_fw_drv_ver_generic, \
+       .init_uta_tables                = &ixgbe_init_uta_tables_generic, \
+       .set_mac_anti_spoofing          = &ixgbe_set_mac_anti_spoofing, \
+       .set_vlan_anti_spoofing         = &ixgbe_set_vlan_anti_spoofing, \
+       .acquire_swfw_sync              = &ixgbe_acquire_swfw_sync_X540, \
+       .release_swfw_sync              = &ixgbe_release_swfw_sync_X540, \
+       .disable_rx_buff                = &ixgbe_disable_rx_buff_generic, \
+       .enable_rx_buff                 = &ixgbe_enable_rx_buff_generic, \
+       .get_thermal_sensor_data        = NULL, \
+       .init_thermal_sensor_thresh     = NULL, \
+       .prot_autoc_read                = &prot_autoc_read_generic, \
+       .prot_autoc_write               = &prot_autoc_write_generic, \
+
+static struct ixgbe_mac_operations mac_ops_X550 = {
+       X550_COMMON_MAC
+       .reset_hw               = &ixgbe_reset_hw_X540,
+       .get_media_type         = &ixgbe_get_media_type_X540,
+       .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
+       .get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
+       .setup_link             = &ixgbe_setup_mac_link_X540,
+       .get_link_capabilities  = &ixgbe_get_copper_link_capabilities_generic,
+       .setup_sfp              = NULL,
+};
+
+static struct ixgbe_mac_operations mac_ops_X550EM_x = {
+       X550_COMMON_MAC
+       .reset_hw               = &ixgbe_reset_hw_X550em,
+       .get_media_type         = &ixgbe_get_media_type_X550em,
+       .get_san_mac_addr       = NULL,
+       .get_wwn_prefix         = NULL,
+       .setup_link             = NULL, /* defined later */
+       .get_link_capabilities  = &ixgbe_get_link_capabilities_X550em,
+       .setup_sfp              = &ixgbe_setup_sfp_modules_X550em,
+};
+
+#define X550_COMMON_EEP \
+       .read                   = &ixgbe_read_ee_hostif_X550, \
+       .read_buffer            = &ixgbe_read_ee_hostif_buffer_X550, \
+       .write                  = &ixgbe_write_ee_hostif_X550, \
+       .write_buffer           = &ixgbe_write_ee_hostif_buffer_X550, \
+       .validate_checksum      = &ixgbe_validate_eeprom_checksum_X550, \
+       .update_checksum        = &ixgbe_update_eeprom_checksum_X550, \
+       .calc_checksum          = &ixgbe_calc_eeprom_checksum_X550, \
+
+static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
+       X550_COMMON_EEP
+       .init_params            = &ixgbe_init_eeprom_params_X550,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
+       X550_COMMON_EEP
+       .init_params            = &ixgbe_init_eeprom_params_X540,
+};
+
+#define X550_COMMON_PHY        \
+       .identify_sfp           = &ixgbe_identify_module_generic, \
+       .reset                  = NULL, \
+       .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic, \
+       .read_i2c_byte          = &ixgbe_read_i2c_byte_generic, \
+       .write_i2c_byte         = &ixgbe_write_i2c_byte_generic, \
+       .read_i2c_sff8472       = &ixgbe_read_i2c_sff8472_generic, \
+       .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic, \
+       .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic, \
+       .check_overtemp         = &ixgbe_tn_check_overtemp, \
+       .get_firmware_version   = &ixgbe_get_phy_firmware_version_generic,
+
+static struct ixgbe_phy_operations phy_ops_X550 = {
+       X550_COMMON_PHY
+       .init                   = NULL,
+       .identify               = &ixgbe_identify_phy_generic,
+       .read_reg               = &ixgbe_read_phy_reg_generic,
+       .write_reg              = &ixgbe_write_phy_reg_generic,
+       .setup_link             = &ixgbe_setup_phy_link_generic,
+       .read_i2c_combined      = &ixgbe_read_i2c_combined_generic,
+       .write_i2c_combined     = &ixgbe_write_i2c_combined_generic,
+};
+
+static struct ixgbe_phy_operations phy_ops_X550EM_x = {
+       X550_COMMON_PHY
+       .init                   = &ixgbe_init_phy_ops_X550em,
+       .identify               = &ixgbe_identify_phy_x550em,
+       .read_reg               = NULL, /* defined later */
+       .write_reg              = NULL, /* defined later */
+       .setup_link             = NULL, /* defined later */
+};
+
+struct ixgbe_info ixgbe_X550_info = {
+       .mac                    = ixgbe_mac_X550,
+       .get_invariants         = &ixgbe_get_invariants_X540,
+       .mac_ops                = &mac_ops_X550,
+       .eeprom_ops             = &eeprom_ops_X550,
+       .phy_ops                = &phy_ops_X550,
+       .mbx_ops                = &mbx_ops_generic,
+};
+
+struct ixgbe_info ixgbe_X550EM_x_info = {
+       .mac                    = ixgbe_mac_X550EM_x,
+       .get_invariants         = &ixgbe_get_invariants_X540,
+       .mac_ops                = &mac_ops_X550EM_x,
+       .eeprom_ops             = &eeprom_ops_X550EM_x,
+       .phy_ops                = &phy_ops_X550EM_x,
+       .mbx_ops                = &mbx_ops_generic,
+};
index 05e4f32d84f7410ea1ccdc5212eaedb5fbcf40cb..7412d378b77b95cb6032d4293086715002d72893 100644 (file)
@@ -31,6 +31,8 @@
 /* Device IDs */
 #define IXGBE_DEV_ID_82599_VF           0x10ED
 #define IXGBE_DEV_ID_X540_VF            0x1515
+#define IXGBE_DEV_ID_X550_VF           0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF       0x15A8
 
 #define IXGBE_VF_IRQ_CLEAR_MASK         7
 #define IXGBE_VF_MAX_TX_QUEUES          8
index bb6726cbeb86ac19bd9ea90876fdc6c8e11e13e8..8c44ab25f3fa7100b3c0c351c75b0733b4c6bcd6 100644 (file)
@@ -432,10 +432,14 @@ enum ixbgevf_state_t {
 enum ixgbevf_boards {
        board_82599_vf,
        board_X540_vf,
+       board_X550_vf,
+       board_X550EM_x_vf,
 };
 
 extern const struct ixgbevf_info ixgbevf_82599_vf_info;
 extern const struct ixgbevf_info ixgbevf_X540_vf_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
 extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 
 /* needed by ethtool.c */
index 755f71f07ae105b6efcc580aefe271f16e04a30d..3b0ddf757fb666f4ccc8c2825c7a4187244df642 100644 (file)
@@ -66,6 +66,8 @@ static char ixgbevf_copyright[] =
 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf] = &ixgbevf_82599_vf_info,
        [board_X540_vf]  = &ixgbevf_X540_vf_info,
+       [board_X550_vf]  = &ixgbevf_X550_vf_info,
+       [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
 };
 
 /* ixgbevf_pci_tbl - PCI Device ID Table
@@ -79,6 +81,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
 static const struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
        /* required last entry */
        {0, }
 };
@@ -3529,7 +3533,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
                max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
                break;
        default:
-               if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+               if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
                        max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
                break;
        }
@@ -3733,6 +3737,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct ixgbe_hw *hw = NULL;
        const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
        int err, pci_using_dac;
+       bool disable_dev = false;
 
        err = pci_enable_device(pdev);
        if (err)
@@ -3767,7 +3772,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
 
        adapter->netdev = netdev;
@@ -3856,16 +3860,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_register;
 
+       pci_set_drvdata(pdev, netdev);
        netif_carrier_off(netdev);
 
        ixgbevf_init_last_counter_stats(adapter);
 
-       /* print the MAC address */
-       hw_dbg(hw, "%pM\n", netdev->dev_addr);
+       /* print the VF info */
+       dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
+       dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
 
-       hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550_vf:
+               dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
+               break;
+       case ixgbe_mac_X540_vf:
+               dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
+               break;
+       case ixgbe_mac_82599_vf:
+       default:
+               dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
+               break;
+       }
 
-       hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
        return 0;
 
 err_register:
@@ -3874,12 +3890,13 @@ err_sw_init:
        ixgbevf_reset_interrupt_capability(adapter);
        iounmap(adapter->io_addr);
 err_ioremap:
+       disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
        free_netdev(netdev);
 err_alloc_etherdev:
        pci_release_regions(pdev);
 err_pci_reg:
 err_dma:
-       if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+       if (!adapter || disable_dev)
                pci_disable_device(pdev);
        return err;
 }
@@ -3896,7 +3913,13 @@ err_dma:
 static void ixgbevf_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+       struct ixgbevf_adapter *adapter;
+       bool disable_dev;
+
+       if (!netdev)
+               return;
+
+       adapter = netdev_priv(netdev);
 
        set_bit(__IXGBEVF_REMOVING, &adapter->state);
 
@@ -3916,9 +3939,10 @@ static void ixgbevf_remove(struct pci_dev *pdev)
 
        hw_dbg(&adapter->hw, "Remove complete\n");
 
+       disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
        free_netdev(netdev);
 
-       if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+       if (disable_dev)
                pci_disable_device(pdev);
 }
 
index 9cddd56d02c39305a69e13a25c2de88b4d92ccbb..cdb53be7d9958e4cb0f92456f459eacea122e038 100644 (file)
@@ -617,3 +617,13 @@ const struct ixgbevf_info ixgbevf_X540_vf_info = {
        .mac = ixgbe_mac_X540_vf,
        .mac_ops = &ixgbevf_mac_ops,
 };
+
+const struct ixgbevf_info ixgbevf_X550_vf_info = {
+       .mac = ixgbe_mac_X550_vf,
+       .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
+       .mac = ixgbe_mac_X550EM_x_vf,
+       .mac_ops = &ixgbevf_mac_ops,
+};
index aa8cc8dc25d15af5bf72621a0afb66297bac18e5..5b172427f459a1c30070fdf52856ca2f0116bedb 100644 (file)
@@ -74,6 +74,8 @@ enum ixgbe_mac_type {
        ixgbe_mac_unknown = 0,
        ixgbe_mac_82599_vf,
        ixgbe_mac_X540_vf,
+       ixgbe_mac_X550_vf,
+       ixgbe_mac_X550EM_x_vf,
        ixgbe_num_macs
 };
 
index 53a1cc52d49664c9ff12014e9bf660da32b7c97e..f8ab220bd72cc831eabdaf832b3ffbd5d059b5b2 100644 (file)
@@ -1361,7 +1361,9 @@ static void sky2_rx_clean(struct sky2_port *sky2)
 {
        unsigned i;
 
-       memset(sky2->rx_le, 0, RX_LE_BYTES);
+       if (sky2->rx_le)
+               memset(sky2->rx_le, 0, RX_LE_BYTES);
+
        for (i = 0; i < sky2->rx_pending; i++) {
                struct rx_ring_info *re = sky2->rx_ring + i;
 
index b7c99780aef30af4d6f25892f0dba743632c14fc..1597fb07576cedbddc93e248dea906dd188a5fed 100644 (file)
@@ -2259,7 +2259,7 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
 
 #define PORT_ID_BYTE_LEN 8
 static int mlx4_en_get_phys_port_id(struct net_device *dev,
-                                   struct netdev_phys_port_id *ppid)
+                                   struct netdev_phys_item_id *ppid)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_dev *mdev = priv->mdev->dev;
index d718ca0f88dab618f19e55178f7d5a0edb94bcb4..16f617b5749e562e7021b30bd285dcdb2671810b 100644 (file)
@@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 
        switch (op) {
        case RES_OP_RESERVE:
-               count = get_param_l(&in_param);
+               count = get_param_l(&in_param) & 0xffffff;
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
index a913b3ad2f899e791a9abd3c5e518d1f7410ceeb..1aa25b13ace1d2ccce0d5cf10fb07fb3a3158038 100644 (file)
@@ -376,13 +376,14 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
 }
 
 static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-                       struct net_device *netdev, const unsigned char *addr)
+                       struct net_device *netdev,
+                       const unsigned char *addr, u16 vid)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        int err = -EOPNOTSUPP;
 
        if (!adapter->fdb_mac_learn)
-               return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
+               return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid);
 
        if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
            qlcnic_sriov_check(adapter)) {
@@ -401,13 +402,13 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
 
 static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                        struct net_device *netdev,
-                       const unsigned char *addr, u16 flags)
+                       const unsigned char *addr, u16 vid, u16 flags)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        int err = 0;
 
        if (!adapter->fdb_mac_learn)
-               return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
+               return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags);
 
        if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
            !qlcnic_sriov_check(adapter)) {
@@ -460,7 +461,7 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
 }
 
 static int qlcnic_get_phys_port_id(struct net_device *netdev,
-                                  struct netdev_phys_port_id *ppid)
+                                  struct netdev_phys_item_id *ppid)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_hardware_context *ahw = adapter->ahw;
diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig
new file mode 100644 (file)
index 0000000..11a850e
--- /dev/null
@@ -0,0 +1,27 @@
+#
+# Rocker device configuration
+#
+
+config NET_VENDOR_ROCKER
+       bool "Rocker devices"
+       default y
+       ---help---
+         If you have a network device belonging to this class, say Y.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Rocker devices. If you say Y, you will be asked for
+         your specific card in the following questions.
+
+if NET_VENDOR_ROCKER
+
+config ROCKER
+       tristate "Rocker switch driver (EXPERIMENTAL)"
+       depends on PCI && NET_SWITCHDEV
+       ---help---
+         This driver supports the Rocker switch device.
+
+         To compile this driver as a module, choose M here: the
+         module will be called rocker.
+
+endif # NET_VENDOR_ROCKER
diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile
new file mode 100644 (file)
index 0000000..f85fb12
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the Rocker network device drivers.
+#
+
+obj-$(CONFIG_ROCKER) += rocker.o
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
new file mode 100644 (file)
index 0000000..fded127
--- /dev/null
@@ -0,0 +1,4374 @@
+/*
+ * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/hashtable.h>
+#include <linux/crc32.h>
+#include <linux/sort.h>
+#include <linux/random.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/bitops.h>
+#include <net/switchdev.h>
+#include <net/rtnetlink.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <generated/utsrelease.h>
+
+#include "rocker.h"
+
+static const char rocker_driver_name[] = "rocker";
+
+static const struct pci_device_id rocker_pci_id_table[] = {
+       {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
+       {0, }
+};
+
+struct rocker_flow_tbl_key {
+       u32 priority;
+       enum rocker_of_dpa_table_id tbl_id;
+       union {
+               struct {
+                       u32 in_lport;
+                       u32 in_lport_mask;
+                       enum rocker_of_dpa_table_id goto_tbl;
+               } ig_port;
+               struct {
+                       u32 in_lport;
+                       __be16 vlan_id;
+                       __be16 vlan_id_mask;
+                       enum rocker_of_dpa_table_id goto_tbl;
+                       bool untagged;
+                       __be16 new_vlan_id;
+               } vlan;
+               struct {
+                       u32 in_lport;
+                       u32 in_lport_mask;
+                       __be16 eth_type;
+                       u8 eth_dst[ETH_ALEN];
+                       u8 eth_dst_mask[ETH_ALEN];
+                       __be16 vlan_id;
+                       __be16 vlan_id_mask;
+                       enum rocker_of_dpa_table_id goto_tbl;
+                       bool copy_to_cpu;
+               } term_mac;
+               struct {
+                       __be16 eth_type;
+                       __be32 dst4;
+                       __be32 dst4_mask;
+                       enum rocker_of_dpa_table_id goto_tbl;
+                       u32 group_id;
+               } ucast_routing;
+               struct {
+                       u8 eth_dst[ETH_ALEN];
+                       u8 eth_dst_mask[ETH_ALEN];
+                       int has_eth_dst;
+                       int has_eth_dst_mask;
+                       __be16 vlan_id;
+                       u32 tunnel_id;
+                       enum rocker_of_dpa_table_id goto_tbl;
+                       u32 group_id;
+                       bool copy_to_cpu;
+               } bridge;
+               struct {
+                       u32 in_lport;
+                       u32 in_lport_mask;
+                       u8 eth_src[ETH_ALEN];
+                       u8 eth_src_mask[ETH_ALEN];
+                       u8 eth_dst[ETH_ALEN];
+                       u8 eth_dst_mask[ETH_ALEN];
+                       __be16 eth_type;
+                       __be16 vlan_id;
+                       __be16 vlan_id_mask;
+                       u8 ip_proto;
+                       u8 ip_proto_mask;
+                       u8 ip_tos;
+                       u8 ip_tos_mask;
+                       u32 group_id;
+               } acl;
+       };
+};
+
+struct rocker_flow_tbl_entry {
+       struct hlist_node entry;
+       u32 ref_count;
+       u64 cookie;
+       struct rocker_flow_tbl_key key;
+       u32 key_crc32; /* key */
+};
+
+struct rocker_group_tbl_entry {
+       struct hlist_node entry;
+       u32 cmd;
+       u32 group_id; /* key */
+       u16 group_count;
+       u32 *group_ids;
+       union {
+               struct {
+                       u8 pop_vlan;
+               } l2_interface;
+               struct {
+                       u8 eth_src[ETH_ALEN];
+                       u8 eth_dst[ETH_ALEN];
+                       __be16 vlan_id;
+                       u32 group_id;
+               } l2_rewrite;
+               struct {
+                       u8 eth_src[ETH_ALEN];
+                       u8 eth_dst[ETH_ALEN];
+                       __be16 vlan_id;
+                       bool ttl_check;
+                       u32 group_id;
+               } l3_unicast;
+       };
+};
+
+struct rocker_fdb_tbl_entry {
+       struct hlist_node entry;
+       u32 key_crc32; /* key */
+       bool learned;
+       struct rocker_fdb_tbl_key {
+               u32 lport;
+               u8 addr[ETH_ALEN];
+               __be16 vlan_id;
+       } key;
+};
+
+struct rocker_internal_vlan_tbl_entry {
+       struct hlist_node entry;
+       int ifindex; /* key */
+       u32 ref_count;
+       __be16 vlan_id;
+};
+
+struct rocker_desc_info {
+       char *data; /* mapped */
+       size_t data_size;
+       size_t tlv_size;
+       struct rocker_desc *desc;
+       DEFINE_DMA_UNMAP_ADDR(mapaddr);
+};
+
+struct rocker_dma_ring_info {
+       size_t size;
+       u32 head;
+       u32 tail;
+       struct rocker_desc *desc; /* mapped */
+       dma_addr_t mapaddr;
+       struct rocker_desc_info *desc_info;
+       unsigned int type;
+};
+
+struct rocker;
+
+enum {
+       ROCKER_CTRL_LINK_LOCAL_MCAST,
+       ROCKER_CTRL_LOCAL_ARP,
+       ROCKER_CTRL_IPV4_MCAST,
+       ROCKER_CTRL_IPV6_MCAST,
+       ROCKER_CTRL_DFLT_BRIDGING,
+       ROCKER_CTRL_MAX,
+};
+
+#define ROCKER_INTERNAL_VLAN_ID_BASE   0x0f00
+#define ROCKER_N_INTERNAL_VLANS                255
+#define ROCKER_VLAN_BITMAP_LEN         BITS_TO_LONGS(VLAN_N_VID)
+#define ROCKER_INTERNAL_VLAN_BITMAP_LEN        BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
+
+struct rocker_port {
+       struct net_device *dev;
+       struct net_device *bridge_dev;
+       struct rocker *rocker;
+       unsigned int port_number;
+       u32 lport;
+       __be16 internal_vlan_id;
+       int stp_state;
+       u32 brport_flags;
+       bool ctrls[ROCKER_CTRL_MAX];
+       unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
+       struct napi_struct napi_tx;
+       struct napi_struct napi_rx;
+       struct rocker_dma_ring_info tx_ring;
+       struct rocker_dma_ring_info rx_ring;
+};
+
+struct rocker {
+       struct pci_dev *pdev;
+       u8 __iomem *hw_addr;
+       struct msix_entry *msix_entries;
+       unsigned int port_count;
+       struct rocker_port **ports;
+       struct {
+               u64 id;
+       } hw;
+       spinlock_t cmd_ring_lock;
+       struct rocker_dma_ring_info cmd_ring;
+       struct rocker_dma_ring_info event_ring;
+       DECLARE_HASHTABLE(flow_tbl, 16);
+       spinlock_t flow_tbl_lock;
+       u64 flow_tbl_next_cookie;
+       DECLARE_HASHTABLE(group_tbl, 16);
+       spinlock_t group_tbl_lock;
+       DECLARE_HASHTABLE(fdb_tbl, 16);
+       spinlock_t fdb_tbl_lock;
+       unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
+       DECLARE_HASHTABLE(internal_vlan_tbl, 8);
+       spinlock_t internal_vlan_tbl_lock;
+};
+
+static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
+static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+/* Rocker priority levels for flow table entries.  Higher
+ * priority match takes precedence over lower priority match.
+ */
+
+enum {
+       ROCKER_PRIORITY_UNKNOWN = 0,
+       ROCKER_PRIORITY_IG_PORT = 1,
+       ROCKER_PRIORITY_VLAN = 1,
+       ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
+       ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
+       ROCKER_PRIORITY_UNICAST_ROUTING = 1,
+       ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
+       ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
+       ROCKER_PRIORITY_BRIDGING_VLAN = 3,
+       ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
+       ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
+       ROCKER_PRIORITY_BRIDGING_TENANT = 3,
+       ROCKER_PRIORITY_ACL_CTRL = 3,
+       ROCKER_PRIORITY_ACL_NORMAL = 2,
+       ROCKER_PRIORITY_ACL_DFLT = 1,
+};
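To make the precedence rule concrete, here is a minimal editorial sketch (not part of the patch; pick_winning_entry() is a hypothetical helper) of resolving several matching flow entries by keeping the numerically highest priority:

/* Hypothetical helper, editorial only: among entries that all match a
 * packet, the one with the highest key.priority wins.
 */
static struct rocker_flow_tbl_entry *
pick_winning_entry(struct rocker_flow_tbl_entry **matches, int n)
{
        struct rocker_flow_tbl_entry *best = NULL;
        int i;

        for (i = 0; i < n; i++)
                if (!best || matches[i]->key.priority > best->key.priority)
                        best = matches[i];
        return best;
}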
+
+static bool rocker_vlan_id_is_internal(__be16 vlan_id)
+{
+       u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
+       u16 end = start + ROCKER_N_INTERNAL_VLANS - 1;
+       u16 _vlan_id = ntohs(vlan_id);
+
+       return (_vlan_id >= start && _vlan_id <= end);
+}
+
+static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
+                                     u16 vid, bool *pop_vlan)
+{
+       __be16 vlan_id;
+
+       if (pop_vlan)
+               *pop_vlan = false;
+       vlan_id = htons(vid);
+       if (!vlan_id) {
+               vlan_id = rocker_port->internal_vlan_id;
+               if (pop_vlan)
+                       *pop_vlan = true;
+       }
+
+       return vlan_id;
+}
+
+static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
+                                  __be16 vlan_id)
+{
+       if (rocker_vlan_id_is_internal(vlan_id))
+               return 0;
+
+       return ntohs(vlan_id);
+}
+
+static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
+{
+       return !!rocker_port->bridge_dev;
+}
+
+struct rocker_wait {
+       wait_queue_head_t wait;
+       bool done;
+       bool nowait;
+};
+
+static void rocker_wait_reset(struct rocker_wait *wait)
+{
+       wait->done = false;
+       wait->nowait = false;
+}
+
+static void rocker_wait_init(struct rocker_wait *wait)
+{
+       init_waitqueue_head(&wait->wait);
+       rocker_wait_reset(wait);
+}
+
+static struct rocker_wait *rocker_wait_create(gfp_t gfp)
+{
+       struct rocker_wait *wait;
+
+       wait = kmalloc(sizeof(*wait), gfp);
+       if (!wait)
+               return NULL;
+       rocker_wait_init(wait);
+       return wait;
+}
+
+static void rocker_wait_destroy(struct rocker_wait *work)
+{
+       kfree(work);
+}
+
+static bool rocker_wait_event_timeout(struct rocker_wait *wait,
+                                     unsigned long timeout)
+{
+       wait_event_timeout(wait->wait, wait->done, timeout);
+       if (!wait->done)
+               return false;
+       return true;
+}
+
+static void rocker_wait_wake_up(struct rocker_wait *wait)
+{
+       wait->done = true;
+       wake_up(&wait->wait);
+}
+
+static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
+{
+       return rocker->msix_entries[vector].vector;
+}
+
+static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
+{
+       return rocker_msix_vector(rocker_port->rocker,
+                                 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
+}
+
+static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
+{
+       return rocker_msix_vector(rocker_port->rocker,
+                                 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
+}
+
+#define rocker_write32(rocker, reg, val)       \
+       writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_read32(rocker, reg)     \
+       readl((rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_write64(rocker, reg, val)       \
+       writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_read64(rocker, reg)     \
+       readq((rocker)->hw_addr + (ROCKER_ ## reg))
+
+/*****************************
+ * HW basic testing functions
+ *****************************/
+
+static int rocker_reg_test(struct rocker *rocker)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       u64 test_reg;
+       u64 rnd;
+
+       rnd = prandom_u32();
+       rnd >>= 1;
+       rocker_write32(rocker, TEST_REG, rnd);
+       test_reg = rocker_read32(rocker, TEST_REG);
+       if (test_reg != rnd * 2) {
+               dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
+                       test_reg, rnd * 2);
+               return -EIO;
+       }
+
+       rnd = prandom_u32();
+       rnd <<= 31;
+       rnd |= prandom_u32();
+       rocker_write64(rocker, TEST_REG64, rnd);
+       test_reg = rocker_read64(rocker, TEST_REG64);
+       if (test_reg != rnd * 2) {
+               dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
+                       test_reg, rnd * 2);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
+                              u32 test_type, dma_addr_t dma_handle,
+                              unsigned char *buf, unsigned char *expect,
+                              size_t size)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       int i;
+
+       rocker_wait_reset(wait);
+       rocker_write32(rocker, TEST_DMA_CTRL, test_type);
+
+       if (!rocker_wait_event_timeout(wait, HZ / 10)) {
+               dev_err(&pdev->dev, "no interrupt received within a timeout\n");
+               return -EIO;
+       }
+
+       for (i = 0; i < size; i++) {
+               if (buf[i] != expect[i]) {
+                       dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
+                               buf[i], i, expect[i]);
+                       return -EIO;
+               }
+       }
+       return 0;
+}
+
+#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
+#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
+
+static int rocker_dma_test_offset(struct rocker *rocker,
+                                 struct rocker_wait *wait, int offset)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       unsigned char *alloc;
+       unsigned char *buf;
+       unsigned char *expect;
+       dma_addr_t dma_handle;
+       int i;
+       int err;
+
+       alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
+                       GFP_KERNEL | GFP_DMA);
+       if (!alloc)
+               return -ENOMEM;
+       buf = alloc + offset;
+       expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
+
+       dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
+                                   PCI_DMA_BIDIRECTIONAL);
+       if (pci_dma_mapping_error(pdev, dma_handle)) {
+               err = -EIO;
+               goto free_alloc;
+       }
+
+       rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
+       rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
+
+       memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
+       err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
+                                 dma_handle, buf, expect,
+                                 ROCKER_TEST_DMA_BUF_SIZE);
+       if (err)
+               goto unmap;
+
+       memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
+       err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
+                                 dma_handle, buf, expect,
+                                 ROCKER_TEST_DMA_BUF_SIZE);
+       if (err)
+               goto unmap;
+
+       prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
+       for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
+               expect[i] = ~buf[i];
+       err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
+                                 dma_handle, buf, expect,
+                                 ROCKER_TEST_DMA_BUF_SIZE);
+
+unmap:
+       pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
+                        PCI_DMA_BIDIRECTIONAL);
+free_alloc:
+       kfree(alloc);
+
+       return err;
+}
+
+static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
+{
+       int i;
+       int err;
+
+       for (i = 0; i < 8; i++) {
+               err = rocker_dma_test_offset(rocker, wait, i);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
+{
+       struct rocker_wait *wait = dev_id;
+
+       rocker_wait_wake_up(wait);
+
+       return IRQ_HANDLED;
+}
+
+static int rocker_basic_hw_test(struct rocker *rocker)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       struct rocker_wait wait;
+       int err;
+
+       err = rocker_reg_test(rocker);
+       if (err) {
+               dev_err(&pdev->dev, "reg test failed\n");
+               return err;
+       }
+
+       err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
+                         rocker_test_irq_handler, 0,
+                         rocker_driver_name, &wait);
+       if (err) {
+               dev_err(&pdev->dev, "cannot assign test irq\n");
+               return err;
+       }
+
+       rocker_wait_init(&wait);
+       rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
+
+       if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
+               dev_err(&pdev->dev, "no interrupt received within a timeout\n");
+               err = -EIO;
+               goto free_irq;
+       }
+
+       err = rocker_dma_test(rocker, &wait);
+       if (err)
+               dev_err(&pdev->dev, "dma test failed\n");
+
+free_irq:
+       free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
+       return err;
+}
+
+/******
+ * TLV
+ ******/
+
+#define ROCKER_TLV_ALIGNTO 8U
+#define ROCKER_TLV_ALIGN(len) \
+       (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
+#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
+
+/*  <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
+ * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
+ * |             Header          | Pad |           Payload           | Pad |
+ * |      (struct rocker_tlv)    | ing |                             | ing |
+ * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
+ *  <--------------------------- tlv->len -------------------------->
+ */
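
A worked example of the alignment arithmetic (editorial sketch, not part of the patch; the exact ROCKER_TLV_HDRLEN value depends on sizeof(struct rocker_tlv) from rocker.h):

/* Editorial sketch: the padding rules above in numbers, checked at
 * compile time.  A 6-byte payload (e.g. a MAC address) pads to 8.
 */
static inline void rocker_tlv_align_example(void)
{
        BUILD_BUG_ON(ROCKER_TLV_ALIGN(6) != 8);   /* rounds up to 8 */
        BUILD_BUG_ON(ROCKER_TLV_ALIGN(8) != 8);   /* already aligned */
        BUILD_BUG_ON(ROCKER_TLV_ALIGN(9) != 16);  /* next multiple of 8 */
}

Note that tlv->len records the header plus the unpadded payload; the trailing padding is accounted for only when rocker_tlv_next() steps to the following attribute.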
+
+static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
+                                         int *remaining)
+{
+       int totlen = ROCKER_TLV_ALIGN(tlv->len);
+
+       *remaining -= totlen;
+       return (struct rocker_tlv *) ((char *) tlv + totlen);
+}
+
+static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
+{
+       return remaining >= (int) ROCKER_TLV_HDRLEN &&
+              tlv->len >= ROCKER_TLV_HDRLEN &&
+              tlv->len <= remaining;
+}
+
+#define rocker_tlv_for_each(pos, head, len, rem)       \
+       for (pos = head, rem = len;                     \
+            rocker_tlv_ok(pos, rem);                   \
+            pos = rocker_tlv_next(pos, &(rem)))
+
+#define rocker_tlv_for_each_nested(pos, tlv, rem)      \
+       rocker_tlv_for_each(pos, rocker_tlv_data(tlv),  \
+                           rocker_tlv_len(tlv), rem)
+
+static int rocker_tlv_attr_size(int payload)
+{
+       return ROCKER_TLV_HDRLEN + payload;
+}
+
+static int rocker_tlv_total_size(int payload)
+{
+       return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
+}
+
+static int rocker_tlv_padlen(int payload)
+{
+       return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
+}
+
+static int rocker_tlv_type(const struct rocker_tlv *tlv)
+{
+       return tlv->type;
+}
+
+static void *rocker_tlv_data(const struct rocker_tlv *tlv)
+{
+       return (char *) tlv + ROCKER_TLV_HDRLEN;
+}
+
+static int rocker_tlv_len(const struct rocker_tlv *tlv)
+{
+       return tlv->len - ROCKER_TLV_HDRLEN;
+}
+
+static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
+{
+       return *(u8 *) rocker_tlv_data(tlv);
+}
+
+static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
+{
+       return *(u16 *) rocker_tlv_data(tlv);
+}
+
+static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
+{
+       return *(u32 *) rocker_tlv_data(tlv);
+}
+
+static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
+{
+       return *(u64 *) rocker_tlv_data(tlv);
+}
+
+static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
+                            const char *buf, int buf_len)
+{
+       const struct rocker_tlv *tlv;
+       const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
+       int rem;
+
+       memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
+
+       rocker_tlv_for_each(tlv, head, buf_len, rem) {
+               u32 type = rocker_tlv_type(tlv);
+
+               if (type > 0 && type <= maxtype)
+                       tb[type] = (struct rocker_tlv *) tlv;
+       }
+}
+
+static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
+                                   const struct rocker_tlv *tlv)
+{
+       rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
+                        rocker_tlv_len(tlv));
+}
+
+static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
+                                 struct rocker_desc_info *desc_info)
+{
+       rocker_tlv_parse(tb, maxtype, desc_info->data,
+                        desc_info->desc->tlv_size);
+}
+
+static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
+{
+       return (struct rocker_tlv *) ((char *) desc_info->data +
+                                              desc_info->tlv_size);
+}
+
+static int rocker_tlv_put(struct rocker_desc_info *desc_info,
+                         int attrtype, int attrlen, const void *data)
+{
+       int tail_room = desc_info->data_size - desc_info->tlv_size;
+       int total_size = rocker_tlv_total_size(attrlen);
+       struct rocker_tlv *tlv;
+
+       if (unlikely(tail_room < total_size))
+               return -EMSGSIZE;
+
+       tlv = rocker_tlv_start(desc_info);
+       desc_info->tlv_size += total_size;
+       tlv->type = attrtype;
+       tlv->len = rocker_tlv_attr_size(attrlen);
+       memcpy(rocker_tlv_data(tlv), data, attrlen);
+       memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
+       return 0;
+}
+
+static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
+                            int attrtype, u8 value)
+{
+       return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+}
+
+static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
+                             int attrtype, u16 value)
+{
+       return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+}
+
+static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
+                             int attrtype, u32 value)
+{
+       return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+}
+
+static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
+                             int attrtype, u64 value)
+{
+       return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+}
+
+static struct rocker_tlv *
+rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
+{
+       struct rocker_tlv *start = rocker_tlv_start(desc_info);
+
+       if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
+               return NULL;
+
+       return start;
+}
+
+static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
+                               struct rocker_tlv *start)
+{
+       start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
+}
+
+static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
+                                  struct rocker_tlv *start)
+{
+       desc_info->tlv_size = (char *) start - desc_info->data;
+}
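
Putting the writer helpers together, a hedged editorial sketch of encoding a nested command message (example_encode_cmd() is hypothetical; the ROCKER_TLV_CMD_* values come from rocker.h; compare the real rocker_cmd_get_port_settings_prep() further below, which does the same without the cancel path):

/* Editorial sketch: encode a cmd type plus a nested info attribute,
 * rolling back the nest if a nested put fails.
 */
static int example_encode_cmd(struct rocker_desc_info *desc_info, u32 lport)
{
        struct rocker_tlv *nest;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
                return -EMSGSIZE;
        nest = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!nest)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
                               lport)) {
                rocker_tlv_nest_cancel(desc_info, nest);
                return -EMSGSIZE;
        }
        rocker_tlv_nest_end(desc_info, nest);
        return 0;
}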
+
+/******************************************
+ * DMA rings and descriptors manipulations
+ ******************************************/
+
+static u32 __pos_inc(u32 pos, size_t limit)
+{
+       return ++pos == limit ? 0 : pos;
+}
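
__pos_inc() is the wrap-around increment used by the head/tail bookkeeping below; the rings are classic one-slot-empty circular buffers. A small editorial sketch of the resulting invariants (ring_empty()/ring_full() are hypothetical, not driver functions):

/* Editorial sketch: with one slot always left unused, the empty and
 * full states stay distinguishable without a separate counter.
 */
static bool ring_empty(u32 head, u32 tail)
{
        return head == tail;
}

static bool ring_full(u32 head, u32 tail, size_t size)
{
        return __pos_inc(head, size) == tail;
}

This matches the checks in rocker_desc_head_get() (returns NULL when advancing head would hit tail) and rocker_desc_tail_get() (nothing to reap when tail == head).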
+
+static int rocker_desc_err(struct rocker_desc_info *desc_info)
+{
+       return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN);
+}
+
+static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
+{
+       desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
+}
+
+static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
+{
+       u32 comp_err = desc_info->desc->comp_err;
+
+       return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
+}
+
+static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+{
+       return (void *) desc_info->desc->cookie;
+}
+
+static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+                                      void *ptr)
+{
+       desc_info->desc->cookie = (long) ptr;
+}
+
+static struct rocker_desc_info *
+rocker_desc_head_get(struct rocker_dma_ring_info *info)
+{
+       struct rocker_desc_info *desc_info;
+       u32 head = __pos_inc(info->head, info->size);
+
+       desc_info = &info->desc_info[info->head];
+       if (head == info->tail)
+               return NULL; /* ring full */
+       desc_info->tlv_size = 0;
+       return desc_info;
+}
+
+static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+{
+       desc_info->desc->buf_size = desc_info->data_size;
+       desc_info->desc->tlv_size = desc_info->tlv_size;
+}
+
+static void rocker_desc_head_set(struct rocker *rocker,
+                                struct rocker_dma_ring_info *info,
+                                struct rocker_desc_info *desc_info)
+{
+       u32 head = __pos_inc(info->head, info->size);
+
+       BUG_ON(head == info->tail);
+       rocker_desc_commit(desc_info);
+       info->head = head;
+       rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
+}
+
+static struct rocker_desc_info *
+rocker_desc_tail_get(struct rocker_dma_ring_info *info)
+{
+       struct rocker_desc_info *desc_info;
+
+       if (info->tail == info->head)
+               return NULL; /* nothing to be done between head and tail */
+       desc_info = &info->desc_info[info->tail];
+       if (!rocker_desc_gen(desc_info))
+               return NULL; /* gen bit not set, desc is not ready yet */
+       info->tail = __pos_inc(info->tail, info->size);
+       desc_info->tlv_size = desc_info->desc->tlv_size;
+       return desc_info;
+}
+
+static void rocker_dma_ring_credits_set(struct rocker *rocker,
+                                       struct rocker_dma_ring_info *info,
+                                       u32 credits)
+{
+       if (credits)
+               rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
+}
+
+static unsigned long rocker_dma_ring_size_fix(size_t size)
+{
+       return max(ROCKER_DMA_SIZE_MIN,
+                  min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
+}
+
+static int rocker_dma_ring_create(struct rocker *rocker,
+                                 unsigned int type,
+                                 size_t size,
+                                 struct rocker_dma_ring_info *info)
+{
+       int i;
+
+       BUG_ON(size != rocker_dma_ring_size_fix(size));
+       info->size = size;
+       info->type = type;
+       info->head = 0;
+       info->tail = 0;
+       info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
+                                 GFP_KERNEL);
+       if (!info->desc_info)
+               return -ENOMEM;
+
+       info->desc = pci_alloc_consistent(rocker->pdev,
+                                         info->size * sizeof(*info->desc),
+                                         &info->mapaddr);
+       if (!info->desc) {
+               kfree(info->desc_info);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < info->size; i++)
+               info->desc_info[i].desc = &info->desc[i];
+
+       rocker_write32(rocker, DMA_DESC_CTRL(info->type),
+                      ROCKER_DMA_DESC_CTRL_RESET);
+       rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
+       rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
+
+       return 0;
+}
+
+static void rocker_dma_ring_destroy(struct rocker *rocker,
+                                   struct rocker_dma_ring_info *info)
+{
+       rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
+
+       pci_free_consistent(rocker->pdev,
+                           info->size * sizeof(struct rocker_desc),
+                           info->desc, info->mapaddr);
+       kfree(info->desc_info);
+}
+
+static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+                                            struct rocker_dma_ring_info *info)
+{
+       int i;
+
+       BUG_ON(info->head || info->tail);
+
+       /* When ring is consumer, we need to advance head for each desc.
+        * That tells hw that the desc is ready to be used by it.
+        */
+       for (i = 0; i < info->size - 1; i++)
+               rocker_desc_head_set(rocker, info, &info->desc_info[i]);
+       rocker_desc_commit(&info->desc_info[i]);
+}
+
+static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
+                                     struct rocker_dma_ring_info *info,
+                                     int direction, size_t buf_size)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       int i;
+       int err;
+
+       for (i = 0; i < info->size; i++) {
+               struct rocker_desc_info *desc_info = &info->desc_info[i];
+               struct rocker_desc *desc = &info->desc[i];
+               dma_addr_t dma_handle;
+               char *buf;
+
+               buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
+               if (!buf) {
+                       err = -ENOMEM;
+                       goto rollback;
+               }
+
+               dma_handle = pci_map_single(pdev, buf, buf_size, direction);
+               if (pci_dma_mapping_error(pdev, dma_handle)) {
+                       kfree(buf);
+                       err = -EIO;
+                       goto rollback;
+               }
+
+               desc_info->data = buf;
+               desc_info->data_size = buf_size;
+               dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
+
+               desc->buf_addr = dma_handle;
+               desc->buf_size = buf_size;
+       }
+       return 0;
+
+rollback:
+       for (i--; i >= 0; i--) {
+               struct rocker_desc_info *desc_info = &info->desc_info[i];
+
+               pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+                                desc_info->data_size, direction);
+               kfree(desc_info->data);
+       }
+       return err;
+}
+
+static void rocker_dma_ring_bufs_free(struct rocker *rocker,
+                                     struct rocker_dma_ring_info *info,
+                                     int direction)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       int i;
+
+       for (i = 0; i < info->size; i++) {
+               struct rocker_desc_info *desc_info = &info->desc_info[i];
+               struct rocker_desc *desc = &info->desc[i];
+
+               desc->buf_addr = 0;
+               desc->buf_size = 0;
+               pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+                                desc_info->data_size, direction);
+               kfree(desc_info->data);
+       }
+}
+
+static int rocker_dma_rings_init(struct rocker *rocker)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       int err;
+
+       err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
+                                    ROCKER_DMA_CMD_DEFAULT_SIZE,
+                                    &rocker->cmd_ring);
+       if (err) {
+               dev_err(&pdev->dev, "failed to create command dma ring\n");
+               return err;
+       }
+
+       spin_lock_init(&rocker->cmd_ring_lock);
+
+       err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
+                                        PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
+       if (err) {
+               dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
+               goto err_dma_cmd_ring_bufs_alloc;
+       }
+
+       err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
+                                    ROCKER_DMA_EVENT_DEFAULT_SIZE,
+                                    &rocker->event_ring);
+       if (err) {
+               dev_err(&pdev->dev, "failed to create event dma ring\n");
+               goto err_dma_event_ring_create;
+       }
+
+       err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
+                                        PCI_DMA_FROMDEVICE, PAGE_SIZE);
+       if (err) {
+               dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
+               goto err_dma_event_ring_bufs_alloc;
+       }
+       rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
+       return 0;
+
+err_dma_event_ring_bufs_alloc:
+       rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+err_dma_event_ring_create:
+       rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+                                 PCI_DMA_BIDIRECTIONAL);
+err_dma_cmd_ring_bufs_alloc:
+       rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+       return err;
+}
+
+static void rocker_dma_rings_fini(struct rocker *rocker)
+{
+       rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
+                                 PCI_DMA_BIDIRECTIONAL);
+       rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+       rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+                                 PCI_DMA_BIDIRECTIONAL);
+       rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+}
+
+static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
+                                     struct rocker_port *rocker_port,
+                                     struct rocker_desc_info *desc_info,
+                                     struct sk_buff *skb, size_t buf_len)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       dma_addr_t dma_handle;
+
+       dma_handle = pci_map_single(pdev, skb->data, buf_len,
+                                   PCI_DMA_FROMDEVICE);
+       if (pci_dma_mapping_error(pdev, dma_handle))
+               return -EIO;
+       if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
+               goto tlv_put_failure;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
+               goto tlv_put_failure;
+       return 0;
+
+tlv_put_failure:
+       pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
+       desc_info->tlv_size = 0;
+       return -EMSGSIZE;
+}
+
+static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
+{
+       return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+}
+
+static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
+                                       struct rocker_port *rocker_port,
+                                       struct rocker_desc_info *desc_info)
+{
+       struct net_device *dev = rocker_port->dev;
+       struct sk_buff *skb;
+       size_t buf_len = rocker_port_rx_buf_len(rocker_port);
+       int err;
+
+       /* Ensure that hw will see tlv_size zero in case of an error.
+        * That tells hw to use another descriptor.
+        */
+       rocker_desc_cookie_ptr_set(desc_info, NULL);
+       desc_info->tlv_size = 0;
+
+       skb = netdev_alloc_skb_ip_align(dev, buf_len);
+       if (!skb)
+               return -ENOMEM;
+       err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
+                                        skb, buf_len);
+       if (err) {
+               dev_kfree_skb_any(skb);
+               return err;
+       }
+       rocker_desc_cookie_ptr_set(desc_info, skb);
+       return 0;
+}
+
+static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
+                                        struct rocker_tlv **attrs)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       dma_addr_t dma_handle;
+       size_t len;
+
+       if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
+           !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
+               return;
+       dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
+       len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
+       pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
+}
+
+static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
+                                       struct rocker_desc_info *desc_info)
+{
+       struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+       struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
+
+       if (!skb)
+               return;
+       rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
+       rocker_dma_rx_ring_skb_unmap(rocker, attrs);
+       dev_kfree_skb_any(skb);
+}
+
+static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
+                                        struct rocker_port *rocker_port)
+{
+       struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+       int i;
+       int err;
+
+       for (i = 0; i < rx_ring->size; i++) {
+               err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
+                                                  &rx_ring->desc_info[i]);
+               if (err)
+                       goto rollback;
+       }
+       return 0;
+
+rollback:
+       for (i--; i >= 0; i--)
+               rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
+       return err;
+}
+
+static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
+                                        struct rocker_port *rocker_port)
+{
+       struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+       int i;
+
+       for (i = 0; i < rx_ring->size; i++)
+               rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
+}
+
+static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       int err;
+
+       err = rocker_dma_ring_create(rocker,
+                                    ROCKER_DMA_TX(rocker_port->port_number),
+                                    ROCKER_DMA_TX_DEFAULT_SIZE,
+                                    &rocker_port->tx_ring);
+       if (err) {
+               netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
+               return err;
+       }
+
+       err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
+                                        PCI_DMA_TODEVICE,
+                                        ROCKER_DMA_TX_DESC_SIZE);
+       if (err) {
+               netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
+               goto err_dma_tx_ring_bufs_alloc;
+       }
+
+       err = rocker_dma_ring_create(rocker,
+                                    ROCKER_DMA_RX(rocker_port->port_number),
+                                    ROCKER_DMA_RX_DEFAULT_SIZE,
+                                    &rocker_port->rx_ring);
+       if (err) {
+               netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
+               goto err_dma_rx_ring_create;
+       }
+
+       err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
+                                        PCI_DMA_BIDIRECTIONAL,
+                                        ROCKER_DMA_RX_DESC_SIZE);
+       if (err) {
+               netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
+               goto err_dma_rx_ring_bufs_alloc;
+       }
+
+       err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+       if (err) {
+               netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
+               goto err_dma_rx_ring_skbs_alloc;
+       }
+       rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
+
+       return 0;
+
+err_dma_rx_ring_skbs_alloc:
+       rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+                                 PCI_DMA_BIDIRECTIONAL);
+err_dma_rx_ring_bufs_alloc:
+       rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+err_dma_rx_ring_create:
+       rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+                                 PCI_DMA_TODEVICE);
+err_dma_tx_ring_bufs_alloc:
+       rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+       return err;
+}
+
+static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
+{
+       struct rocker *rocker = rocker_port->rocker;
+
+       rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+       rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+                                 PCI_DMA_BIDIRECTIONAL);
+       rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+       rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+                                 PCI_DMA_TODEVICE);
+       rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+}
+
+static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+{
+       u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
+
+       if (enable)
+               val |= 1ULL << rocker_port->lport;
+       else
+               val &= ~(1ULL << rocker_port->lport);
+       rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
+}
+
+/********************************
+ * Interrupt handler and helpers
+ ********************************/
+
+static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
+{
+       struct rocker *rocker = dev_id;
+       struct rocker_desc_info *desc_info;
+       struct rocker_wait *wait;
+       u32 credits = 0;
+
+       spin_lock(&rocker->cmd_ring_lock);
+       while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
+               wait = rocker_desc_cookie_ptr_get(desc_info);
+               if (wait->nowait) {
+                       rocker_desc_gen_clear(desc_info);
+                       rocker_wait_destroy(wait);
+               } else {
+                       rocker_wait_wake_up(wait);
+               }
+               credits++;
+       }
+       spin_unlock(&rocker->cmd_ring_lock);
+       rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
+
+       return IRQ_HANDLED;
+}
+
+static void rocker_port_link_up(struct rocker_port *rocker_port)
+{
+       netif_carrier_on(rocker_port->dev);
+       netdev_info(rocker_port->dev, "Link is up\n");
+}
+
+static void rocker_port_link_down(struct rocker_port *rocker_port)
+{
+       netif_carrier_off(rocker_port->dev);
+       netdev_info(rocker_port->dev, "Link is down\n");
+}
+
+static int rocker_event_link_change(struct rocker *rocker,
+                                   const struct rocker_tlv *info)
+{
+       struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+       unsigned int port_number;
+       bool link_up;
+       struct rocker_port *rocker_port;
+
+       rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
+       if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] ||
+           !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
+               return -EIO;
+       port_number =
+               rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1;
+       link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
+
+       if (port_number >= rocker->port_count)
+               return -EINVAL;
+
+       rocker_port = rocker->ports[port_number];
+       if (netif_carrier_ok(rocker_port->dev) != link_up) {
+               if (link_up)
+                       rocker_port_link_up(rocker_port);
+               else
+                       rocker_port_link_down(rocker_port);
+       }
+
+       return 0;
+}
+
+#define ROCKER_OP_FLAG_REMOVE          BIT(0)
+#define ROCKER_OP_FLAG_NOWAIT          BIT(1)
+#define ROCKER_OP_FLAG_LEARNED         BIT(2)
+#define ROCKER_OP_FLAG_REFRESH         BIT(3)
+
+static int rocker_port_fdb(struct rocker_port *rocker_port,
+                          const unsigned char *addr,
+                          __be16 vlan_id, int flags);
+
+static int rocker_event_mac_vlan_seen(struct rocker *rocker,
+                                     const struct rocker_tlv *info)
+{
+       struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+       unsigned int port_number;
+       struct rocker_port *rocker_port;
+       unsigned char *addr;
+       int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
+       __be16 vlan_id;
+
+       rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
+       if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] ||
+           !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
+           !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
+               return -EIO;
+       port_number =
+               rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1;
+       addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
+       vlan_id = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
+
+       if (port_number >= rocker->port_count)
+               return -EINVAL;
+
+       rocker_port = rocker->ports[port_number];
+
+       if (rocker_port->stp_state != BR_STATE_LEARNING &&
+           rocker_port->stp_state != BR_STATE_FORWARDING)
+               return 0;
+
+       return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+}
+
+static int rocker_event_process(struct rocker *rocker,
+                               struct rocker_desc_info *desc_info)
+{
+       struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+       struct rocker_tlv *info;
+       u16 type;
+
+       rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
+       if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
+           !attrs[ROCKER_TLV_EVENT_INFO])
+               return -EIO;
+
+       type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
+       info = attrs[ROCKER_TLV_EVENT_INFO];
+
+       switch (type) {
+       case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
+               return rocker_event_link_change(rocker, info);
+       case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
+               return rocker_event_mac_vlan_seen(rocker, info);
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
+{
+       struct rocker *rocker = dev_id;
+       struct pci_dev *pdev = rocker->pdev;
+       struct rocker_desc_info *desc_info;
+       u32 credits = 0;
+       int err;
+
+       while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
+               err = rocker_desc_err(desc_info);
+               if (err) {
+                       dev_err(&pdev->dev, "event desc received with err %d\n",
+                               err);
+               } else {
+                       err = rocker_event_process(rocker, desc_info);
+                       if (err)
+                               dev_err(&pdev->dev, "event processing failed with err %d\n",
+                                       err);
+               }
+               rocker_desc_gen_clear(desc_info);
+               rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
+               credits++;
+       }
+       rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
+{
+       struct rocker_port *rocker_port = dev_id;
+
+       napi_schedule(&rocker_port->napi_tx);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
+{
+       struct rocker_port *rocker_port = dev_id;
+
+       napi_schedule(&rocker_port->napi_rx);
+       return IRQ_HANDLED;
+}
+
+/********************
+ * Command interface
+ ********************/
+
+typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
+                              struct rocker_port *rocker_port,
+                              struct rocker_desc_info *desc_info,
+                              void *priv);
+
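+/* Run one command through the cmd DMA ring.  The "prepare" callback
+ * fills the descriptor with TLVs before it is posted to the device.
+ * Unless nowait is set, the caller then sleeps (up to HZ / 10) for the
+ * completion interrupt, and the optional "process" callback parses the
+ * response.  The port-settings helpers below show the typical pattern:
+ *
+ *     rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ *                     rocker_cmd_get_port_settings_prep, NULL,
+ *                     rocker_cmd_get_port_settings_ethtool_proc,
+ *                     ecmd, false);
+ */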
+static int rocker_cmd_exec(struct rocker *rocker,
+                          struct rocker_port *rocker_port,
+                          rocker_cmd_cb_t prepare, void *prepare_priv,
+                          rocker_cmd_cb_t process, void *process_priv,
+                          bool nowait)
+{
+       struct rocker_desc_info *desc_info;
+       struct rocker_wait *wait;
+       unsigned long flags;
+       int err;
+
+       wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
+       if (!wait)
+               return -ENOMEM;
+       wait->nowait = nowait;
+
+       spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
+       desc_info = rocker_desc_head_get(&rocker->cmd_ring);
+       if (!desc_info) {
+               spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+               err = -EAGAIN;
+               goto out;
+       }
+       err = prepare(rocker, rocker_port, desc_info, prepare_priv);
+       if (err) {
+               spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+               goto out;
+       }
+       rocker_desc_cookie_ptr_set(desc_info, wait);
+       rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
+       spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+
+       if (nowait)
+               return 0;
+
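+       /* Note: the wait object is not freed on timeout; a late
+        * completion interrupt may still wake it.
+        */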
+       if (!rocker_wait_event_timeout(wait, HZ / 10))
+               return -EIO;
+
+       err = rocker_desc_err(desc_info);
+       if (err)
+               return err;
+
+       if (process)
+               err = process(rocker, rocker_port, desc_info, process_priv);
+
+       rocker_desc_gen_clear(desc_info);
+out:
+       rocker_wait_destroy(wait);
+       return err;
+}
+
+static int
+rocker_cmd_get_port_settings_prep(struct rocker *rocker,
+                                 struct rocker_port *rocker_port,
+                                 struct rocker_desc_info *desc_info,
+                                 void *priv)
+{
+       struct rocker_tlv *cmd_info;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+                              ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+                              rocker_port->lport))
+               return -EMSGSIZE;
+       rocker_tlv_nest_end(desc_info, cmd_info);
+       return 0;
+}
+
+static int
+rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
+                                         struct rocker_port *rocker_port,
+                                         struct rocker_desc_info *desc_info,
+                                         void *priv)
+{
+       struct ethtool_cmd *ecmd = priv;
+       struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+       struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+       u32 speed;
+       u8 duplex;
+       u8 autoneg;
+
+       rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+       if (!attrs[ROCKER_TLV_CMD_INFO])
+               return -EIO;
+
+       rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+                               attrs[ROCKER_TLV_CMD_INFO]);
+       if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
+           !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
+           !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
+               return -EIO;
+
+       speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
+       duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
+       autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
+
+       ecmd->transceiver = XCVR_INTERNAL;
+       ecmd->supported = SUPPORTED_TP;
+       ecmd->phy_address = 0xff;
+       ecmd->port = PORT_TP;
+       ethtool_cmd_speed_set(ecmd, speed);
+       ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
+       ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+       return 0;
+}
+
+static int
+rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
+                                         struct rocker_port *rocker_port,
+                                         struct rocker_desc_info *desc_info,
+                                         void *priv)
+{
+       unsigned char *macaddr = priv;
+       struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+       struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+       struct rocker_tlv *attr;
+
+       rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+       if (!attrs[ROCKER_TLV_CMD_INFO])
+               return -EIO;
+
+       rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+                               attrs[ROCKER_TLV_CMD_INFO]);
+       attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
+       if (!attr)
+               return -EIO;
+
+       if (rocker_tlv_len(attr) != ETH_ALEN)
+               return -EINVAL;
+
+       ether_addr_copy(macaddr, rocker_tlv_data(attr));
+       return 0;
+}
+
+static int
+rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
+                                         struct rocker_port *rocker_port,
+                                         struct rocker_desc_info *desc_info,
+                                         void *priv)
+{
+       struct ethtool_cmd *ecmd = priv;
+       struct rocker_tlv *cmd_info;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+                              ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+                              rocker_port->lport))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
+                              ethtool_cmd_speed(ecmd)))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
+                             ecmd->duplex))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
+                             ecmd->autoneg))
+               return -EMSGSIZE;
+       rocker_tlv_nest_end(desc_info, cmd_info);
+       return 0;
+}
+
+static int
+rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
+                                         struct rocker_port *rocker_port,
+                                         struct rocker_desc_info *desc_info,
+                                         void *priv)
+{
+       unsigned char *macaddr = priv;
+       struct rocker_tlv *cmd_info;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+                              ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+                              rocker_port->lport))
+               return -EMSGSIZE;
+       if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
+                          ETH_ALEN, macaddr))
+               return -EMSGSIZE;
+       rocker_tlv_nest_end(desc_info, cmd_info);
+       return 0;
+}
+
+static int
+rocker_cmd_set_port_learning_prep(struct rocker *rocker,
+                                 struct rocker_port *rocker_port,
+                                 struct rocker_desc_info *desc_info,
+                                 void *priv)
+{
+       struct rocker_tlv *cmd_info;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+                              ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+                              rocker_port->lport))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
+                             !!(rocker_port->brport_flags & BR_LEARNING)))
+               return -EMSGSIZE;
+       rocker_tlv_nest_end(desc_info, cmd_info);
+       return 0;
+}
+
+static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
+                                               struct ethtool_cmd *ecmd)
+{
+       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+                              rocker_cmd_get_port_settings_prep, NULL,
+                              rocker_cmd_get_port_settings_ethtool_proc,
+                              ecmd, false);
+}
+
+static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
+                                               unsigned char *macaddr)
+{
+       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+                              rocker_cmd_get_port_settings_prep, NULL,
+                              rocker_cmd_get_port_settings_macaddr_proc,
+                              macaddr, false);
+}
+
+static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
+                                               struct ethtool_cmd *ecmd)
+{
+       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+                              rocker_cmd_set_port_settings_ethtool_prep,
+                              ecmd, NULL, NULL, false);
+}
+
+static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
+                                               unsigned char *macaddr)
+{
+       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+                              rocker_cmd_set_port_settings_macaddr_prep,
+                              macaddr, NULL, NULL, false);
+}
+
+static int rocker_port_set_learning(struct rocker_port *rocker_port)
+{
+       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+                              rocker_cmd_set_port_learning_prep,
+                              NULL, NULL, NULL, false);
+}
+
+static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+                                          struct rocker_flow_tbl_entry *entry)
+{
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+                              entry->key.ig_port.in_lport))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
+                              entry->key.ig_port.in_lport_mask))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+                              entry->key.ig_port.goto_tbl))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+                                       struct rocker_flow_tbl_entry *entry)
+{
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+                              entry->key.vlan.in_lport))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+                              entry->key.vlan.vlan_id))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+                              entry->key.vlan.vlan_id_mask))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+                              entry->key.vlan.goto_tbl))
+               return -EMSGSIZE;
+       if (entry->key.vlan.untagged &&
+           rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
+                              entry->key.vlan.new_vlan_id))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+                                           struct rocker_flow_tbl_entry *entry)
+{
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+                              entry->key.term_mac.in_lport))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
+                              entry->key.term_mac.in_lport_mask))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+                              entry->key.term_mac.eth_type))
+               return -EMSGSIZE;
+       if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+                          ETH_ALEN, entry->key.term_mac.eth_dst))
+               return -EMSGSIZE;
+       if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+                          ETH_ALEN, entry->key.term_mac.eth_dst_mask))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+                              entry->key.term_mac.vlan_id))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+                              entry->key.term_mac.vlan_id_mask))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+                              entry->key.term_mac.goto_tbl))
+               return -EMSGSIZE;
+       if (entry->key.term_mac.copy_to_cpu &&
+           rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
+                             entry->key.term_mac.copy_to_cpu))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int
+rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
+                                     struct rocker_flow_tbl_entry *entry)
+{
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+                              entry->key.ucast_routing.eth_type))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
+                              entry->key.ucast_routing.dst4))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
+                              entry->key.ucast_routing.dst4_mask))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+                              entry->key.ucast_routing.goto_tbl))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+                              entry->key.ucast_routing.group_id))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+                                         struct rocker_flow_tbl_entry *entry)
+{
+       if (entry->key.bridge.has_eth_dst &&
+           rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+                          ETH_ALEN, entry->key.bridge.eth_dst))
+               return -EMSGSIZE;
+       if (entry->key.bridge.has_eth_dst_mask &&
+           rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+                          ETH_ALEN, entry->key.bridge.eth_dst_mask))
+               return -EMSGSIZE;
+       if (entry->key.bridge.vlan_id &&
+           rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+                              entry->key.bridge.vlan_id))
+               return -EMSGSIZE;
+       if (entry->key.bridge.tunnel_id &&
+           rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
+                              entry->key.bridge.tunnel_id))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+                              entry->key.bridge.goto_tbl))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+                              entry->key.bridge.group_id))
+               return -EMSGSIZE;
+       if (entry->key.bridge.copy_to_cpu &&
+           rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
+                             entry->key.bridge.copy_to_cpu))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+                                      struct rocker_flow_tbl_entry *entry)
+{
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+                              entry->key.acl.in_lport))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
+                              entry->key.acl.in_lport_mask))
+               return -EMSGSIZE;
+       if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+                          ETH_ALEN, entry->key.acl.eth_src))
+               return -EMSGSIZE;
+       if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
+                          ETH_ALEN, entry->key.acl.eth_src_mask))
+               return -EMSGSIZE;
+       if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+                          ETH_ALEN, entry->key.acl.eth_dst))
+               return -EMSGSIZE;
+       if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+                          ETH_ALEN, entry->key.acl.eth_dst_mask))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+                              entry->key.acl.eth_type))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+                              entry->key.acl.vlan_id))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+                              entry->key.acl.vlan_id_mask))
+               return -EMSGSIZE;
+
+       switch (ntohs(entry->key.acl.eth_type)) {
+       case ETH_P_IP:
+       case ETH_P_IPV6:
+               if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
+                                     entry->key.acl.ip_proto))
+                       return -EMSGSIZE;
+               if (rocker_tlv_put_u8(desc_info,
+                                     ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
+                                     entry->key.acl.ip_proto_mask))
+                       return -EMSGSIZE;
+               if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
+                                     entry->key.acl.ip_tos & 0x3f))
+                       return -EMSGSIZE;
+               if (rocker_tlv_put_u8(desc_info,
+                                     ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
+                                     entry->key.acl.ip_tos_mask & 0x3f))
+                       return -EMSGSIZE;
+               if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
+                                     (entry->key.acl.ip_tos & 0xc0) >> 6))
+                       return -EMSGSIZE;
+               if (rocker_tlv_put_u8(desc_info,
+                                     ROCKER_TLV_OF_DPA_IP_ECN_MASK,
+                                     (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
+                       return -EMSGSIZE;
+               break;
+       }
+
+       if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
+           rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+                              entry->key.acl.group_id))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
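+/* Encode an OF-DPA FLOW_ADD command: common key fields first, then
+ * dispatch on the flow table ID to the per-table encoders above.
+ */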
+static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
+                                  struct rocker_port *rocker_port,
+                                  struct rocker_desc_info *desc_info,
+                                  void *priv)
+{
+       struct rocker_flow_tbl_entry *entry = priv;
+       struct rocker_tlv *cmd_info;
+       int err = 0;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+                              ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
+                              entry->key.tbl_id))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
+                              entry->key.priority))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
+                              entry->cookie))
+               return -EMSGSIZE;
+
+       switch (entry->key.tbl_id) {
+       case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
+               err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
+               break;
+       case ROCKER_OF_DPA_TABLE_ID_VLAN:
+               err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
+               break;
+       case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
+               err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
+               break;
+       case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
+               err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
+               break;
+       case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
+               err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
+               break;
+       case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
+               err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       if (err)
+               return err;
+
+       rocker_tlv_nest_end(desc_info, cmd_info);
+
+       return 0;
+}
+
+static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
+                                  struct rocker_port *rocker_port,
+                                  struct rocker_desc_info *desc_info,
+                                  void *priv)
+{
+       const struct rocker_flow_tbl_entry *entry = priv;
+       struct rocker_tlv *cmd_info;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+                              ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
+                              entry->cookie))
+               return -EMSGSIZE;
+       rocker_tlv_nest_end(desc_info, cmd_info);
+
+       return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
+                                     struct rocker_group_tbl_entry *entry)
+{
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT,
+                              ROCKER_GROUP_PORT_GET(entry->group_id)))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
+                             entry->l2_interface.pop_vlan))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
+                                   struct rocker_group_tbl_entry *entry)
+{
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
+                              entry->l2_rewrite.group_id))
+               return -EMSGSIZE;
+       if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
+           rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+                          ETH_ALEN, entry->l2_rewrite.eth_src))
+               return -EMSGSIZE;
+       if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
+           rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+                          ETH_ALEN, entry->l2_rewrite.eth_dst))
+               return -EMSGSIZE;
+       if (entry->l2_rewrite.vlan_id &&
+           rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+                              entry->l2_rewrite.vlan_id))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
+                                  struct rocker_group_tbl_entry *entry)
+{
+       int i;
+       struct rocker_tlv *group_ids;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
+                              entry->group_count))
+               return -EMSGSIZE;
+
+       group_ids = rocker_tlv_nest_start(desc_info,
+                                         ROCKER_TLV_OF_DPA_GROUP_IDS);
+       if (!group_ids)
+               return -EMSGSIZE;
+
+       for (i = 0; i < entry->group_count; i++)
+               /* Note TLV array is 1-based */
+               if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
+                       return -EMSGSIZE;
+
+       rocker_tlv_nest_end(desc_info, group_ids);
+
+       return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
+                                   struct rocker_group_tbl_entry *entry)
+{
+       if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
+           rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+                          ETH_ALEN, entry->l3_unicast.eth_src))
+               return -EMSGSIZE;
+       if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
+           rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+                          ETH_ALEN, entry->l3_unicast.eth_dst))
+               return -EMSGSIZE;
+       if (entry->l3_unicast.vlan_id &&
+           rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+                              entry->l3_unicast.vlan_id))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
+                             entry->l3_unicast.ttl_check))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
+                              entry->l3_unicast.group_id))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int rocker_cmd_group_tbl_add(struct rocker *rocker,
+                                   struct rocker_port *rocker_port,
+                                   struct rocker_desc_info *desc_info,
+                                   void *priv)
+{
+       struct rocker_group_tbl_entry *entry = priv;
+       struct rocker_tlv *cmd_info;
+       int err = 0;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+                              entry->group_id))
+               return -EMSGSIZE;
+
+       switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
+       case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
+               err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
+               break;
+       case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
+               err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
+               break;
+       case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
+       case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
+               err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
+               break;
+       case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
+               err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       if (err)
+               return err;
+
+       rocker_tlv_nest_end(desc_info, cmd_info);
+
+       return 0;
+}
+
+static int rocker_cmd_group_tbl_del(struct rocker *rocker,
+                                   struct rocker_port *rocker_port,
+                                   struct rocker_desc_info *desc_info,
+                                   void *priv)
+{
+       const struct rocker_group_tbl_entry *entry = priv;
+       struct rocker_tlv *cmd_info;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+                              entry->group_id))
+               return -EMSGSIZE;
+       rocker_tlv_nest_end(desc_info, cmd_info);
+
+       return 0;
+}
+
+/*****************************************
+ * Flow, group, FDB, internal VLAN tables
+ *****************************************/
+
+static int rocker_init_tbls(struct rocker *rocker)
+{
+       hash_init(rocker->flow_tbl);
+       spin_lock_init(&rocker->flow_tbl_lock);
+
+       hash_init(rocker->group_tbl);
+       spin_lock_init(&rocker->group_tbl_lock);
+
+       hash_init(rocker->fdb_tbl);
+       spin_lock_init(&rocker->fdb_tbl_lock);
+
+       hash_init(rocker->internal_vlan_tbl);
+       spin_lock_init(&rocker->internal_vlan_tbl_lock);
+
+       return 0;
+}
+
+static void rocker_free_tbls(struct rocker *rocker)
+{
+       unsigned long flags;
+       struct rocker_flow_tbl_entry *flow_entry;
+       struct rocker_group_tbl_entry *group_entry;
+       struct rocker_fdb_tbl_entry *fdb_entry;
+       struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
+       struct hlist_node *tmp;
+       int bkt;
+
+       spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+       hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
+               hash_del(&flow_entry->entry);
+       spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+
+       spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+       hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
+               hash_del(&group_entry->entry);
+       spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+
+       spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
+       hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
+               hash_del(&fdb_entry->entry);
+       spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
+
+       spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
+       hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
+                          tmp, internal_vlan_entry, entry)
+               hash_del(&internal_vlan_entry->entry);
+       spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
+}
+
+static struct rocker_flow_tbl_entry *
+rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
+{
+       struct rocker_flow_tbl_entry *found;
+
+       hash_for_each_possible(rocker->flow_tbl, found,
+                              entry, match->key_crc32) {
+               if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+                       return found;
+       }
+
+       return NULL;
+}
+
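+/* The software flow table shadows what has been programmed into the
+ * device.  Entries are keyed by a CRC32 of the flow key and
+ * ref-counted; the FLOW_ADD command is issued only when an entry is
+ * first inserted.
+ */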
+static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
+                              struct rocker_flow_tbl_entry *match,
+                              bool nowait)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_flow_tbl_entry *found;
+       unsigned long flags;
+       bool add_to_hw = false;
+       int err = 0;
+
+       match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+
+       spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+
+       found = rocker_flow_tbl_find(rocker, match);
+
+       if (found) {
+               kfree(match);
+       } else {
+               found = match;
+               found->cookie = rocker->flow_tbl_next_cookie++;
+               hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
+               add_to_hw = true;
+       }
+
+       found->ref_count++;
+
+       spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+
+       if (add_to_hw) {
+               err = rocker_cmd_exec(rocker, rocker_port,
+                                     rocker_cmd_flow_tbl_add,
+                                     found, NULL, NULL, nowait);
+               if (err) {
+                       spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+                       hash_del(&found->entry);
+                       spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+                       kfree(found);
+               }
+       }
+
+       return err;
+}
+
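+/* Drop one reference on a flow entry; the FLOW_DEL command is issued
+ * only when the last reference goes away.
+ */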
+static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
+                              struct rocker_flow_tbl_entry *match,
+                              bool nowait)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_flow_tbl_entry *found;
+       unsigned long flags;
+       bool del_from_hw = false;
+       int err = 0;
+
+       match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+
+       spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+
+       found = rocker_flow_tbl_find(rocker, match);
+
+       if (found) {
+               found->ref_count--;
+               if (found->ref_count == 0) {
+                       hash_del(&found->entry);
+                       del_from_hw = true;
+               }
+       }
+
+       spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+
+       kfree(match);
+
+       if (del_from_hw) {
+               err = rocker_cmd_exec(rocker, rocker_port,
+                                     rocker_cmd_flow_tbl_del,
+                                     found, NULL, NULL, nowait);
+               kfree(found);
+       }
+
+       return err;
+}
+
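+/* NOWAIT operations may run in atomic context (e.g. from the event
+ * IRQ handler), so allocate accordingly.
+ */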
+static gfp_t rocker_op_flags_gfp(int flags)
+{
+       return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
+                             int flags, struct rocker_flow_tbl_entry *entry)
+{
+       bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
+
+       if (flags & ROCKER_OP_FLAG_REMOVE)
+               return rocker_flow_tbl_del(rocker_port, entry, nowait);
+       else
+               return rocker_flow_tbl_add(rocker_port, entry, nowait);
+}
+
+static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
+                                  int flags, u32 in_lport, u32 in_lport_mask,
+                                  enum rocker_of_dpa_table_id goto_tbl)
+{
+       struct rocker_flow_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       entry->key.priority = ROCKER_PRIORITY_IG_PORT;
+       entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
+       entry->key.ig_port.in_lport = in_lport;
+       entry->key.ig_port.in_lport_mask = in_lport_mask;
+       entry->key.ig_port.goto_tbl = goto_tbl;
+
+       return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
+                               int flags, u32 in_lport,
+                               __be16 vlan_id, __be16 vlan_id_mask,
+                               enum rocker_of_dpa_table_id goto_tbl,
+                               bool untagged, __be16 new_vlan_id)
+{
+       struct rocker_flow_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       entry->key.priority = ROCKER_PRIORITY_VLAN;
+       entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
+       entry->key.vlan.in_lport = in_lport;
+       entry->key.vlan.vlan_id = vlan_id;
+       entry->key.vlan.vlan_id_mask = vlan_id_mask;
+       entry->key.vlan.goto_tbl = goto_tbl;
+
+       entry->key.vlan.untagged = untagged;
+       entry->key.vlan.new_vlan_id = new_vlan_id;
+
+       return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
+                                   u32 in_lport, u32 in_lport_mask,
+                                   __be16 eth_type, const u8 *eth_dst,
+                                   const u8 *eth_dst_mask, __be16 vlan_id,
+                                   __be16 vlan_id_mask, bool copy_to_cpu,
+                                   int flags)
+{
+       struct rocker_flow_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       if (is_multicast_ether_addr(eth_dst)) {
+               entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
+               entry->key.term_mac.goto_tbl =
+                        ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
+       } else {
+               entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
+               entry->key.term_mac.goto_tbl =
+                        ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+       }
+
+       entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+       entry->key.term_mac.in_lport = in_lport;
+       entry->key.term_mac.in_lport_mask = in_lport_mask;
+       entry->key.term_mac.eth_type = eth_type;
+       ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
+       ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
+       entry->key.term_mac.vlan_id = vlan_id;
+       entry->key.term_mac.vlan_id_mask = vlan_id_mask;
+       entry->key.term_mac.copy_to_cpu = copy_to_cpu;
+
+       return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
+                                 int flags,
+                                 const u8 *eth_dst, const u8 *eth_dst_mask,
+                                 __be16 vlan_id, u32 tunnel_id,
+                                 enum rocker_of_dpa_table_id goto_tbl,
+                                 u32 group_id, bool copy_to_cpu)
+{
+       struct rocker_flow_tbl_entry *entry;
+       u32 priority;
+       bool vlan_bridging = !!vlan_id;
+       bool dflt = !eth_dst || eth_dst_mask;
+       bool wild = false;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
+
+       if (eth_dst) {
+               entry->key.bridge.has_eth_dst = 1;
+               ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
+       }
+       if (eth_dst_mask) {
+               entry->key.bridge.has_eth_dst_mask = 1;
+               ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
+               if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
+                       wild = true;
+       }
+
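+       /* Choose a bridging priority: VLAN vs. tenant (tunnel-keyed)
+        * bridging, default (missing or wildcarded dst MAC) vs. exact
+        * match.
+        */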
+       priority = ROCKER_PRIORITY_UNKNOWN;
+       if (vlan_bridging && dflt && wild)
+               priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
+       else if (vlan_bridging && dflt && !wild)
+               priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
+       else if (vlan_bridging && !dflt)
+               priority = ROCKER_PRIORITY_BRIDGING_VLAN;
+       else if (!vlan_bridging && dflt && wild)
+               priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
+       else if (!vlan_bridging && dflt && !wild)
+               priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
+       else if (!vlan_bridging && !dflt)
+               priority = ROCKER_PRIORITY_BRIDGING_TENANT;
+
+       entry->key.priority = priority;
+       entry->key.bridge.vlan_id = vlan_id;
+       entry->key.bridge.tunnel_id = tunnel_id;
+       entry->key.bridge.goto_tbl = goto_tbl;
+       entry->key.bridge.group_id = group_id;
+       entry->key.bridge.copy_to_cpu = copy_to_cpu;
+
+       return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
+                              int flags, u32 in_lport,
+                              u32 in_lport_mask,
+                              const u8 *eth_src, const u8 *eth_src_mask,
+                              const u8 *eth_dst, const u8 *eth_dst_mask,
+                              __be16 eth_type,
+                              __be16 vlan_id, __be16 vlan_id_mask,
+                              u8 ip_proto, u8 ip_proto_mask,
+                              u8 ip_tos, u8 ip_tos_mask,
+                              u32 group_id)
+{
+       u32 priority;
+       struct rocker_flow_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       priority = ROCKER_PRIORITY_ACL_NORMAL;
+       if (eth_dst && eth_dst_mask) {
+               if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
+                       priority = ROCKER_PRIORITY_ACL_DFLT;
+               else if (is_link_local_ether_addr(eth_dst))
+                       priority = ROCKER_PRIORITY_ACL_CTRL;
+       }
+
+       entry->key.priority = priority;
+       entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+       entry->key.acl.in_lport = in_lport;
+       entry->key.acl.in_lport_mask = in_lport_mask;
+
+       if (eth_src)
+               ether_addr_copy(entry->key.acl.eth_src, eth_src);
+       if (eth_src_mask)
+               ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
+       if (eth_dst)
+               ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
+       if (eth_dst_mask)
+               ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
+
+       entry->key.acl.eth_type = eth_type;
+       entry->key.acl.vlan_id = vlan_id;
+       entry->key.acl.vlan_id_mask = vlan_id_mask;
+       entry->key.acl.ip_proto = ip_proto;
+       entry->key.acl.ip_proto_mask = ip_proto_mask;
+       entry->key.acl.ip_tos = ip_tos;
+       entry->key.acl.ip_tos_mask = ip_tos_mask;
+       entry->key.acl.group_id = group_id;
+
+       return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static struct rocker_group_tbl_entry *
+rocker_group_tbl_find(struct rocker *rocker,
+                     struct rocker_group_tbl_entry *match)
+{
+       struct rocker_group_tbl_entry *found;
+
+       hash_for_each_possible(rocker->group_tbl, found,
+                              entry, match->group_id) {
+               if (found->group_id == match->group_id)
+                       return found;
+       }
+
+       return NULL;
+}
+
+static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
+{
+       switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
+       case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
+       case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
+               kfree(entry->group_ids);
+               break;
+       default:
+               break;
+       }
+       kfree(entry);
+}
+
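+/* Adding a group that already exists replaces it: the old entry is
+ * freed and the new one is sent to the device as GROUP_MOD instead of
+ * GROUP_ADD.
+ */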
+static int rocker_group_tbl_add(struct rocker_port *rocker_port,
+                               struct rocker_group_tbl_entry *match,
+                               bool nowait)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_group_tbl_entry *found;
+       unsigned long flags;
+       int err = 0;
+
+       spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+
+       found = rocker_group_tbl_find(rocker, match);
+
+       if (found) {
+               hash_del(&found->entry);
+               rocker_group_tbl_entry_free(found);
+               found = match;
+               found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
+       } else {
+               found = match;
+               found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
+       }
+
+       hash_add(rocker->group_tbl, &found->entry, found->group_id);
+
+       spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+
+       if (found->cmd)
+               err = rocker_cmd_exec(rocker, rocker_port,
+                                     rocker_cmd_group_tbl_add,
+                                     found, NULL, NULL, nowait);
+
+       return err;
+}
+
+static int rocker_group_tbl_del(struct rocker_port *rocker_port,
+                               struct rocker_group_tbl_entry *match,
+                               bool nowait)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_group_tbl_entry *found;
+       unsigned long flags;
+       int err = 0;
+
+       spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+
+       found = rocker_group_tbl_find(rocker, match);
+
+       if (found) {
+               hash_del(&found->entry);
+               found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
+       }
+
+       spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+
+       rocker_group_tbl_entry_free(match);
+
+       if (found) {
+               err = rocker_cmd_exec(rocker, rocker_port,
+                                     rocker_cmd_group_tbl_del,
+                                     found, NULL, NULL, nowait);
+               rocker_group_tbl_entry_free(found);
+       }
+
+       return err;
+}
+
+static int rocker_group_tbl_do(struct rocker_port *rocker_port,
+                              int flags, struct rocker_group_tbl_entry *entry)
+{
+       bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
+
+       if (flags & ROCKER_OP_FLAG_REMOVE)
+               return rocker_group_tbl_del(rocker_port, entry, nowait);
+       else
+               return rocker_group_tbl_add(rocker_port, entry, nowait);
+}
+
+static int rocker_group_l2_interface(struct rocker_port *rocker_port,
+                                    int flags, __be16 vlan_id,
+                                    u32 out_lport, int pop_vlan)
+{
+       struct rocker_group_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+       entry->l2_interface.pop_vlan = pop_vlan;
+
+       return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
+                                  int flags, u8 group_count,
+                                  u32 *group_ids, u32 group_id)
+{
+       struct rocker_group_tbl_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       if (!entry)
+               return -ENOMEM;
+
+       entry->group_id = group_id;
+       entry->group_count = group_count;
+
+       entry->group_ids = kcalloc(group_count, sizeof(u32),
+                                  rocker_op_flags_gfp(flags));
+       if (!entry->group_ids) {
+               kfree(entry);
+               return -ENOMEM;
+       }
+       memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
+
+       return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_group_l2_flood(struct rocker_port *rocker_port,
+                                int flags, __be16 vlan_id,
+                                u8 group_count, u32 *group_ids,
+                                u32 group_id)
+{
+       return rocker_group_l2_fan_out(rocker_port, flags,
+                                      group_count, group_ids,
+                                      group_id);
+}
+
+static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
+                                       int flags, __be16 vlan_id)
+{
+       struct rocker_port *p;
+       struct rocker *rocker = rocker_port->rocker;
+       u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
+       u32 group_ids[rocker->port_count];
+       u8 group_count = 0;
+       int err;
+       int i;
+
+       /* Adjust the flood group for this VLAN.  The flood group
+        * references an L2 interface group for each port in this
+        * VLAN.
+        */
+
+       for (i = 0; i < rocker->port_count; i++) {
+               p = rocker->ports[i];
+               if (!rocker_port_is_bridged(p))
+                       continue;
+               if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
+                       group_ids[group_count++] =
+                               ROCKER_GROUP_L2_INTERFACE(vlan_id,
+                                                         p->lport);
+               }
+       }
+
+       /* If there are no bridged ports in this VLAN, we're done */
+       if (group_count == 0)
+               return 0;
+
+       err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
+                                   group_count, group_ids,
+                                   group_id);
+       if (err)
+               netdev_err(rocker_port->dev,
+                          "Error (%d) port VLAN l2 flood group\n", err);
+
+       return err;
+}
+
+static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
+                                     int flags, __be16 vlan_id,
+                                     bool pop_vlan)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_port *p;
+       bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+       u32 out_lport;
+       int ref = 0;
+       int err;
+       int i;
+
+       /* An L2 interface group for this port in this VLAN, but
+        * only when port STP state is LEARNING|FORWARDING.
+        */
+
+       if (rocker_port->stp_state == BR_STATE_LEARNING ||
+           rocker_port->stp_state == BR_STATE_FORWARDING) {
+               out_lport = rocker_port->lport;
+               err = rocker_group_l2_interface(rocker_port, flags,
+                                               vlan_id, out_lport,
+                                               pop_vlan);
+               if (err) {
+                       netdev_err(rocker_port->dev,
+                                  "Error (%d) port VLAN l2 group for lport %d\n",
+                                  err, out_lport);
+                       return err;
+               }
+       }
+
+       /* An L2 interface group for this VLAN to CPU port.
+        * Add when first port joins this VLAN and destroy when
+        * last port leaves this VLAN.
+        */
+
+       for (i = 0; i < rocker->port_count; i++) {
+               p = rocker->ports[i];
+               if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
+                       ref++;
+       }
+
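+       /* Only act when the first port joins the VLAN (adding,
+        * ref == 1) or the last port leaves it (removing, ref == 0).
+        */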
+       if ((!adding || ref != 1) && (adding || ref != 0))
+               return 0;
+
+       out_lport = 0;
+       err = rocker_group_l2_interface(rocker_port, flags,
+                                       vlan_id, out_lport,
+                                       pop_vlan);
+       if (err) {
+               netdev_err(rocker_port->dev,
+                          "Error (%d) port VLAN l2 group for CPU port\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
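+/* Control-plane traffic the port must trap or flood.  Each entry is
+ * installed per VLAN as an ACL rule, a bridging (flood) rule, or a
+ * termination-MAC rule; see rocker_port_ctrl_vlan().
+ */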
+static struct rocker_ctrl {
+       const u8 *eth_dst;
+       const u8 *eth_dst_mask;
+       u16 eth_type;
+       bool acl;
+       bool bridge;
+       bool term;
+       bool copy_to_cpu;
+} rocker_ctrls[] = {
+       [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
+               /* pass link local multicast pkts up to CPU for filtering */
+               .eth_dst = ll_mac,
+               .eth_dst_mask = ll_mask,
+               .acl = true,
+       },
+       [ROCKER_CTRL_LOCAL_ARP] = {
+               /* pass local ARP pkts up to CPU */
+               .eth_dst = zero_mac,
+               .eth_dst_mask = zero_mac,
+               .eth_type = htons(ETH_P_ARP),
+               .acl = true,
+       },
+       [ROCKER_CTRL_IPV4_MCAST] = {
+               /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
+               .eth_dst = ipv4_mcast,
+               .eth_dst_mask = ipv4_mask,
+               .eth_type = htons(ETH_P_IP),
+               .term  = true,
+               .copy_to_cpu = true,
+       },
+       [ROCKER_CTRL_IPV6_MCAST] = {
+               /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
+               .eth_dst = ipv6_mcast,
+               .eth_dst_mask = ipv6_mask,
+               .eth_type = htons(ETH_P_IPV6),
+               .term  = true,
+               .copy_to_cpu = true,
+       },
+       [ROCKER_CTRL_DFLT_BRIDGING] = {
+               /* flood any pkts on vlan */
+               .bridge = true,
+               .copy_to_cpu = true,
+       },
+};
+
+static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
+                                    int flags, struct rocker_ctrl *ctrl,
+                                    __be16 vlan_id)
+{
+       u32 in_lport = rocker_port->lport;
+       u32 in_lport_mask = 0xffffffff;
+       u32 out_lport = 0;
+       u8 *eth_src = NULL;
+       u8 *eth_src_mask = NULL;
+       __be16 vlan_id_mask = htons(0xffff);
+       u8 ip_proto = 0;
+       u8 ip_proto_mask = 0;
+       u8 ip_tos = 0;
+       u8 ip_tos_mask = 0;
+       u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+       int err;
+
+       err = rocker_flow_tbl_acl(rocker_port, flags,
+                                 in_lport, in_lport_mask,
+                                 eth_src, eth_src_mask,
+                                 ctrl->eth_dst, ctrl->eth_dst_mask,
+                                 ctrl->eth_type,
+                                 vlan_id, vlan_id_mask,
+                                 ip_proto, ip_proto_mask,
+                                 ip_tos, ip_tos_mask,
+                                 group_id);
+
+       if (err)
+               netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
+
+       return err;
+}
+
+static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
+                                       int flags, struct rocker_ctrl *ctrl,
+                                       __be16 vlan_id)
+{
+       enum rocker_of_dpa_table_id goto_tbl =
+               ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+       u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
+       u32 tunnel_id = 0;
+       int err;
+
+       if (!rocker_port_is_bridged(rocker_port))
+               return 0;
+
+       err = rocker_flow_tbl_bridge(rocker_port, flags,
+                                    ctrl->eth_dst, ctrl->eth_dst_mask,
+                                    vlan_id, tunnel_id,
+                                    goto_tbl, group_id, ctrl->copy_to_cpu);
+
+       if (err)
+               netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
+
+       return err;
+}
+
+static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
+                                     int flags, struct rocker_ctrl *ctrl,
+                                     __be16 vlan_id)
+{
+       u32 in_lport_mask = 0xffffffff;
+       __be16 vlan_id_mask = htons(0xffff);
+       int err;
+
+       if (ntohs(vlan_id) == 0)
+               vlan_id = rocker_port->internal_vlan_id;
+
+       err = rocker_flow_tbl_term_mac(rocker_port,
+                                      rocker_port->lport, in_lport_mask,
+                                      ctrl->eth_type, ctrl->eth_dst,
+                                      ctrl->eth_dst_mask, vlan_id,
+                                      vlan_id_mask, ctrl->copy_to_cpu,
+                                      flags);
+
+       if (err)
+               netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
+
+       return err;
+}
+
+static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
+                                struct rocker_ctrl *ctrl, __be16 vlan_id)
+{
+       if (ctrl->acl)
+               return rocker_port_ctrl_vlan_acl(rocker_port, flags,
+                                                ctrl, vlan_id);
+       if (ctrl->bridge)
+               return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
+                                                   ctrl, vlan_id);
+
+       if (ctrl->term)
+               return rocker_port_ctrl_vlan_term(rocker_port, flags,
+                                                 ctrl, vlan_id);
+
+       return -EOPNOTSUPP;
+}
+
+static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
+                                    int flags, __be16 vlan_id)
+{
+       int err = 0;
+       int i;
+
+       for (i = 0; i < ROCKER_CTRL_MAX; i++) {
+               if (rocker_port->ctrls[i]) {
+                       err = rocker_port_ctrl_vlan(rocker_port, flags,
+                                                   &rocker_ctrls[i], vlan_id);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return err;
+}
+
+static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
+                           struct rocker_ctrl *ctrl)
+{
+       u16 vid;
+       int err = 0;
+
+       for (vid = 1; vid < VLAN_N_VID; vid++) {
+               if (!test_bit(vid, rocker_port->vlan_bitmap))
+                       continue;
+               err = rocker_port_ctrl_vlan(rocker_port, flags,
+                                           ctrl, htons(vid));
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+
+static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
+                           u16 vid)
+{
+       enum rocker_of_dpa_table_id goto_tbl =
+               ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+       u32 in_lport = rocker_port->lport;
+       __be16 vlan_id = htons(vid);
+       __be16 vlan_id_mask = htons(0xffff);
+       __be16 internal_vlan_id;
+       bool untagged;
+       bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+       int err;
+
+       internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
+
+       if (adding && test_and_set_bit(ntohs(internal_vlan_id),
+                                      rocker_port->vlan_bitmap))
+               return 0; /* already added */
+       else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
+                                               rocker_port->vlan_bitmap))
+               return 0; /* already removed */
+
+       if (adding) {
+               err = rocker_port_ctrl_vlan_add(rocker_port, flags,
+                                               internal_vlan_id);
+               if (err) {
+                       netdev_err(rocker_port->dev,
+                                  "Error (%d) port ctrl vlan add\n", err);
+                       return err;
+               }
+       }
+
+       err = rocker_port_vlan_l2_groups(rocker_port, flags,
+                                        internal_vlan_id, untagged);
+       if (err) {
+               netdev_err(rocker_port->dev,
+                          "Error (%d) port VLAN l2 groups\n", err);
+               return err;
+       }
+
+       err = rocker_port_vlan_flood_group(rocker_port, flags,
+                                          internal_vlan_id);
+       if (err) {
+               netdev_err(rocker_port->dev,
+                          "Error (%d) port VLAN l2 flood group\n", err);
+               return err;
+       }
+
+       err = rocker_flow_tbl_vlan(rocker_port, flags,
+                                  in_lport, vlan_id, vlan_id_mask,
+                                  goto_tbl, untagged, internal_vlan_id);
+       if (err)
+               netdev_err(rocker_port->dev,
+                          "Error (%d) port VLAN table\n", err);
+
+       return err;
+}
+
+static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
+{
+       enum rocker_of_dpa_table_id goto_tbl;
+       u32 in_lport;
+       u32 in_lport_mask;
+       int err;
+
+       /* Normal Ethernet Frames.  Matches pkts from any local physical
+        * ports.  Goto VLAN tbl.
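+        * (in_lport 0 under mask 0xffff0000 matches any lport whose
+        * upper 16 bits are clear, i.e. every physical port, since
+        * lport is port_number + 1.)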
+        */
+
+       in_lport = 0;
+       in_lport_mask = 0xffff0000;
+       goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
+
+       err = rocker_flow_tbl_ig_port(rocker_port, flags,
+                                     in_lport, in_lport_mask,
+                                     goto_tbl);
+       if (err)
+               netdev_err(rocker_port->dev,
+                          "Error (%d) ingress port table entry\n", err);
+
+       return err;
+}
+
+struct rocker_fdb_learn_work {
+       struct work_struct work;
+       struct net_device *dev;
+       int flags;
+       u8 addr[ETH_ALEN];
+       u16 vid;
+};
+
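+/* Learned-FDB notifications to the bridge are deferred to process
+ * context via a work item: rocker_port_fdb_learn() may be called from
+ * atomic context (note the rocker_op_flags_gfp() allocation flags),
+ * where calling into the bridge code directly may not be safe.
+ */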
+static void rocker_port_fdb_learn_work(struct work_struct *work)
+{
+       struct rocker_fdb_learn_work *lw =
+               container_of(work, struct rocker_fdb_learn_work, work);
+       bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
+       bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
+
+       if (learned && removing)
+               br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid);
+       else if (learned && !removing)
+               br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid);
+
+       kfree(lw);
+}
+
+static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
+                                int flags, const u8 *addr, __be16 vlan_id)
+{
+       struct rocker_fdb_learn_work *lw;
+       enum rocker_of_dpa_table_id goto_tbl =
+               ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+       u32 out_lport = rocker_port->lport;
+       u32 tunnel_id = 0;
+       u32 group_id = ROCKER_GROUP_NONE;
+       bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
+       bool copy_to_cpu = false;
+       int err;
+
+       if (rocker_port_is_bridged(rocker_port))
+               group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+
+       if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
+               err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
+                                            vlan_id, tunnel_id, goto_tbl,
+                                            group_id, copy_to_cpu);
+               if (err)
+                       return err;
+       }
+
+       if (!syncing)
+               return 0;
+
+       if (!rocker_port_is_bridged(rocker_port))
+               return 0;
+
+       lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
+       if (!lw)
+               return -ENOMEM;
+
+       INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
+
+       lw->dev = rocker_port->dev;
+       lw->flags = flags;
+       ether_addr_copy(lw->addr, addr);
+       lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
+
+       schedule_work(&lw->work);
+
+       return 0;
+}
+
+static struct rocker_fdb_tbl_entry *
+rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
+{
+       struct rocker_fdb_tbl_entry *found;
+
+       hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
+               if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+                       return found;
+
+       return NULL;
+}
+
+static int rocker_port_fdb(struct rocker_port *rocker_port,
+                          const unsigned char *addr,
+                          __be16 vlan_id, int flags)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_fdb_tbl_entry *fdb;
+       struct rocker_fdb_tbl_entry *found;
+       bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
+       unsigned long lock_flags;
+
+       fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
+       if (!fdb)
+               return -ENOMEM;
+
+       fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
+       fdb->key.lport = rocker_port->lport;
+       ether_addr_copy(fdb->key.addr, addr);
+       fdb->key.vlan_id = vlan_id;
+       fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
+
+       spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+
+       found = rocker_fdb_tbl_find(rocker, fdb);
+
+       if (removing && found) {
+               kfree(fdb);
+               hash_del(&found->entry);
+               /* hash_del() only unlinks; free the entry too */
+               kfree(found);
+       } else if (!removing && !found) {
+               hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+       }
+
+       spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+       /* Check if adding and already exists, or removing and can't find */
+       if (!found != !removing) {
+               kfree(fdb);
+               if (!found && removing)
+                       return 0;
+               /* Refreshing existing to update aging timers */
+               flags |= ROCKER_OP_FLAG_REFRESH;
+       }
+
+       return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
+}
+
+static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_fdb_tbl_entry *found;
+       unsigned long lock_flags;
+       int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+       struct hlist_node *tmp;
+       int bkt;
+       int err = 0;
+
+       if (rocker_port->stp_state == BR_STATE_LEARNING ||
+           rocker_port->stp_state == BR_STATE_FORWARDING)
+               return 0;
+
+       spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+
+       hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+               if (found->key.lport != rocker_port->lport)
+                       continue;
+               if (!found->learned)
+                       continue;
+               err = rocker_port_fdb_learn(rocker_port, flags,
+                                           found->key.addr,
+                                           found->key.vlan_id);
+               if (err)
+                       goto err_out;
+               hash_del(&found->entry);
+               /* hash_del() only unlinks; free the entry too */
+               kfree(found);
+       }
+
+err_out:
+       spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+       return err;
+}
+
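+/* Install (or remove, per flags) termination-MAC entries for the
+ * port's own MAC address, one each for the IPv4 and IPv6 ethertypes,
+ * so frames addressed to the router MAC are terminated instead of
+ * bridged.
+ */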
+static int rocker_port_router_mac(struct rocker_port *rocker_port,
+                                 int flags, __be16 vlan_id)
+{
+       u32 in_lport_mask = 0xffffffff;
+       __be16 eth_type;
+       const u8 *dst_mac_mask = ff_mac;
+       __be16 vlan_id_mask = htons(0xffff);
+       bool copy_to_cpu = false;
+       int err;
+
+       if (ntohs(vlan_id) == 0)
+               vlan_id = rocker_port->internal_vlan_id;
+
+       eth_type = htons(ETH_P_IP);
+       err = rocker_flow_tbl_term_mac(rocker_port,
+                                      rocker_port->lport, in_lport_mask,
+                                      eth_type, rocker_port->dev->dev_addr,
+                                      dst_mac_mask, vlan_id, vlan_id_mask,
+                                      copy_to_cpu, flags);
+       if (err)
+               return err;
+
+       eth_type = htons(ETH_P_IPV6);
+       err = rocker_flow_tbl_term_mac(rocker_port,
+                                      rocker_port->lport, in_lport_mask,
+                                      eth_type, rocker_port->dev->dev_addr,
+                                      dst_mac_mask, vlan_id, vlan_id_mask,
+                                      copy_to_cpu, flags);
+
+       return err;
+}
+
+static int rocker_port_fwding(struct rocker_port *rocker_port)
+{
+       bool pop_vlan;
+       u32 out_lport;
+       __be16 vlan_id;
+       u16 vid;
+       int flags = ROCKER_OP_FLAG_NOWAIT;
+       int err;
+
+       /* Port will be forwarding-enabled if its STP state is LEARNING
+        * or FORWARDING.  Traffic from CPU can still egress, regardless of
+        * port STP state.  Use L2 interface group on port VLANs as a way
+        * to toggle port forwarding: if forwarding is disabled, L2
+        * interface group will not exist.
+        */
+
+       if (rocker_port->stp_state != BR_STATE_LEARNING &&
+           rocker_port->stp_state != BR_STATE_FORWARDING)
+               flags |= ROCKER_OP_FLAG_REMOVE;
+
+       out_lport = rocker_port->lport;
+       for (vid = 1; vid < VLAN_N_VID; vid++) {
+               if (!test_bit(vid, rocker_port->vlan_bitmap))
+                       continue;
+               vlan_id = htons(vid);
+               pop_vlan = rocker_vlan_id_is_internal(vlan_id);
+               err = rocker_group_l2_interface(rocker_port, flags,
+                                               vlan_id, out_lport,
+                                               pop_vlan);
+               if (err) {
+                       netdev_err(rocker_port->dev,
+                                  "Error (%d) port VLAN l2 group for lport %d\n",
+                                  err, out_lport);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
+{
+       bool want[ROCKER_CTRL_MAX] = { 0, };
+       int flags;
+       int err;
+       int i;
+
+       if (rocker_port->stp_state == state)
+               return 0;
+
+       rocker_port->stp_state = state;
+
+       switch (state) {
+       case BR_STATE_DISABLED:
+               /* port is completely disabled */
+               break;
+       case BR_STATE_LISTENING:
+       case BR_STATE_BLOCKING:
+               want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+               break;
+       case BR_STATE_LEARNING:
+       case BR_STATE_FORWARDING:
+               want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+               want[ROCKER_CTRL_IPV4_MCAST] = true;
+               want[ROCKER_CTRL_IPV6_MCAST] = true;
+               if (rocker_port_is_bridged(rocker_port))
+                       want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+               else
+                       want[ROCKER_CTRL_LOCAL_ARP] = true;
+               break;
+       }
+
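+       /* Apply only the delta between the current and wanted ctrl
+        * sets, adding or removing the corresponding entries on every
+        * VLAN the port is a member of.
+        */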
+       for (i = 0; i < ROCKER_CTRL_MAX; i++) {
+               if (want[i] != rocker_port->ctrls[i]) {
+                       flags = ROCKER_OP_FLAG_NOWAIT |
+                               (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
+                       err = rocker_port_ctrl(rocker_port, flags,
+                                              &rocker_ctrls[i]);
+                       if (err)
+                               return err;
+                       rocker_port->ctrls[i] = want[i];
+               }
+       }
+
+       err = rocker_port_fdb_flush(rocker_port);
+       if (err)
+               return err;
+
+       return rocker_port_fwding(rocker_port);
+}
+
+static struct rocker_internal_vlan_tbl_entry *
+rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
+{
+       struct rocker_internal_vlan_tbl_entry *found;
+
+       hash_for_each_possible(rocker->internal_vlan_tbl, found,
+                              entry, ifindex) {
+               if (found->ifindex == ifindex)
+                       return found;
+       }
+
+       return NULL;
+}
+
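+/* Internal VLAN IDs are allocated per ifindex from a bitmap based at
+ * ROCKER_INTERNAL_VLAN_ID_BASE and are reference counted, so all
+ * ports bridged to the same device share one internal VLAN.  If the
+ * bitmap is exhausted, found->vlan_id stays 0 from the kzalloc and
+ * the caller sees the same invalid ID as on allocation failure.
+ */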
+static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
+                                              int ifindex)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_internal_vlan_tbl_entry *entry;
+       struct rocker_internal_vlan_tbl_entry *found;
+       unsigned long lock_flags;
+       int i;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return 0;
+
+       entry->ifindex = ifindex;
+
+       spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
+
+       found = rocker_internal_vlan_tbl_find(rocker, ifindex);
+       if (found) {
+               kfree(entry);
+               goto found;
+       }
+
+       found = entry;
+       hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
+
+       for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
+               if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
+                       continue;
+               found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
+               goto found;
+       }
+
+       netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
+
+found:
+       found->ref_count++;
+       spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
+
+       return found->vlan_id;
+}
+
+static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
+                                            int ifindex)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_internal_vlan_tbl_entry *found;
+       unsigned long lock_flags;
+       unsigned long bit;
+
+       spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
+
+       found = rocker_internal_vlan_tbl_find(rocker, ifindex);
+       if (!found) {
+               netdev_err(rocker_port->dev,
+                          "ifindex (%d) not found in internal VLAN tbl\n",
+                          ifindex);
+               goto not_found;
+       }
+
+       if (--found->ref_count <= 0) {
+               bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
+               clear_bit(bit, rocker->internal_vlan_bitmap);
+               hash_del(&found->entry);
+               kfree(found);
+       }
+
+not_found:
+       spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
+}
+
+/*****************
+ * Net device ops
+ *****************/
+
+static int rocker_port_open(struct net_device *dev)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       u8 stp_state = rocker_port_is_bridged(rocker_port) ?
+               BR_STATE_BLOCKING : BR_STATE_FORWARDING;
+       int err;
+
+       err = rocker_port_dma_rings_init(rocker_port);
+       if (err)
+               return err;
+
+       err = request_irq(rocker_msix_tx_vector(rocker_port),
+                         rocker_tx_irq_handler, 0,
+                         rocker_driver_name, rocker_port);
+       if (err) {
+               netdev_err(rocker_port->dev, "cannot assign tx irq\n");
+               goto err_request_tx_irq;
+       }
+
+       err = request_irq(rocker_msix_rx_vector(rocker_port),
+                         rocker_rx_irq_handler, 0,
+                         rocker_driver_name, rocker_port);
+       if (err) {
+               netdev_err(rocker_port->dev, "cannot assign rx irq\n");
+               goto err_request_rx_irq;
+       }
+
+       err = rocker_port_stp_update(rocker_port, stp_state);
+       if (err)
+               goto err_stp_update;
+
+       napi_enable(&rocker_port->napi_tx);
+       napi_enable(&rocker_port->napi_rx);
+       rocker_port_set_enable(rocker_port, true);
+       netif_start_queue(dev);
+       return 0;
+
+err_stp_update:
+       free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
+err_request_rx_irq:
+       free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
+err_request_tx_irq:
+       rocker_port_dma_rings_fini(rocker_port);
+       return err;
+}
+
+static int rocker_port_stop(struct net_device *dev)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+       rocker_port_set_enable(rocker_port, false);
+       napi_disable(&rocker_port->napi_rx);
+       napi_disable(&rocker_port->napi_tx);
+       rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+       free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
+       free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
+       rocker_port_dma_rings_fini(rocker_port);
+
+       return 0;
+}
+
+static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
+                                      struct rocker_desc_info *desc_info)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct pci_dev *pdev = rocker->pdev;
+       struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+       struct rocker_tlv *attr;
+       int rem;
+
+       rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
+       if (!attrs[ROCKER_TLV_TX_FRAGS])
+               return;
+       rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
+               struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+               dma_addr_t dma_handle;
+               size_t len;
+
+               if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
+                       continue;
+               rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
+                                       attr);
+               if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
+                   !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
+                       continue;
+               dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
+               len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
+               pci_unmap_single(pdev, dma_handle, len, PCI_DMA_TODEVICE);
+       }
+}
+
+static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
+                                      struct rocker_desc_info *desc_info,
+                                      char *buf, size_t buf_len)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       struct pci_dev *pdev = rocker->pdev;
+       dma_addr_t dma_handle;
+       struct rocker_tlv *frag;
+
+       dma_handle = pci_map_single(pdev, buf, buf_len, PCI_DMA_TODEVICE);
+       if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
+               if (net_ratelimit())
+                       netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
+               return -EIO;
+       }
+       frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
+       if (!frag)
+               goto unmap_frag;
+       if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
+                              dma_handle))
+               goto nest_cancel;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
+                              buf_len))
+               goto nest_cancel;
+       rocker_tlv_nest_end(desc_info, frag);
+       return 0;
+
+nest_cancel:
+       rocker_tlv_nest_cancel(desc_info, frag);
+unmap_frag:
+       pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_TODEVICE);
+       return -EMSGSIZE;
+}
+
+static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_desc_info *desc_info;
+       struct rocker_tlv *frags;
+       int i;
+       int err;
+
+       desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
+       if (unlikely(!desc_info)) {
+               if (net_ratelimit())
+                       netdev_err(dev, "tx ring full when queue awake\n");
+               return NETDEV_TX_BUSY;
+       }
+
+       rocker_desc_cookie_ptr_set(desc_info, skb);
+
+       frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
+       if (!frags)
+               goto out;
+       err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
+                                         skb->data, skb_headlen(skb));
+       if (err)
+               goto nest_cancel;
+       if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
+               goto unmap_frags;       /* head frag is already mapped */
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
+                                                 skb_frag_address(frag),
+                                                 skb_frag_size(frag));
+               if (err)
+                       goto unmap_frags;
+       }
+       rocker_tlv_nest_end(desc_info, frags);
+
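+       /* Post the filled descriptor to the hardware, then stop the
+        * queue if no free head descriptor remains for the next xmit;
+        * rocker_port_poll_tx() wakes the queue once descriptors are
+        * reclaimed.
+        */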
+       rocker_desc_gen_clear(desc_info);
+       rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
+
+       desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
+       if (!desc_info)
+               netif_stop_queue(dev);
+
+       return NETDEV_TX_OK;
+
+unmap_frags:
+       rocker_tx_desc_frags_unmap(rocker_port, desc_info);
+nest_cancel:
+       rocker_tlv_nest_cancel(desc_info, frags);
+out:
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+static int rocker_port_set_mac_address(struct net_device *dev, void *p)
+{
+       struct sockaddr *addr = p;
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int err;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
+       if (err)
+               return err;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       return 0;
+}
+
+static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
+                                      __be16 proto, u16 vid)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int err;
+
+       err = rocker_port_vlan(rocker_port, 0, vid);
+       if (err)
+               return err;
+
+       return rocker_port_router_mac(rocker_port, 0, htons(vid));
+}
+
+static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
+                                       __be16 proto, u16 vid)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int err;
+
+       err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
+                                    htons(vid));
+       if (err)
+               return err;
+
+       return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
+}
+
+static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                              struct net_device *dev,
+                              const unsigned char *addr, u16 vid,
+                              u16 nlm_flags)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
+       int flags = 0;
+
+       if (!rocker_port_is_bridged(rocker_port))
+               return -EINVAL;
+
+       return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+}
+
+static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                              struct net_device *dev,
+                              const unsigned char *addr, u16 vid)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
+       int flags = ROCKER_OP_FLAG_REMOVE;
+
+       if (!rocker_port_is_bridged(rocker_port))
+               return -EINVAL;
+
+       return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+}
+
+static int rocker_fdb_fill_info(struct sk_buff *skb,
+                               struct rocker_port *rocker_port,
+                               const unsigned char *addr, u16 vid,
+                               u32 portid, u32 seq, int type,
+                               unsigned int flags)
+{
+       struct nlmsghdr *nlh;
+       struct ndmsg *ndm;
+
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       ndm = nlmsg_data(nlh);
+       ndm->ndm_family  = AF_BRIDGE;
+       ndm->ndm_pad1    = 0;
+       ndm->ndm_pad2    = 0;
+       ndm->ndm_flags   = NTF_SELF;
+       ndm->ndm_type    = 0;
+       ndm->ndm_ifindex = rocker_port->dev->ifindex;
+       ndm->ndm_state   = NUD_REACHABLE;
+
+       if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+               goto nla_put_failure;
+
+       if (vid && nla_put_u16(skb, NDA_VLAN, vid))
+               goto nla_put_failure;
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static int rocker_port_fdb_dump(struct sk_buff *skb,
+                               struct netlink_callback *cb,
+                               struct net_device *dev,
+                               struct net_device *filter_dev,
+                               int idx)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_fdb_tbl_entry *found;
+       struct hlist_node *tmp;
+       int bkt;
+       unsigned long lock_flags;
+       const unsigned char *addr;
+       u16 vid;
+       int err;
+
+       spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+       hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+               if (found->key.lport != rocker_port->lport)
+                       continue;
+               if (idx < cb->args[0])
+                       goto skip;
+               addr = found->key.addr;
+               vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
+               err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
+                                          NETLINK_CB(cb->skb).portid,
+                                          cb->nlh->nlmsg_seq,
+                                          RTM_NEWNEIGH, NLM_F_MULTI);
+               if (err < 0)
+                       break;
+skip:
+               ++idx;
+       }
+       spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+       return idx;
+}
+
+static int rocker_port_bridge_setlink(struct net_device *dev,
+                                     struct nlmsghdr *nlh)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       struct nlattr *protinfo;
+       struct nlattr *afspec;
+       struct nlattr *attr;
+       u16 mode;
+       int err;
+
+       protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+                                  IFLA_PROTINFO);
+       afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+       if (afspec) {
+               attr = nla_find_nested(afspec, IFLA_BRIDGE_MODE);
+               if (attr) {
+                       if (nla_len(attr) < sizeof(mode))
+                               return -EINVAL;
+
+                       mode = nla_get_u16(attr);
+                       if (mode != BRIDGE_MODE_SWDEV)
+                               return -EINVAL;
+               }
+       }
+
+       if (protinfo) {
+               attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
+               if (attr) {
+                       if (nla_len(attr) < sizeof(u8))
+                               return -EINVAL;
+
+                       if (nla_get_u8(attr))
+                               rocker_port->brport_flags |= BR_LEARNING;
+                       else
+                               rocker_port->brport_flags &= ~BR_LEARNING;
+                       err = rocker_port_set_learning(rocker_port);
+                       if (err)
+                               return err;
+               }
+               attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
+               if (attr) {
+                       if (nla_len(attr) < sizeof(u8))
+                               return -EINVAL;
+
+                       if (nla_get_u8(attr))
+                               rocker_port->brport_flags |= BR_LEARNING_SYNC;
+                       else
+                               rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
+               }
+       }
+
+       return 0;
+}
+
+static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                     struct net_device *dev,
+                                     u32 filter_mask)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       u16 mode = BRIDGE_MODE_SWDEV;
+       u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
+                                      rocker_port->brport_flags, mask);
+}
+
+static int rocker_port_switch_parent_id_get(struct net_device *dev,
+                                           struct netdev_phys_item_id *psid)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       struct rocker *rocker = rocker_port->rocker;
+
+       psid->id_len = sizeof(rocker->hw.id);
+       memcpy(&psid->id, &rocker->hw.id, psid->id_len);
+       return 0;
+}
+
+static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+
+       return rocker_port_stp_update(rocker_port, state);
+}
+
+static const struct net_device_ops rocker_port_netdev_ops = {
+       .ndo_open                       = rocker_port_open,
+       .ndo_stop                       = rocker_port_stop,
+       .ndo_start_xmit                 = rocker_port_xmit,
+       .ndo_set_mac_address            = rocker_port_set_mac_address,
+       .ndo_vlan_rx_add_vid            = rocker_port_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid           = rocker_port_vlan_rx_kill_vid,
+       .ndo_fdb_add                    = rocker_port_fdb_add,
+       .ndo_fdb_del                    = rocker_port_fdb_del,
+       .ndo_fdb_dump                   = rocker_port_fdb_dump,
+       .ndo_bridge_setlink             = rocker_port_bridge_setlink,
+       .ndo_bridge_getlink             = rocker_port_bridge_getlink,
+       .ndo_switch_parent_id_get       = rocker_port_switch_parent_id_get,
+       .ndo_switch_port_stp_update     = rocker_port_switch_port_stp_update,
+};
+
+/********************
+ * ethtool interface
+ ********************/
+
+static int rocker_port_get_settings(struct net_device *dev,
+                                   struct ethtool_cmd *ecmd)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+
+       return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
+}
+
+static int rocker_port_set_settings(struct net_device *dev,
+                                   struct ethtool_cmd *ecmd)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+
+       return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
+}
+
+static void rocker_port_get_drvinfo(struct net_device *dev,
+                                   struct ethtool_drvinfo *drvinfo)
+{
+       strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct ethtool_ops rocker_port_ethtool_ops = {
+       .get_settings           = rocker_port_get_settings,
+       .set_settings           = rocker_port_set_settings,
+       .get_drvinfo            = rocker_port_get_drvinfo,
+       .get_link               = ethtool_op_get_link,
+};
+
+/*****************
+ * NAPI interface
+ *****************/
+
+static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
+{
+       return container_of(napi, struct rocker_port, napi_tx);
+}
+
+static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
+{
+       struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_desc_info *desc_info;
+       u32 credits = 0;
+       int err;
+
+       /* Cleanup tx descriptors */
+       while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
+               err = rocker_desc_err(desc_info);
+               if (err && net_ratelimit())
+                       netdev_err(rocker_port->dev, "tx desc received with err %d\n",
+                                  err);
+               rocker_tx_desc_frags_unmap(rocker_port, desc_info);
+               dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info));
+               credits++;
+       }
+
+       if (credits && netif_queue_stopped(rocker_port->dev))
+               netif_wake_queue(rocker_port->dev);
+
+       napi_complete(napi);
+       rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
+
+       return 0;
+}
+
+static int rocker_port_rx_proc(struct rocker *rocker,
+                              struct rocker_port *rocker_port,
+                              struct rocker_desc_info *desc_info)
+{
+       struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+       struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
+       size_t rx_len;
+
+       if (!skb)
+               return -ENOENT;
+
+       rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
+       if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
+               return -EINVAL;
+
+       rocker_dma_rx_ring_skb_unmap(rocker, attrs);
+
+       rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
+       skb_put(skb, rx_len);
+       skb->protocol = eth_type_trans(skb, rocker_port->dev);
+       netif_receive_skb(skb);
+
+       return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
+}
+
+static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
+{
+       return container_of(napi, struct rocker_port, napi_rx);
+}
+
+static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
+{
+       struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
+       struct rocker *rocker = rocker_port->rocker;
+       struct rocker_desc_info *desc_info;
+       u32 credits = 0;
+       int err;
+
+       /* Process rx descriptors */
+       while (credits < budget &&
+              (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
+               err = rocker_desc_err(desc_info);
+               if (err) {
+                       if (net_ratelimit())
+                               netdev_err(rocker_port->dev, "rx desc received with err %d\n",
+                                          err);
+               } else {
+                       err = rocker_port_rx_proc(rocker, rocker_port,
+                                                 desc_info);
+                       if (err && net_ratelimit())
+                               netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
+                                          err);
+               }
+               rocker_desc_gen_clear(desc_info);
+               rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
+               credits++;
+       }
+
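+       /* Per the NAPI contract, complete only when less than the
+        * full budget was consumed; otherwise the core polls again.
+        */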
+       if (credits < budget)
+               napi_complete(napi);
+
+       rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
+
+       return credits;
+}
+
+/*****************
+ * PCI driver ops
+ *****************/
+
+static void rocker_carrier_init(struct rocker_port *rocker_port)
+{
+       struct rocker *rocker = rocker_port->rocker;
+       u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
+       bool link_up;
+
+       link_up = link_status & (1ULL << rocker_port->lport);
+       if (link_up)
+               netif_carrier_on(rocker_port->dev);
+       else
+               netif_carrier_off(rocker_port->dev);
+}
+
+static void rocker_remove_ports(struct rocker *rocker)
+{
+       struct rocker_port *rocker_port;
+       int i;
+
+       for (i = 0; i < rocker->port_count; i++) {
+               rocker_port = rocker->ports[i];
+               if (!rocker_port)
+                       continue;       /* slot was never probed */
+               rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
+               unregister_netdev(rocker_port->dev);
+               free_netdev(rocker_port->dev);
+       }
+       kfree(rocker->ports);
+}
+
+static void rocker_port_dev_addr_init(struct rocker *rocker,
+                                     struct rocker_port *rocker_port)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       int err;
+
+       err = rocker_cmd_get_port_settings_macaddr(rocker_port,
+                                                  rocker_port->dev->dev_addr);
+       if (err) {
+               dev_warn(&pdev->dev, "failed to get mac address, using random\n");
+               eth_hw_addr_random(rocker_port->dev);
+       }
+}
+
+static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       struct rocker_port *rocker_port;
+       struct net_device *dev;
+       int err;
+
+       dev = alloc_etherdev(sizeof(struct rocker_port));
+       if (!dev)
+               return -ENOMEM;
+       rocker_port = netdev_priv(dev);
+       rocker_port->dev = dev;
+       rocker_port->rocker = rocker;
+       rocker_port->port_number = port_number;
+       rocker_port->lport = port_number + 1;
+       rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+
+       rocker_port_dev_addr_init(rocker, rocker_port);
+       dev->netdev_ops = &rocker_port_netdev_ops;
+       dev->ethtool_ops = &rocker_port_ethtool_ops;
+       netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
+                      NAPI_POLL_WEIGHT);
+       netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
+                      NAPI_POLL_WEIGHT);
+       rocker_carrier_init(rocker_port);
+
+       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+       err = register_netdev(dev);
+       if (err) {
+               dev_err(&pdev->dev, "register_netdev failed\n");
+               goto err_register_netdev;
+       }
+       rocker->ports[port_number] = rocker_port;
+
+       rocker_port_set_learning(rocker_port);
+
+       rocker_port->internal_vlan_id =
+               rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
+       err = rocker_port_ig_tbl(rocker_port, 0);
+       if (err) {
+               dev_err(&pdev->dev, "install ig port table failed\n");
+               goto err_port_ig_tbl;
+       }
+
+       return 0;
+
+err_port_ig_tbl:
+       rocker->ports[port_number] = NULL;
+       unregister_netdev(dev);
+err_register_netdev:
+       free_netdev(dev);
+       return err;
+}
+
+static int rocker_probe_ports(struct rocker *rocker)
+{
+       int i;
+       size_t alloc_size;
+       int err;
+
+       alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
+       rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
+       if (!rocker->ports)
+               return -ENOMEM;
+       for (i = 0; i < rocker->port_count; i++) {
+               err = rocker_probe_port(rocker, i);
+               if (err)
+                       goto remove_ports;
+       }
+       return 0;
+
+remove_ports:
+       rocker_remove_ports(rocker);
+       return err;
+}
+
+static int rocker_msix_init(struct rocker *rocker)
+{
+       struct pci_dev *pdev = rocker->pdev;
+       int msix_entries;
+       int i;
+       int err;
+
+       msix_entries = pci_msix_vec_count(pdev);
+       if (msix_entries < 0)
+               return msix_entries;
+
+       if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
+               return -EINVAL;
+
+       rocker->msix_entries = kmalloc_array(msix_entries,
+                                            sizeof(struct msix_entry),
+                                            GFP_KERNEL);
+       if (!rocker->msix_entries)
+               return -ENOMEM;
+
+       for (i = 0; i < msix_entries; i++)
+               rocker->msix_entries[i].entry = i;
+
+       err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
+       if (err < 0)
+               goto err_enable_msix;
+
+       return 0;
+
+err_enable_msix:
+       kfree(rocker->msix_entries);
+       return err;
+}
+
+static void rocker_msix_fini(struct rocker *rocker)
+{
+       pci_disable_msix(rocker->pdev);
+       kfree(rocker->msix_entries);
+}
+
+static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct rocker *rocker;
+       int err;
+
+       rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
+       if (!rocker)
+               return -ENOMEM;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "pci_enable_device failed\n");
+               goto err_pci_enable_device;
+       }
+
+       err = pci_request_regions(pdev, rocker_driver_name);
+       if (err) {
+               dev_err(&pdev->dev, "pci_request_regions failed\n");
+               goto err_pci_request_regions;
+       }
+
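+       /* Prefer 64-bit DMA, falling back to 32-bit if unsupported. */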
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (!err) {
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+               if (err) {
+                       dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+                       goto err_pci_set_dma_mask;
+               }
+       } else {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err) {
+                       dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+                       goto err_pci_set_dma_mask;
+               }
+       }
+
+       if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
+               dev_err(&pdev->dev, "invalid PCI region size\n");
+               err = -EINVAL;
+               goto err_pci_resource_len_check;
+       }
+
+       rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
+                                 pci_resource_len(pdev, 0));
+       if (!rocker->hw_addr) {
+               dev_err(&pdev->dev, "ioremap failed\n");
+               err = -EIO;
+               goto err_ioremap;
+       }
+       pci_set_master(pdev);
+
+       rocker->pdev = pdev;
+       pci_set_drvdata(pdev, rocker);
+
+       rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
+
+       err = rocker_msix_init(rocker);
+       if (err) {
+               dev_err(&pdev->dev, "MSI-X init failed\n");
+               goto err_msix_init;
+       }
+
+       err = rocker_basic_hw_test(rocker);
+       if (err) {
+               dev_err(&pdev->dev, "basic hw test failed\n");
+               goto err_basic_hw_test;
+       }
+
+       rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
+
+       err = rocker_dma_rings_init(rocker);
+       if (err)
+               goto err_dma_rings_init;
+
+       err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
+                         rocker_cmd_irq_handler, 0,
+                         rocker_driver_name, rocker);
+       if (err) {
+               dev_err(&pdev->dev, "cannot assign cmd irq\n");
+               goto err_request_cmd_irq;
+       }
+
+       err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
+                         rocker_event_irq_handler, 0,
+                         rocker_driver_name, rocker);
+       if (err) {
+               dev_err(&pdev->dev, "cannot assign event irq\n");
+               goto err_request_event_irq;
+       }
+
+       rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
+
+       err = rocker_init_tbls(rocker);
+       if (err) {
+               dev_err(&pdev->dev, "cannot init rocker tables\n");
+               goto err_init_tbls;
+       }
+
+       err = rocker_probe_ports(rocker);
+       if (err) {
+               dev_err(&pdev->dev, "failed to probe ports\n");
+               goto err_probe_ports;
+       }
+
+       dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
+
+       return 0;
+
+err_probe_ports:
+       rocker_free_tbls(rocker);
+err_init_tbls:
+       free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+err_request_event_irq:
+       free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
+err_request_cmd_irq:
+       rocker_dma_rings_fini(rocker);
+err_dma_rings_init:
+err_basic_hw_test:
+       rocker_msix_fini(rocker);
+err_msix_init:
+       iounmap(rocker->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+       pci_release_regions(pdev);
+err_pci_request_regions:
+       pci_disable_device(pdev);
+err_pci_enable_device:
+       kfree(rocker);
+       return err;
+}
+
+static void rocker_remove(struct pci_dev *pdev)
+{
+       struct rocker *rocker = pci_get_drvdata(pdev);
+
+       rocker_free_tbls(rocker);
+       rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
+       rocker_remove_ports(rocker);
+       free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+       free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
+       rocker_dma_rings_fini(rocker);
+       rocker_msix_fini(rocker);
+       iounmap(rocker->hw_addr);
+       pci_release_regions(rocker->pdev);
+       pci_disable_device(rocker->pdev);
+       kfree(rocker);
+}
+
+static struct pci_driver rocker_pci_driver = {
+       .name           = rocker_driver_name,
+       .id_table       = rocker_pci_id_table,
+       .probe          = rocker_probe,
+       .remove         = rocker_remove,
+};
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static bool rocker_port_dev_check(struct net_device *dev)
+{
+       return dev->netdev_ops == &rocker_port_netdev_ops;
+}
+
+static int rocker_port_bridge_join(struct rocker_port *rocker_port,
+                                  struct net_device *bridge)
+{
+       int err;
+
+       rocker_port_internal_vlan_id_put(rocker_port,
+                                        rocker_port->dev->ifindex);
+
+       rocker_port->bridge_dev = bridge;
+
+       /* Use bridge internal VLAN ID for untagged pkts */
+       err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+       if (err)
+               return err;
+       rocker_port->internal_vlan_id =
+               rocker_port_internal_vlan_id_get(rocker_port,
+                                                bridge->ifindex);
+       err = rocker_port_vlan(rocker_port, 0, 0);
+
+       return err;
+}
+
+static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
+{
+       int err;
+
+       if (!rocker_port->bridge_dev)
+               return 0;       /* not currently bridged */
+
+       rocker_port_internal_vlan_id_put(rocker_port,
+                                        rocker_port->bridge_dev->ifindex);
+
+       rocker_port->bridge_dev = NULL;
+
+       /* Use port internal VLAN ID for untagged pkts */
+       err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+       if (err)
+               return err;
+       rocker_port->internal_vlan_id =
+               rocker_port_internal_vlan_id_get(rocker_port,
+                                                rocker_port->dev->ifindex);
+       err = rocker_port_vlan(rocker_port, 0, 0);
+
+       return err;
+}
+
+static int rocker_port_master_changed(struct net_device *dev)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       struct net_device *master = netdev_master_upper_dev_get(dev);
+       int err = 0;
+
+       if (master && master->rtnl_link_ops &&
+           !strcmp(master->rtnl_link_ops->kind, "bridge"))
+               err = rocker_port_bridge_join(rocker_port, master);
+       else
+               err = rocker_port_bridge_leave(rocker_port);
+
+       return err;
+}
+
+static int rocker_netdevice_event(struct notifier_block *unused,
+                                 unsigned long event, void *ptr)
+{
+       struct net_device *dev;
+       int err;
+
+       switch (event) {
+       case NETDEV_CHANGEUPPER:
+               dev = netdev_notifier_info_to_dev(ptr);
+               if (!rocker_port_dev_check(dev))
+                       return NOTIFY_DONE;
+               err = rocker_port_master_changed(dev);
+               if (err)
+                       netdev_warn(dev,
+                                   "failed to reflect master change (err %d)\n",
+                                   err);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_netdevice_nb __read_mostly = {
+       .notifier_call = rocker_netdevice_event,
+};
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init rocker_module_init(void)
+{
+       int err;
+
+       register_netdevice_notifier(&rocker_netdevice_nb);
+       err = pci_register_driver(&rocker_pci_driver);
+       if (err)
+               goto err_pci_register_driver;
+       return 0;
+
+err_pci_register_driver:
+       unregister_netdevice_notifier(&rocker_netdevice_nb);
+       return err;
+}
+
+static void __exit rocker_module_exit(void)
+{
+       unregister_netdevice_notifier(&rocker_netdevice_nb);
+       pci_unregister_driver(&rocker_pci_driver);
+}
+
+module_init(rocker_module_init);
+module_exit(rocker_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
+MODULE_DESCRIPTION("Rocker switch device driver");
+MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
new file mode 100644 (file)
index 0000000..8d2865b
--- /dev/null
@@ -0,0 +1,428 @@
+/*
+ * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ROCKER_H
+#define _ROCKER_H
+
+#include <linux/types.h>
+
+#define PCI_VENDOR_ID_REDHAT           0x1b36
+#define PCI_DEVICE_ID_REDHAT_ROCKER    0x0006
+
+#define ROCKER_PCI_BAR0_SIZE           0x2000
+
+/* MSI-X vectors */
+enum {
+       ROCKER_MSIX_VEC_CMD,
+       ROCKER_MSIX_VEC_EVENT,
+       ROCKER_MSIX_VEC_TEST,
+       ROCKER_MSIX_VEC_RESERVED0,
+       __ROCKER_MSIX_VEC_TX,
+       __ROCKER_MSIX_VEC_RX,
+#define ROCKER_MSIX_VEC_TX(port) \
+       (__ROCKER_MSIX_VEC_TX + ((port) * 2))
+#define ROCKER_MSIX_VEC_RX(port) \
+       (__ROCKER_MSIX_VEC_RX + ((port) * 2))
+#define ROCKER_MSIX_VEC_COUNT(portcnt) \
+       (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1)
+};
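+
+/* Resulting vector layout for a 2-port switch:
+ *
+ *   0 ROCKER_MSIX_VEC_CMD        4 ROCKER_MSIX_VEC_TX(0)
+ *   1 ROCKER_MSIX_VEC_EVENT      5 ROCKER_MSIX_VEC_RX(0)
+ *   2 ROCKER_MSIX_VEC_TEST       6 ROCKER_MSIX_VEC_TX(1)
+ *   3 ROCKER_MSIX_VEC_RESERVED0  7 ROCKER_MSIX_VEC_RX(1)
+ *
+ * so ROCKER_MSIX_VEC_COUNT(2) == 8; in general the count is
+ * 4 + 2 * portcnt.
+ */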
+
+/* Rocker bogus registers */
+#define ROCKER_BOGUS_REG0              0x0000
+#define ROCKER_BOGUS_REG1              0x0004
+#define ROCKER_BOGUS_REG2              0x0008
+#define ROCKER_BOGUS_REG3              0x000c
+
+/* Rocker test registers */
+#define ROCKER_TEST_REG                        0x0010
+#define ROCKER_TEST_REG64              0x0018  /* 8-byte */
+#define ROCKER_TEST_IRQ                        0x0020
+#define ROCKER_TEST_DMA_ADDR           0x0028  /* 8-byte */
+#define ROCKER_TEST_DMA_SIZE           0x0030
+#define ROCKER_TEST_DMA_CTRL           0x0034
+
+/* Rocker test register ctrl */
+#define ROCKER_TEST_DMA_CTRL_CLEAR     (1 << 0)
+#define ROCKER_TEST_DMA_CTRL_FILL      (1 << 1)
+#define ROCKER_TEST_DMA_CTRL_INVERT    (1 << 2)
+
+/* Rocker DMA ring register offsets */
+#define ROCKER_DMA_DESC_ADDR(x)                (0x1000 + (x) * 32)  /* 8-byte */
+#define ROCKER_DMA_DESC_SIZE(x)                (0x1008 + (x) * 32)
+#define ROCKER_DMA_DESC_HEAD(x)                (0x100c + (x) * 32)
+#define ROCKER_DMA_DESC_TAIL(x)                (0x1010 + (x) * 32)
+#define ROCKER_DMA_DESC_CTRL(x)                (0x1014 + (x) * 32)
+#define ROCKER_DMA_DESC_CREDITS(x)     (0x1018 + (x) * 32)
+#define ROCKER_DMA_DESC_RES1(x)                (0x101c + (x) * 32)
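+
+/* Each ring x thus owns a 32-byte register window spanning
+ * 0x1000 + 32 * x through 0x101f + 32 * x.
+ */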
+
+/* Rocker dma ctrl register bits */
+#define ROCKER_DMA_DESC_CTRL_RESET     (1 << 0)
+
+/* Rocker DMA ring types */
+enum rocker_dma_type {
+       ROCKER_DMA_CMD,
+       ROCKER_DMA_EVENT,
+       __ROCKER_DMA_TX,
+       __ROCKER_DMA_RX,
+#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2)
+#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2)
+};
+
+/* Rocker DMA ring size limits and default sizes */
+#define ROCKER_DMA_SIZE_MIN            2ul
+#define ROCKER_DMA_SIZE_MAX            65536ul
+#define ROCKER_DMA_CMD_DEFAULT_SIZE    32ul
+#define ROCKER_DMA_EVENT_DEFAULT_SIZE  32ul
+#define ROCKER_DMA_TX_DEFAULT_SIZE     64ul
+#define ROCKER_DMA_TX_DESC_SIZE                256
+#define ROCKER_DMA_RX_DEFAULT_SIZE     64ul
+#define ROCKER_DMA_RX_DESC_SIZE                256
+
+/* Rocker DMA descriptor struct */
+struct rocker_desc {
+       u64 buf_addr;
+       u64 cookie;
+       u16 buf_size;
+       u16 tlv_size;
+       u16 resv[5];
+       u16 comp_err;
+};
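+
+/* struct rocker_desc is exactly 32 bytes with no implicit padding:
+ * 8 (buf_addr) + 8 (cookie) + 2 (buf_size) + 2 (tlv_size) +
+ * 10 (resv) + 2 (comp_err).
+ */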
+
+#define ROCKER_DMA_DESC_COMP_ERR_GEN   (1 << 15)
+
+/* Rocker DMA TLV struct */
+struct rocker_tlv {
+       u32 type;
+       u16 len;
+};
+
+/* TLVs */
+enum {
+       ROCKER_TLV_CMD_UNSPEC,
+       ROCKER_TLV_CMD_TYPE,    /* u16 */
+       ROCKER_TLV_CMD_INFO,    /* nest */
+
+       __ROCKER_TLV_CMD_MAX,
+       ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1,
+};
+
+enum {
+       ROCKER_TLV_CMD_TYPE_UNSPEC,
+       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS,
+       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS,
+       ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD,
+       ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD,
+       ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL,
+       ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS,
+       ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD,
+       ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD,
+       ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL,
+       ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS,
+
+       __ROCKER_TLV_CMD_TYPE_MAX,
+       ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1,
+};
+
+enum {
+       ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC,
+       ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,             /* u32 */
+       ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,             /* u32 */
+       ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,            /* u8 */
+       ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,           /* u8 */
+       ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,           /* binary */
+       ROCKER_TLV_CMD_PORT_SETTINGS_MODE,              /* u8 */
+       ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,          /* u8 */
+
+       __ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+       ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
+                       __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
+};
+
+enum rocker_port_mode {
+       ROCKER_PORT_MODE_OF_DPA,
+};
+
+enum {
+       ROCKER_TLV_EVENT_UNSPEC,
+       ROCKER_TLV_EVENT_TYPE,  /* u16 */
+       ROCKER_TLV_EVENT_INFO,  /* nest */
+
+       __ROCKER_TLV_EVENT_MAX,
+       ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1,
+};
+
+enum {
+       ROCKER_TLV_EVENT_TYPE_UNSPEC,
+       ROCKER_TLV_EVENT_TYPE_LINK_CHANGED,
+       ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN,
+
+       __ROCKER_TLV_EVENT_TYPE_MAX,
+       ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1,
+};
+
+enum {
+       ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC,
+       ROCKER_TLV_EVENT_LINK_CHANGED_LPORT,    /* u32 */
+       ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP,   /* u8 */
+
+       __ROCKER_TLV_EVENT_LINK_CHANGED_MAX,
+       ROCKER_TLV_EVENT_LINK_CHANGED_MAX =
+                       __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1,
+};
+
+enum {
+       ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC,
+       ROCKER_TLV_EVENT_MAC_VLAN_LPORT,        /* u32 */
+       ROCKER_TLV_EVENT_MAC_VLAN_MAC,          /* binary */
+       ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID,      /* __be16 */
+
+       __ROCKER_TLV_EVENT_MAC_VLAN_MAX,
+       ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1,
+};
+
+enum {
+       ROCKER_TLV_RX_UNSPEC,
+       ROCKER_TLV_RX_FLAGS,            /* u16, see ROCKER_RX_FLAGS_ */
+       ROCKER_TLV_RX_CSUM,             /* u16 */
+       ROCKER_TLV_RX_FRAG_ADDR,        /* u64 */
+       ROCKER_TLV_RX_FRAG_MAX_LEN,     /* u16 */
+       ROCKER_TLV_RX_FRAG_LEN,         /* u16 */
+
+       __ROCKER_TLV_RX_MAX,
+       ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
+};
+
+#define ROCKER_RX_FLAGS_IPV4                   (1 << 0)
+#define ROCKER_RX_FLAGS_IPV6                   (1 << 1)
+#define ROCKER_RX_FLAGS_CSUM_CALC              (1 << 2)
+#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD         (1 << 3)
+#define ROCKER_RX_FLAGS_IP_FRAG                        (1 << 4)
+#define ROCKER_RX_FLAGS_TCP                    (1 << 5)
+#define ROCKER_RX_FLAGS_UDP                    (1 << 6)
+#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD      (1 << 7)
+
+enum {
+       ROCKER_TLV_TX_UNSPEC,
+       ROCKER_TLV_TX_OFFLOAD,          /* u8, see ROCKER_TX_OFFLOAD_ */
+       ROCKER_TLV_TX_L3_CSUM_OFF,      /* u16 */
+       ROCKER_TLV_TX_TSO_MSS,          /* u16 */
+       ROCKER_TLV_TX_TSO_HDR_LEN,      /* u16 */
+       ROCKER_TLV_TX_FRAGS,            /* array */
+
+       __ROCKER_TLV_TX_MAX,
+       ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1,
+};
+
+#define ROCKER_TX_OFFLOAD_NONE         0
+#define ROCKER_TX_OFFLOAD_IP_CSUM      1
+#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2
+#define ROCKER_TX_OFFLOAD_L3_CSUM      3
+#define ROCKER_TX_OFFLOAD_TSO          4
+
+#define ROCKER_TX_FRAGS_MAX            16
+
+enum {
+       ROCKER_TLV_TX_FRAG_UNSPEC,
+       ROCKER_TLV_TX_FRAG,             /* nest */
+
+       __ROCKER_TLV_TX_FRAG_MAX,
+       ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1,
+};
+
+enum {
+       ROCKER_TLV_TX_FRAG_ATTR_UNSPEC,
+       ROCKER_TLV_TX_FRAG_ATTR_ADDR,   /* u64 */
+       ROCKER_TLV_TX_FRAG_ATTR_LEN,    /* u16 */
+
+       __ROCKER_TLV_TX_FRAG_ATTR_MAX,
+       ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1,
+};
+
+/* Cmd info TLVs nested under ROCKER_TLV_CMD_INFO for OF-DPA msgs */
+enum {
+       ROCKER_TLV_OF_DPA_UNSPEC,
+       ROCKER_TLV_OF_DPA_TABLE_ID,             /* u16 */
+       ROCKER_TLV_OF_DPA_PRIORITY,             /* u32 */
+       ROCKER_TLV_OF_DPA_HARDTIME,             /* u32 */
+       ROCKER_TLV_OF_DPA_IDLETIME,             /* u32 */
+       ROCKER_TLV_OF_DPA_COOKIE,               /* u64 */
+       ROCKER_TLV_OF_DPA_IN_LPORT,             /* u32 */
+       ROCKER_TLV_OF_DPA_IN_LPORT_MASK,        /* u32 */
+       ROCKER_TLV_OF_DPA_OUT_LPORT,            /* u32 */
+       ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,        /* u16 */
+       ROCKER_TLV_OF_DPA_GROUP_ID,             /* u32 */
+       ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,       /* u32 */
+       ROCKER_TLV_OF_DPA_GROUP_COUNT,          /* u16 */
+       ROCKER_TLV_OF_DPA_GROUP_IDS,            /* u32 array */
+       ROCKER_TLV_OF_DPA_VLAN_ID,              /* __be16 */
+       ROCKER_TLV_OF_DPA_VLAN_ID_MASK,         /* __be16 */
+       ROCKER_TLV_OF_DPA_VLAN_PCP,             /* __be16 */
+       ROCKER_TLV_OF_DPA_VLAN_PCP_MASK,        /* __be16 */
+       ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION,      /* u8 */
+       ROCKER_TLV_OF_DPA_NEW_VLAN_ID,          /* __be16 */
+       ROCKER_TLV_OF_DPA_NEW_VLAN_PCP,         /* u8 */
+       ROCKER_TLV_OF_DPA_TUNNEL_ID,            /* u32 */
+       ROCKER_TLV_OF_DPA_TUN_LOG_LPORT,        /* u32 */
+       ROCKER_TLV_OF_DPA_ETHERTYPE,            /* __be16 */
+       ROCKER_TLV_OF_DPA_DST_MAC,              /* binary */
+       ROCKER_TLV_OF_DPA_DST_MAC_MASK,         /* binary */
+       ROCKER_TLV_OF_DPA_SRC_MAC,              /* binary */
+       ROCKER_TLV_OF_DPA_SRC_MAC_MASK,         /* binary */
+       ROCKER_TLV_OF_DPA_IP_PROTO,             /* u8 */
+       ROCKER_TLV_OF_DPA_IP_PROTO_MASK,        /* u8 */
+       ROCKER_TLV_OF_DPA_IP_DSCP,              /* u8 */
+       ROCKER_TLV_OF_DPA_IP_DSCP_MASK,         /* u8 */
+       ROCKER_TLV_OF_DPA_IP_DSCP_ACTION,       /* u8 */
+       ROCKER_TLV_OF_DPA_NEW_IP_DSCP,          /* u8 */
+       ROCKER_TLV_OF_DPA_IP_ECN,               /* u8 */
+       ROCKER_TLV_OF_DPA_IP_ECN_MASK,          /* u8 */
+       ROCKER_TLV_OF_DPA_DST_IP,               /* __be32 */
+       ROCKER_TLV_OF_DPA_DST_IP_MASK,          /* __be32 */
+       ROCKER_TLV_OF_DPA_SRC_IP,               /* __be32 */
+       ROCKER_TLV_OF_DPA_SRC_IP_MASK,          /* __be32 */
+       ROCKER_TLV_OF_DPA_DST_IPV6,             /* binary */
+       ROCKER_TLV_OF_DPA_DST_IPV6_MASK,        /* binary */
+       ROCKER_TLV_OF_DPA_SRC_IPV6,             /* binary */
+       ROCKER_TLV_OF_DPA_SRC_IPV6_MASK,        /* binary */
+       ROCKER_TLV_OF_DPA_SRC_ARP_IP,           /* __be32 */
+       ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK,      /* __be32 */
+       ROCKER_TLV_OF_DPA_L4_DST_PORT,          /* __be16 */
+       ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK,     /* __be16 */
+       ROCKER_TLV_OF_DPA_L4_SRC_PORT,          /* __be16 */
+       ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK,     /* __be16 */
+       ROCKER_TLV_OF_DPA_ICMP_TYPE,            /* u8 */
+       ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK,       /* u8 */
+       ROCKER_TLV_OF_DPA_ICMP_CODE,            /* u8 */
+       ROCKER_TLV_OF_DPA_ICMP_CODE_MASK,       /* u8 */
+       ROCKER_TLV_OF_DPA_IPV6_LABEL,           /* __be32 */
+       ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK,      /* __be32 */
+       ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION,      /* u8 */
+       ROCKER_TLV_OF_DPA_NEW_QUEUE_ID,         /* u8 */
+       ROCKER_TLV_OF_DPA_CLEAR_ACTIONS,        /* u32 */
+       ROCKER_TLV_OF_DPA_POP_VLAN,             /* u8 */
+       ROCKER_TLV_OF_DPA_TTL_CHECK,            /* u8 */
+       ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,      /* u8 */
+
+       __ROCKER_TLV_OF_DPA_MAX,
+       ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1,
+};
+
+/* OF-DPA table IDs */
+
+enum rocker_of_dpa_table_id {
+       ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0,
+       ROCKER_OF_DPA_TABLE_ID_VLAN = 10,
+       ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20,
+       ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30,
+       ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40,
+       ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50,
+       ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60,
+};
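+/* The numbering mirrors the OF-DPA pipeline order: ingress port first,
+ * then VLAN, termination MAC, unicast/multicast routing, bridging,
+ * and finally ACL policy.
+ */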
+
+/* OF-DPA flow stats */
+enum {
+       ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC,
+       ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,   /* u32 */
+       ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,    /* u64 */
+       ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,    /* u64 */
+
+       __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX,
+       ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1,
+};
+
+/* OF-DPA group types */
+enum rocker_of_dpa_group_type {
+       ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0,
+       ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE,
+       ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST,
+       ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST,
+       ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD,
+       ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE,
+       ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST,
+       ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP,
+       ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY,
+};
+
+/* OF-DPA group L2 overlay types */
+enum rocker_of_dpa_overlay_type {
+       ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0,
+       ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST,
+       ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST,
+       ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST,
+};
+
+/* OF-DPA group ID encoding */
+#define ROCKER_GROUP_TYPE_SHIFT 28
+#define ROCKER_GROUP_TYPE_MASK 0xf0000000
+#define ROCKER_GROUP_VLAN_SHIFT 16
+#define ROCKER_GROUP_VLAN_MASK 0x0fff0000
+#define ROCKER_GROUP_PORT_SHIFT 0
+#define ROCKER_GROUP_PORT_MASK 0x0000ffff
+#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12
+#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000
+#define ROCKER_GROUP_SUBTYPE_SHIFT 10
+#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00
+#define ROCKER_GROUP_INDEX_SHIFT 0
+#define ROCKER_GROUP_INDEX_MASK 0x0000ffff
+#define ROCKER_GROUP_INDEX_LONG_SHIFT 0
+#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff
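+/* Bit layout implied by the masks above: type in bits 31:28; then,
+ * depending on group type, VLAN in 27:16 with port in 15:0, tunnel ID
+ * in 27:12 with subtype in 11:10, or a plain 16-bit (15:0) or 28-bit
+ * (27:0) index.
+ */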
+
+#define ROCKER_GROUP_TYPE_GET(group_id) \
+       (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT)
+#define ROCKER_GROUP_TYPE_SET(type) \
+       (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK)
+#define ROCKER_GROUP_VLAN_GET(group_id) \
+       (((group_id) & ROCKER_GROUP_VLAN_MASK) >> ROCKER_GROUP_VLAN_SHIFT)
+#define ROCKER_GROUP_VLAN_SET(vlan_id) \
+       (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK)
+#define ROCKER_GROUP_PORT_GET(group_id) \
+       (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT)
+#define ROCKER_GROUP_PORT_SET(port) \
+       (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK)
+#define ROCKER_GROUP_INDEX_GET(group_id) \
+       (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT)
+#define ROCKER_GROUP_INDEX_SET(index) \
+       (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK)
+#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \
+       (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \
+        ROCKER_GROUP_INDEX_LONG_SHIFT)
+#define ROCKER_GROUP_INDEX_LONG_SET(index) \
+       (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \
+        ROCKER_GROUP_INDEX_LONG_MASK)
+
+#define ROCKER_GROUP_NONE 0
+#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \
+       (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\
+        ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port))
+#define ROCKER_GROUP_L2_REWRITE(index) \
+       (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\
+        ROCKER_GROUP_INDEX_LONG_SET(index))
+#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \
+       (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\
+        ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
+#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \
+       (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\
+       ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
+#define ROCKER_GROUP_L3_UNICAST(index) \
+       (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\
+        ROCKER_GROUP_INDEX_LONG_SET(index))
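+/* Worked example: ROCKER_GROUP_L2_INTERFACE(htons(10), 3) encodes
+ * type 0 | VLAN 10 << 16 | port 3, i.e. group ID 0x000a0003.
+ */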
+
+/* Rocker general purpose registers */
+#define ROCKER_CONTROL                 0x0300
+#define ROCKER_PORT_PHYS_COUNT         0x0304
+#define ROCKER_PORT_PHYS_LINK_STATUS   0x0310 /* 8-byte */
+#define ROCKER_PORT_PHYS_ENABLE                0x0318 /* 8-byte */
+#define ROCKER_SWITCH_ID               0x0320 /* 8-byte */
+
+/* Rocker control bits */
+#define ROCKER_CONTROL_RESET           (1 << 0)
+
+#endif
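
To illustrate the group ID helpers above, a minimal userspace sketch (not part of the patch; it assumes the ROCKER_GROUP_* macros from this header are in scope and borrows htons() from <arpa/inet.h>):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* the ROCKER_GROUP_* macros from the header above are assumed here */

    int main(void)
    {
            /* build an L2 interface group ID for VLAN 10 on port 3 */
            uint32_t id = ROCKER_GROUP_L2_INTERFACE(htons(10), 3);

            printf("group id 0x%08x type %u vlan %u port %u\n", id,
                   ROCKER_GROUP_TYPE_GET(id),  /* 0 (L2 interface) */
                   ROCKER_GROUP_VLAN_GET(id),  /* 10 */
                   ROCKER_GROUP_PORT_GET(id)); /* 3 */
            return 0;
    }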
index 15814b79ff10e6edf3c3114e0d48f52b9ec58a10..07054ce84ba8d0b0c8344100495bb6e3d9f91261 100644 (file)
@@ -188,12 +188,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
         */
        plat->maxmtu = JUMBO_LEN;
 
-       /* Set default value for multicast hash bins */
-       plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-       /* Set default value for unicast filter entries */
-       plat->unicast_filter_entries = 1;
-
        /*
         * Currently only the properties needed on SPEAr600
         * are provided. All other properties should be added
@@ -281,6 +275,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
                return PTR_ERR(addr);
 
        plat_dat = dev_get_platdata(&pdev->dev);
+
+       /* Set default value for multicast hash bins */
+       plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+       /* Set default value for unicast filter entries */
+       plat_dat->unicast_filter_entries = 1;
+
        if (pdev->dev.of_node) {
                if (!plat_dat)
                        plat_dat = devm_kzalloc(&pdev->dev,
index 6b463117dcaca45f6130a0b6f4573d8946bc41ec..6fc834e4306d4b76ff4c42b42337e32008a683f0 100644 (file)
@@ -561,9 +561,7 @@ int netvsc_device_remove(struct hv_device *device)
        vmbus_close(device->channel);
 
        /* Release all resources */
-       if (net_device->sub_cb_buf)
-               vfree(net_device->sub_cb_buf);
-
+       vfree(net_device->sub_cb_buf);
        free_netvsc_device(net_device);
        return 0;
 }
index ab3e7614ed71065b6183f86ca29ef62ab6d28641..c44d29eca6c0fd5cbefb723eb016535fa7a900fc 100644 (file)
@@ -23,8 +23,9 @@
 #include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <linux/inetdevice.h>
+#include <net/ip.h>
+#include <net/ip6_route.h>
 #include <net/rtnetlink.h>
-#include <net/gre.h>
 #include <net/route.h>
 #include <net/addrconf.h>
 
index c3df84bd28570b6760a9af066162cbfc05da2ee7..96b71b0d78f6552a5af2518247562d11dd276fd5 100644 (file)
@@ -169,7 +169,8 @@ static int ipvlan_stop(struct net_device *dev)
        return 0;
 }
 
-netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
+                                    struct net_device *dev)
 {
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        int skblen = skb->len;
@@ -541,7 +542,7 @@ static struct rtnl_link_ops ipvlan_link_ops = {
        .dellink        = ipvlan_link_delete,
 };
 
-int ipvlan_link_register(struct rtnl_link_ops *ops)
+static int ipvlan_link_register(struct rtnl_link_ops *ops)
 {
        return rtnl_link_register(ops);
 }
index bfb0b6ec8c56e26d67fa94814fe460af34c95fed..9538674587aab5ae98023d6e228960979acc2c2f 100644 (file)
@@ -742,11 +742,12 @@ static struct lock_class_key macvlan_netdev_xmit_lock_key;
 static struct lock_class_key macvlan_netdev_addr_lock_key;
 
 #define ALWAYS_ON_FEATURES \
-       (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)
+       (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \
+        NETIF_F_GSO_ROBUST)
 
 #define MACVLAN_FEATURES \
        (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
-        NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
+        NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | \
         NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
 
@@ -872,7 +873,7 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
 
 static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev,
-                          const unsigned char *addr,
+                          const unsigned char *addr, u16 vid,
                           u16 flags)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
@@ -897,7 +898,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
 static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev,
-                          const unsigned char *addr)
+                          const unsigned char *addr, u16 vid)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;
index 42a80d3de83911456d393580c658bd7a3c3cf80e..22b4cf2fa108fe17b9fec38267efb364b9fd28df 100644 (file)
@@ -640,12 +640,12 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
 
 /* Get packet from user space buffer */
 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
-                               const struct iovec *iv, unsigned long total_len,
-                               size_t count, int noblock)
+                               struct iov_iter *from, int noblock)
 {
        int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
        struct sk_buff *skb;
        struct macvlan_dev *vlan;
+       unsigned long total_len = iov_iter_count(from);
        unsigned long len = total_len;
        int err;
        struct virtio_net_hdr vnet_hdr = { 0 };
@@ -653,6 +653,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        int copylen = 0;
        bool zerocopy = false;
        size_t linear;
+       ssize_t n;
 
        if (q->flags & IFF_VNET_HDR) {
                vnet_hdr_len = q->vnet_hdr_sz;
@@ -662,10 +663,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                        goto err;
                len -= vnet_hdr_len;
 
-               err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
-                                          sizeof(vnet_hdr));
-               if (err < 0)
+               err = -EFAULT;
+               n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
+               if (n != sizeof(vnet_hdr))
                        goto err;
+               iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
                if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
                     vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
                                                        vnet_hdr.hdr_len)
@@ -680,17 +682,16 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        if (unlikely(len < ETH_HLEN))
                goto err;
 
-       err = -EMSGSIZE;
-       if (unlikely(count > UIO_MAXIOV))
-               goto err;
-
        if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+               struct iov_iter i;
+
                copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
                if (copylen > good_linear)
                        copylen = good_linear;
                linear = copylen;
-               if (iov_pages(iv, vnet_hdr_len + copylen, count)
-                   <= MAX_SKB_FRAGS)
+               i = *from;
+               iov_iter_advance(&i, copylen);
+               if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
                        zerocopy = true;
        }
 
@@ -708,10 +709,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                goto err;
 
        if (zerocopy)
-               err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+               err = zerocopy_sg_from_iter(skb, from);
        else {
-               err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
-                                                  len);
+               err = skb_copy_datagram_from_iter(skb, 0, from, len);
                if (!err && m && m->msg_control) {
                        struct ubuf_info *uarg = m->msg_control;
                        uarg->callback(uarg, false);
@@ -764,16 +764,12 @@ err:
        return err;
 }
 
-static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
-                                unsigned long count, loff_t pos)
+static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
-       ssize_t result = -ENOLINK;
        struct macvtap_queue *q = file->private_data;
 
-       result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
-                                 file->f_flags & O_NONBLOCK);
-       return result;
+       return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
 }
 
 /* Put packet to the user space buffer */
@@ -831,64 +827,55 @@ done:
 }
 
 static ssize_t macvtap_do_read(struct macvtap_queue *q,
-                              const struct iovec *iv, unsigned long segs,
-                              unsigned long len,
+                              struct iov_iter *to,
                               int noblock)
 {
        DEFINE_WAIT(wait);
        struct sk_buff *skb;
        ssize_t ret = 0;
-       struct iov_iter iter;
 
-       while (len) {
+       if (!iov_iter_count(to))
+               return 0;
+
+       while (1) {
                if (!noblock)
                        prepare_to_wait(sk_sleep(&q->sk), &wait,
                                        TASK_INTERRUPTIBLE);
 
                /* Read frames from the queue */
                skb = skb_dequeue(&q->sk.sk_receive_queue);
-               if (!skb) {
-                       if (noblock) {
-                               ret = -EAGAIN;
-                               break;
-                       }
-                       if (signal_pending(current)) {
-                               ret = -ERESTARTSYS;
-                               break;
-                       }
-                       /* Nothing to read, let's sleep */
-                       schedule();
-                       continue;
+               if (skb)
+                       break;
+               if (noblock) {
+                       ret = -EAGAIN;
+                       break;
+               }
+               if (signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
                }
-               iov_iter_init(&iter, READ, iv, segs, len);
-               ret = macvtap_put_user(q, skb, &iter);
+               /* Nothing to read, let's sleep */
+               schedule();
+       }
+       if (skb) {
+               ret = macvtap_put_user(q, skb, to);
                kfree_skb(skb);
-               break;
        }
-
        if (!noblock)
                finish_wait(sk_sleep(&q->sk), &wait);
        return ret;
 }
 
-static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
-                               unsigned long count, loff_t pos)
+static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        struct file *file = iocb->ki_filp;
        struct macvtap_queue *q = file->private_data;
-       ssize_t len, ret = 0;
-
-       len = iov_length(iv, count);
-       if (len < 0) {
-               ret = -EINVAL;
-               goto out;
-       }
+       ssize_t len = iov_iter_count(to), ret;
 
-       ret = macvtap_do_read(q, iv, count, len, file->f_flags & O_NONBLOCK);
+       ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len);
        if (ret > 0)
                iocb->ki_pos = ret;
-out:
        return ret;
 }
 
@@ -1089,8 +1076,10 @@ static const struct file_operations macvtap_fops = {
        .owner          = THIS_MODULE,
        .open           = macvtap_open,
        .release        = macvtap_release,
-       .aio_read       = macvtap_aio_read,
-       .aio_write      = macvtap_aio_write,
+       .read           = new_sync_read,
+       .write          = new_sync_write,
+       .read_iter      = macvtap_read_iter,
+       .write_iter     = macvtap_write_iter,
        .poll           = macvtap_poll,
        .llseek         = no_llseek,
        .unlocked_ioctl = macvtap_ioctl,
@@ -1103,8 +1092,9 @@ static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *m, size_t total_len)
 {
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
-       return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
-                           m->msg_flags & MSG_DONTWAIT);
+       struct iov_iter from;
+       iov_iter_init(&from, WRITE, m->msg_iov, m->msg_iovlen, total_len);
+       return macvtap_get_user(q, m, &from, m->msg_flags & MSG_DONTWAIT);
 }
 
 static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
@@ -1112,11 +1102,12 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
                           int flags)
 {
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
+       struct iov_iter to;
        int ret;
        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
                return -EINVAL;
-       ret = macvtap_do_read(q, m->msg_iov, m->msg_iovlen, total_len,
-                         flags & MSG_DONTWAIT);
+       iov_iter_init(&to, READ, m->msg_iov, m->msg_iovlen, total_len);
+       ret = macvtap_do_read(q, &to, flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
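
The same iovec-to-iov_iter conversion recurs in tun below. The new idiom, distilled from the hunks above (a sketch reusing this file's names): copy_from_iter() returns the number of bytes copied and advances the iterator, so a short copy is detected by comparing against the requested size rather than by a negative return value:

    n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
    if (n != sizeof(vnet_hdr))
            return -EFAULT;
    /* skip whatever remains of the on-the-wire header */
    iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));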
index 443cbbf5c55f684db9ee4bcfbd69fc06706c0957..d2408a5e43a6a1fdef0ba85f5fb19b10ffca4743 100644 (file)
@@ -869,7 +869,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
        ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr));
        start = (char *)&ph->tag[0];
 
-       error = memcpy_fromiovec(start, m->msg_iov, total_len);
+       error = memcpy_from_msg(start, m, total_len);
        if (error < 0) {
                kfree_skb(skb);
                goto end;
index ac53a7316ecd55b5b081072f4e16e3e09b3625be..6d44da1845945ba9400e5fba82f2d718a7a8a944 100644 (file)
@@ -1012,28 +1012,29 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
 
 /* Get packet from user space buffer */
 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
-                           void *msg_control, const struct iovec *iv,
-                           size_t total_len, size_t count, int noblock)
+                           void *msg_control, struct iov_iter *from,
+                           int noblock)
 {
        struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
        struct sk_buff *skb;
+       size_t total_len = iov_iter_count(from);
        size_t len = total_len, align = NET_SKB_PAD, linear;
        struct virtio_net_hdr gso = { 0 };
        int good_linear;
-       int offset = 0;
        int copylen;
        bool zerocopy = false;
        int err;
        u32 rxhash;
+       ssize_t n;
 
        if (!(tun->flags & TUN_NO_PI)) {
                if (len < sizeof(pi))
                        return -EINVAL;
                len -= sizeof(pi);
 
-               if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
+               n = copy_from_iter(&pi, sizeof(pi), from);
+               if (n != sizeof(pi))
                        return -EFAULT;
-               offset += sizeof(pi);
        }
 
        if (tun->flags & TUN_VNET_HDR) {
@@ -1041,7 +1042,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        return -EINVAL;
                len -= tun->vnet_hdr_sz;
 
-               if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
+               n = copy_from_iter(&gso, sizeof(gso), from);
+               if (n != sizeof(gso))
                        return -EFAULT;
 
                if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -1050,7 +1052,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
                if (gso.hdr_len > len)
                        return -EINVAL;
-               offset += tun->vnet_hdr_sz;
+               iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
        }
 
        if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -1063,6 +1065,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        good_linear = SKB_MAX_HEAD(align);
 
        if (msg_control) {
+               struct iov_iter i = *from;
+
                /* There are 256 bytes to be copied in skb, so there is
                 * enough room for skb expand head in case it is used.
                 * The rest of the buffer is mapped from userspace.
@@ -1071,7 +1075,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                if (copylen > good_linear)
                        copylen = good_linear;
                linear = copylen;
-               if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
+               iov_iter_advance(&i, copylen);
+               if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
                        zerocopy = true;
        }
 
@@ -1091,9 +1096,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        }
 
        if (zerocopy)
-               err = zerocopy_sg_from_iovec(skb, iv, offset, count);
+               err = zerocopy_sg_from_iter(skb, from);
        else {
-               err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
+               err = skb_copy_datagram_from_iter(skb, 0, from, len);
                if (!err && msg_control) {
                        struct ubuf_info *uarg = msg_control;
                        uarg->callback(uarg, false);
@@ -1207,8 +1212,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        return total_len;
 }
 
-static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
-                             unsigned long count, loff_t pos)
+static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct tun_struct *tun = tun_get(file);
@@ -1218,10 +1222,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
        if (!tun)
                return -EBADFD;
 
-       tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
-
-       result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
-                             count, file->f_flags & O_NONBLOCK);
+       result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
 
        tun_put(tun);
        return result;
@@ -1339,18 +1340,17 @@ done:
 }
 
 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
-                          const struct iovec *iv, unsigned long segs,
-                          ssize_t len, int noblock)
+                          struct iov_iter *to,
+                          int noblock)
 {
        struct sk_buff *skb;
-       ssize_t ret = 0;
+       ssize_t ret;
        int peeked, err, off = 0;
-       struct iov_iter iter;
 
        tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
-       if (!len)
-               return ret;
+       if (!iov_iter_count(to))
+               return 0;
 
        if (tun->dev->reg_state != NETREG_REGISTERED)
                return -EIO;
@@ -1359,37 +1359,27 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
        skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
                                  &peeked, &off, &err);
        if (!skb)
-               return ret;
+               return 0;
 
-       iov_iter_init(&iter, READ, iv, segs, len);
-       ret = tun_put_user(tun, tfile, skb, &iter);
+       ret = tun_put_user(tun, tfile, skb, to);
        kfree_skb(skb);
 
        return ret;
 }
 
-static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
-                           unsigned long count, loff_t pos)
+static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        struct file *file = iocb->ki_filp;
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun = __tun_get(tfile);
-       ssize_t len, ret;
+       ssize_t len = iov_iter_count(to), ret;
 
        if (!tun)
                return -EBADFD;
-       len = iov_length(iv, count);
-       if (len < 0) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ret = tun_do_read(tun, tfile, iv, count, len,
-                         file->f_flags & O_NONBLOCK);
+       ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len);
        if (ret > 0)
                iocb->ki_pos = ret;
-out:
        tun_put(tun);
        return ret;
 }
@@ -1456,11 +1446,14 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
        int ret;
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
        struct tun_struct *tun = __tun_get(tfile);
+       struct iov_iter from;
 
        if (!tun)
                return -EBADFD;
-       ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
-                          m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+
+       iov_iter_init(&from, WRITE, m->msg_iov, m->msg_iovlen, total_len);
+       ret = tun_get_user(tun, tfile, m->msg_control, &from,
+                          m->msg_flags & MSG_DONTWAIT);
        tun_put(tun);
        return ret;
 }
@@ -1471,6 +1464,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
 {
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
        struct tun_struct *tun = __tun_get(tfile);
+       struct iov_iter to;
        int ret;
 
        if (!tun)
@@ -1485,8 +1479,8 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
                                         SOL_PACKET, TUN_TX_TIMESTAMP);
                goto out;
        }
-       ret = tun_do_read(tun, tfile, m->msg_iov, m->msg_iovlen, total_len,
-                         flags & MSG_DONTWAIT);
+       iov_iter_init(&to, READ, m->msg_iov, m->msg_iovlen, total_len);
+       ret = tun_do_read(tun, tfile, &to, flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2242,10 +2236,10 @@ static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
 static const struct file_operations tun_fops = {
        .owner  = THIS_MODULE,
        .llseek = no_llseek,
-       .read  = do_sync_read,
-       .aio_read  = tun_chr_aio_read,
-       .write = do_sync_write,
-       .aio_write = tun_chr_aio_write,
+       .read  = new_sync_read,
+       .write = new_sync_write,
+       .read_iter  = tun_chr_read_iter,
+       .write_iter = tun_chr_write_iter,
        .poll   = tun_chr_poll,
        .unlocked_ioctl = tun_chr_ioctl,
 #ifdef CONFIG_COMPAT
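
With ->read_iter/->write_iter wired up, plain read(2) and write(2) on the chardev are served by the generic new_sync_read/new_sync_write helpers, which in this kernel generation (paraphrasing fs/read_write.c, not part of this patch) wrap the user buffer in a single-segment iov_iter, roughly:

    struct iovec iov = { .iov_base = buf, .iov_len = len };
    struct kiocb kiocb;
    struct iov_iter iter;

    init_sync_kiocb(&kiocb, filp);
    kiocb.ki_pos = *ppos;
    iov_iter_init(&iter, READ, &iov, 1, len);
    ret = filp->f_op->read_iter(&kiocb, &iter);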
index e9f81d4636598f681f40cce219d41937140278d8..31ecb03368c6dc3d581fdbd30b409b88190f3c71 100644 (file)
@@ -849,7 +849,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
 /* Add static entry (via netlink) */
 static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                         struct net_device *dev,
-                        const unsigned char *addr, u16 flags)
+                        const unsigned char *addr, u16 vid, u16 flags)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        /* struct net *net = dev_net(vxlan->dev); */
@@ -885,7 +885,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 /* Delete entry (via netlink) */
 static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                            struct net_device *dev,
-                           const unsigned char *addr)
+                           const unsigned char *addr, u16 vid)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f;
@@ -2299,9 +2299,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
        if (ipv6) {
                udp_conf.family = AF_INET6;
                udp_conf.use_udp6_tx_checksums =
-                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+                   !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
                udp_conf.use_udp6_rx_checksums =
-                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+                   !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
        } else {
                udp_conf.family = AF_INET;
                udp_conf.local_ip.s_addr = INADDR_ANY;
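
On the checksum fix above: VXLAN_F_UDP_ZERO_CSUM6_TX/RX mean "transmit/accept zero UDP6 checksums", so the use_udp6_*_checksums booleans must be the flags' negation. The old double negation (!!) enabled checksums exactly when they were supposed to be zeroed:

    /* flag set   -> use_udp6_*_checksums = false (checksum zeroed) */
    /* flag clear -> use_udp6_*_checksums = true  (checksum used)   */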
index 6f7ae5f7bdaeb62e7b98be4dec53740de2b0a592..649fdae773832b70596e25a3e7146854ec90de36 100644 (file)
@@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api {
  * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
  * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
  *     which also implies support for the scheduler configuration command
+ * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = BIT(0),
@@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT       = BIT(10),
        IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT         = BIT(11),
        IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = BIT(12),
+       IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = BIT(18),
 };
 
 /* The default calibrate table size if not specified by firmware file */
index 57325589ee5bcba07d315983e517b3e73ff82499..1ee9dcd26ad9db93dbf9c4dc55a76676a441f7a0 100644 (file)
@@ -2486,9 +2486,15 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
-               /* Use aux roc framework (HS20) */
-               ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
-                                              vif, duration);
+               if (mvm->fw->ucode_capa.capa[0] &
+                   IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
+                       /* Use aux roc framework (HS20) */
+                       ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
+                                                      vif, duration);
+                       goto out_unlock;
+               }
+               IWL_ERR(mvm, "hotspot not supported\n");
+               ret = -EINVAL;
                goto out_unlock;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* handle below */
index 61f5d36eca6aaa50c5b33da090736662cc27b77a..846a2e6e34d855d62726eda65b51ee427bc1a939 100644 (file)
@@ -2249,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev,
        /*like read eeprom and so on */
        rtlpriv->cfg->ops->read_eeprom_info(hw);
 
+       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+               err = -ENODEV;
+               goto fail3;
+       }
+       rtlpriv->cfg->ops->init_sw_leds(hw);
+
+       /*aspm */
+       rtl_pci_init_aspm(hw);
+
        /* Init mac80211 sw */
        err = rtl_init_core(hw);
        if (err) {
@@ -2264,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
                goto fail3;
        }
 
-       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
-               err = -ENODEV;
-               goto fail3;
-       }
-       rtlpriv->cfg->ops->init_sw_leds(hw);
-
-       /*aspm */
-       rtl_pci_init_aspm(hw);
-
        err = ieee80211_register_hw(hw);
        if (err) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index 310d3163dc5b6a3f1e51a9a59ed999fe991a2f06..8ec8200002c7311025b3ae645c9956bf08c44a17 100644 (file)
@@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
                mac->opmode == NL80211_IFTYPE_ADHOC)
                macid = sta->aid + 1;
        if (wirelessmode == WIRELESS_MODE_N_5G ||
-           wirelessmode == WIRELESS_MODE_AC_5G)
-               ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ];
+           wirelessmode == WIRELESS_MODE_AC_5G ||
+           wirelessmode == WIRELESS_MODE_A)
+               ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
        else
                ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
 
index 4e56a27f9689a925ff3cf26118c6ee505eb963a4..fab0d4b42f58fca511dc447b62ea91607732ba81 100644 (file)
@@ -39,7 +39,7 @@ struct backend_info {
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
 static void connect(struct backend_info *be);
 static int read_xenbus_vif_flags(struct backend_info *be);
-static void backend_create_xenvif(struct backend_info *be);
+static int backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
 static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state);
@@ -352,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev,
        be->state = XenbusStateInitWait;
 
        /* This kicks hotplug scripts, so do it immediately. */
-       backend_create_xenvif(be);
+       err = backend_create_xenvif(be);
+       if (err)
+               goto fail;
 
        return 0;
 
@@ -397,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev,
 }
 
 
-static void backend_create_xenvif(struct backend_info *be)
+static int backend_create_xenvif(struct backend_info *be)
 {
        int err;
        long handle;
        struct xenbus_device *dev = be->dev;
 
        if (be->vif != NULL)
-               return;
+               return 0;
 
        err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
        if (err != 1) {
                xenbus_dev_fatal(dev, err, "reading handle");
-               return;
+               return (err < 0) ? err : -EINVAL;
        }
 
        be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
@@ -417,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be)
                err = PTR_ERR(be->vif);
                be->vif = NULL;
                xenbus_dev_fatal(dev, err, "creating interface");
-               return;
+               return err;
        }
 
        kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
+       return 0;
 }
 
 static void backend_disconnect(struct backend_info *be)
index 9fab30af0e75abdcec135707363951d7e9e26f8c..084587d7cd134ce0e8e20410368f5b60b9e88f74 100644 (file)
@@ -590,6 +590,20 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
        return entry;
 }
 
+static int msi_verify_entries(struct pci_dev *dev)
+{
+       struct msi_desc *entry;
+
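+       /* a device flagged no_64bit_msi must never be handed an MSI
+        * message address above 4G (i.e. a non-zero address_hi)
+        */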
+       list_for_each_entry(entry, &dev->msi_list, list) {
+               if (!dev->no_64bit_msi || !entry->msg.address_hi)
+                       continue;
+               dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
+                       " tried to assign one above 4G\n");
+               return -EIO;
+       }
+       return 0;
+}
+
 /**
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
@@ -627,6 +641,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
                return ret;
        }
 
+       ret = msi_verify_entries(dev);
+       if (ret) {
+               msi_mask_irq(entry, mask, ~mask);
+               free_msi_irqs(dev);
+               return ret;
+       }
+
        ret = populate_msi_sysfs(dev);
        if (ret) {
                msi_mask_irq(entry, mask, ~mask);
@@ -739,6 +760,11 @@ static int msix_capability_init(struct pci_dev *dev,
        if (ret)
                goto out_avail;
 
+       /* Check if all MSI entries honor device restrictions */
+       ret = msi_verify_entries(dev);
+       if (ret)
+               goto out_free;
+
        /*
         * Some devices require MSI-X to be enabled before we can touch the
         * MSI-X registers.  We need to mask all the vectors to prevent
index 79e5c94107a9cc44fe8269f55ab72e8150005e0b..72533c58c1f3bc0d6a18412a197651399abbf6a2 100644 (file)
@@ -412,6 +412,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
        struct fc_frame_header *fh;
        struct fcoe_rcv_info *fr;
        struct fcoe_percpu_s *bg;
+       struct sk_buff *tmp_skb;
        unsigned short oxid;
 
        interface = container_of(ptype, struct bnx2fc_interface,
@@ -424,6 +425,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
                goto err;
        }
 
+       tmp_skb = skb_share_check(skb, GFP_ATOMIC);
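+       /* skb_share_check() hands back a private clone when the skb is
+        * shared (or NULL on allocation failure); use that from now on
+        */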
+       if (!tmp_skb)
+               goto err;
+
+       skb = tmp_skb;
+
        if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
                printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
                goto err;
index bca0de61ae80bbc8cc44e6877abd82a19d947e9c..4752fed476dfb293e04d5860e1fad2f6116f9327 100644 (file)
 
 #include "csio_defs.h"
 
-/* FCoE device IDs for T4 */
-#define CSIO_DEVID_T440DBG_FCOE                        0x4600
-#define CSIO_DEVID_T420CR_FCOE                 0x4601
-#define CSIO_DEVID_T422CR_FCOE                 0x4602
-#define CSIO_DEVID_T440CR_FCOE                 0x4603
-#define CSIO_DEVID_T420BCH_FCOE                        0x4604
-#define CSIO_DEVID_T440BCH_FCOE                        0x4605
-#define CSIO_DEVID_T440CH_FCOE                 0x4606
-#define CSIO_DEVID_T420SO_FCOE                 0x4607
-#define CSIO_DEVID_T420CX_FCOE                 0x4608
-#define CSIO_DEVID_T420BT_FCOE                 0x4609
-#define CSIO_DEVID_T404BT_FCOE                 0x460A
-#define CSIO_DEVID_B420_FCOE                   0x460B
-#define CSIO_DEVID_B404_FCOE                   0x460C
-#define CSIO_DEVID_T480CR_FCOE                 0x460D
-#define CSIO_DEVID_T440LPCR_FCOE               0x460E
-#define CSIO_DEVID_AMSTERDAM_T4_FCOE           0x460F
-#define CSIO_DEVID_HUAWEI_T480_FCOE            0x4680
-#define CSIO_DEVID_HUAWEI_T440_FCOE            0x4681
-#define CSIO_DEVID_HUAWEI_STG310_FCOE          0x4682
-#define CSIO_DEVID_ACROMAG_XMC_XAUI            0x4683
-#define CSIO_DEVID_ACROMAG_XMC_SFP_FCOE                0x4684
-#define CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE                0x4685
-#define CSIO_DEVID_HUAWEI_10GT_FCOE            0x4686
-#define CSIO_DEVID_HUAWEI_T440_TOE_FCOE                0x4687
-
-/* FCoE device IDs for T5 */
-#define CSIO_DEVID_T580DBG_FCOE                        0x5600
-#define CSIO_DEVID_T520CR_FCOE                 0x5601
-#define CSIO_DEVID_T522CR_FCOE                 0x5602
-#define CSIO_DEVID_T540CR_FCOE                 0x5603
-#define CSIO_DEVID_T520BCH_FCOE                        0x5604
-#define CSIO_DEVID_T540BCH_FCOE                        0x5605
-#define CSIO_DEVID_T540CH_FCOE                 0x5606
-#define CSIO_DEVID_T520SO_FCOE                 0x5607
-#define CSIO_DEVID_T520CX_FCOE                 0x5608
-#define CSIO_DEVID_T520BT_FCOE                 0x5609
-#define CSIO_DEVID_T504BT_FCOE                 0x560A
-#define CSIO_DEVID_B520_FCOE                   0x560B
-#define CSIO_DEVID_B504_FCOE                   0x560C
-#define CSIO_DEVID_T580CR2_FCOE                        0x560D
-#define CSIO_DEVID_T540LPCR_FCOE               0x560E
-#define CSIO_DEVID_AMSTERDAM_T5_FCOE           0x560F
-#define CSIO_DEVID_T580LPCR_FCOE               0x5610
-#define CSIO_DEVID_T520LLCR_FCOE               0x5611
-#define CSIO_DEVID_T560CR_FCOE                 0x5612
-#define CSIO_DEVID_T580CR_FCOE                 0x5613
-
 /* Define MACRO values */
 #define CSIO_HW_T4                             0x4000
 #define CSIO_T4_FCOE_ASIC                      0x4600
 #define CSIO_HW_T5                             0x5000
 #define CSIO_T5_FCOE_ASIC                      0x5600
 #define CSIO_HW_CHIP_MASK                      0xF000
+
 #define T4_REGMAP_SIZE                         (160 * 1024)
 #define T5_REGMAP_SIZE                         (332 * 1024)
 #define FW_FNAME_T4                            "cxgb4/t4fw.bin"
index 1ed5b21c0dd8af48fd3b68bb98dc3b339eeeae7e..34d20cc3e110fcc1ec99aad913b74e87651734e0 100644 (file)
@@ -955,6 +955,10 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        struct csio_hw *hw;
        struct csio_lnode *ln;
 
+       /* probe only T5 cards */
+       if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)))
+               return -ENODEV;
+
        rv = csio_pci_init(pdev, &bars);
        if (rv)
                goto err;
@@ -1167,53 +1171,21 @@ static struct pci_error_handlers csio_err_handler = {
        .resume         = csio_pci_resume,
 };
 
-static const struct pci_device_id csio_pci_tbl[] = {
-       CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0),        /* T4 DEBUG FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0),         /* T420CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0),         /* T422CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0),         /* T440CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0),        /* T420BCH FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0),        /* T440BCH FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0),         /* T440CH FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0),         /* T420SO FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0),         /* T420CX FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0),         /* T420BT FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0),         /* T404BT FCOE */
-       CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0),           /* B420 FCOE */
-       CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0),           /* B404 FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0),         /* T480 CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0),       /* T440 LP-CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0),   /* AMSTERDAM T4 FCOE */
-       CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0),    /* HUAWEI T480 FCOE */
-       CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0),    /* HUAWEI T440 FCOE */
-       CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0),  /* HUAWEI STG FCOE */
-       CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0),    /* ACROMAG XAUI FCOE */
-       CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */
-       CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0),    /* HUAWEI 10GT FCOE */
-       CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0),        /* T5 DEBUG FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0),         /* T520CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0),         /* T522CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0),         /* T540CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0),        /* T520BCH FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0),        /* T540BCH FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0),         /* T540CH FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0),         /* T520SO FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0),         /* T520CX FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0),         /* T520BT FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0),         /* T504BT FCOE */
-       CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0),           /* B520 FCOE */
-       CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0),           /* B504 FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0),        /* T580 CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0),       /* T540 LP-CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0),   /* AMSTERDAM T5 FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0),       /* T580 LP-CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0),       /* T520 LL-CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0),         /* T560 CR FCOE */
-       CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0),         /* T580 CR FCOE */
-       { 0, 0, 0, 0, 0, 0, 0 }
-};
+/*
+ *  Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+       static struct pci_device_id csio_pci_tbl[] = {
+/* iSCSI uses PF5, FCoE uses PF6 */
+#define CH_PCI_DEVICE_ID_FUNCTION      0x5
+#define CH_PCI_DEVICE_ID_FUNCTION2     0x6
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+               { PCI_VDEVICE(CHELSIO, (devid)), 0 }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
 
+#include "t4_pci_id_tbl.h"
 
 static struct pci_driver csio_pci_driver = {
        .name           = KBUILD_MODNAME,
index 49014a143c6a9ab56ec81a56d3c7180156341d95..c1d04d4d3c6c140457c19e50865b29bd3287d54f 100644 (file)
@@ -202,6 +202,7 @@ static struct {
        {"IOMEGA", "Io20S         *F", NULL, BLIST_KEY},
        {"INSITE", "Floptical   F*8I", NULL, BLIST_KEY},
        {"INSITE", "I325VM", NULL, BLIST_KEY},
+       {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
        {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
        {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
        {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
index 8adf067ff019344eaf0c42b97007b4abac65e79e..1c3467b8256612b96bafaee3827312e21711de28 100644 (file)
@@ -102,7 +102,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
        clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
                        GFP_KERNEL);
        if (!clkfreq) {
-               dev_err(dev, "%s: no memory\n", "freq-table-hz");
                ret = -ENOMEM;
                goto out;
        }
@@ -112,19 +111,19 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
        if (ret && (ret != -EINVAL)) {
                dev_err(dev, "%s: error reading array %d\n",
                                "freq-table-hz", ret);
-               goto free_clkfreq;
+               return ret;
        }
 
        for (i = 0; i < sz; i += 2) {
                ret = of_property_read_string_index(np,
                                "clock-names", i/2, (const char **)&name);
                if (ret)
-                       goto free_clkfreq;
+                       goto out;
 
                clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
                if (!clki) {
                        ret = -ENOMEM;
-                       goto free_clkfreq;
+                       goto out;
                }
 
                clki->min_freq = clkfreq[i];
@@ -134,8 +133,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
                                clki->min_freq, clki->max_freq, clki->name);
                list_add_tail(&clki->list, &hba->clk_list_head);
        }
-free_clkfreq:
-       kfree(clkfreq);
 out:
        return ret;
 }
@@ -162,10 +159,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
        }
 
        vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
-       if (!vreg) {
-               dev_err(dev, "No memory for %s regulator\n", name);
-               goto out;
-       }
+       if (!vreg)
+               return -ENOMEM;
 
        vreg->name = kstrdup(name, GFP_KERNEL);
 
index 497c38a4a86615178e367e40666937e2d969b41f..605ca60e8a10da25bed98f9d2ac6fdb42b13176f 100644 (file)
@@ -744,6 +744,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
        if (!ufshcd_is_clkgating_allowed(hba))
                return;
        device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
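+       /* make sure no gate/ungate work is still running or queued once
+        * clock gating is torn down
+        */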
+       cancel_work_sync(&hba->clk_gating.ungate_work);
+       cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 }
 
 /* Must be called with host lock acquired */
@@ -2246,6 +2248,22 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
        return ret;
 }
 
+/**
+ * ufshcd_init_pwr_info - set the power-on-reset (POR)
+ * values in hba power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_init_pwr_info(struct ufs_hba *hba)
+{
+       hba->pwr_info.gear_rx = UFS_PWM_G1;
+       hba->pwr_info.gear_tx = UFS_PWM_G1;
+       hba->pwr_info.lane_rx = 1;
+       hba->pwr_info.lane_tx = 1;
+       hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
+       hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
+       hba->pwr_info.hs_rate = 0;
+}
+
 /**
  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
  * @hba: per-adapter instance
@@ -2844,8 +2862,13 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
        hba = shost_priv(sdev->host);
        scsi_deactivate_tcq(sdev, hba->nutrs);
        /* Drop the reference as it won't be needed anymore */
-       if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+       if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
+               unsigned long flags;
+
+               spin_lock_irqsave(hba->host->host_lock, flags);
                hba->sdev_ufs_device = NULL;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
 }
 
 /**
@@ -4062,6 +4085,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
 {
        int ret = 0;
+       struct scsi_device *sdev_rpmb;
+       struct scsi_device *sdev_boot;
 
        hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@@ -4070,56 +4095,33 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
                hba->sdev_ufs_device = NULL;
                goto out;
        }
+       scsi_device_put(hba->sdev_ufs_device);
 
-       hba->sdev_boot = __scsi_add_device(hba->host, 0, 0,
+       sdev_boot = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
-       if (IS_ERR(hba->sdev_boot)) {
-               ret = PTR_ERR(hba->sdev_boot);
-               hba->sdev_boot = NULL;
+       if (IS_ERR(sdev_boot)) {
+               ret = PTR_ERR(sdev_boot);
                goto remove_sdev_ufs_device;
        }
+       scsi_device_put(sdev_boot);
 
-       hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+       sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
-       if (IS_ERR(hba->sdev_rpmb)) {
-               ret = PTR_ERR(hba->sdev_rpmb);
-               hba->sdev_rpmb = NULL;
+       if (IS_ERR(sdev_rpmb)) {
+               ret = PTR_ERR(sdev_rpmb);
                goto remove_sdev_boot;
        }
+       scsi_device_put(sdev_rpmb);
        goto out;
 
 remove_sdev_boot:
-       scsi_remove_device(hba->sdev_boot);
+       scsi_remove_device(sdev_boot);
 remove_sdev_ufs_device:
        scsi_remove_device(hba->sdev_ufs_device);
 out:
        return ret;
 }
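
A point worth spelling out for the hunk above: __scsi_add_device() hands back a
device with a reference held, and since the driver now takes its own reference
under the host lock wherever it actually uses the W-LU, each probe-time
reference can be dropped immediately. A minimal sketch of that shape (`wlun`
stands in for the real W-LUN id; error paths simplified):

	sdev = __scsi_add_device(hba->host, 0, 0, wlun, NULL);
	if (IS_ERR(sdev))
		return PTR_ERR(sdev);
	scsi_device_put(sdev);	/* device stays registered; drop the probe reference */
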
 
-/**
- * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
- *                          ufshcd_scsi_add_wlus()
- * @hba: per-adapter instance
- *
- */
-static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
-{
-       if (hba->sdev_ufs_device) {
-               scsi_remove_device(hba->sdev_ufs_device);
-               hba->sdev_ufs_device = NULL;
-       }
-
-       if (hba->sdev_boot) {
-               scsi_remove_device(hba->sdev_boot);
-               hba->sdev_boot = NULL;
-       }
-
-       if (hba->sdev_rpmb) {
-               scsi_remove_device(hba->sdev_rpmb);
-               hba->sdev_rpmb = NULL;
-       }
-}
-
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -4134,6 +4136,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
        if (ret)
                goto out;
 
+       ufshcd_init_pwr_info(hba);
+
        /* UniPro link is active now */
        ufshcd_set_link_active(hba);
 
@@ -4264,12 +4268,18 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
 {
+       if (!vreg)
+               return 0;
+
        return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
 }
 
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
 {
+       if (!vreg)
+               return 0;
+
        return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }
 
@@ -4471,7 +4481,7 @@ out:
                        if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
                                clk_disable_unprepare(clki->clk);
                }
-       } else if (!ret && on) {
+       } else if (on) {
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.state = CLKS_ON;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4675,11 +4685,25 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 {
        unsigned char cmd[6] = { START_STOP };
        struct scsi_sense_hdr sshdr;
-       struct scsi_device *sdp = hba->sdev_ufs_device;
+       struct scsi_device *sdp;
+       unsigned long flags;
        int ret;
 
-       if (!sdp || !scsi_device_online(sdp))
-               return -ENODEV;
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       sdp = hba->sdev_ufs_device;
+       if (sdp) {
+               ret = scsi_device_get(sdp);
+               if (!ret && !scsi_device_online(sdp)) {
+                       ret = -ENODEV;
+                       scsi_device_put(sdp);
+               }
+       } else {
+               ret = -ENODEV;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (ret)
+               return ret;
 
        /*
         * If scsi commands fail, the scsi mid-layer schedules scsi error-
@@ -4718,6 +4742,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
        if (!ret)
                hba->curr_dev_pwr_mode = pwr_mode;
 out:
+       scsi_device_put(sdp);
        hba->host->eh_noresume = 0;
        return ret;
 }
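
This is the companion to the scsi_device_put() calls added in
ufshcd_scsi_add_wlus(): the W-LU pointer is only stable under the host lock, so
the lookup and scsi_device_get() happen inside the lock and the reference is
dropped once the command is done. Condensed sketch (the online check and error
handling from the hunk are omitted):

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->sdev_ufs_device;
	ret = sdp ? scsi_device_get(sdp) : -ENODEV;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (ret)
		return ret;
	/* ... issue START STOP UNIT through sdp ... */
	scsi_device_put(sdp);
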
@@ -5087,7 +5112,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
        int ret = 0;
 
        if (!hba || !hba->is_powered)
-               goto out;
+               return 0;
 
        if (pm_runtime_suspended(hba->dev)) {
                if (hba->rpm_lvl == hba->spm_lvl)
@@ -5231,7 +5256,6 @@ EXPORT_SYMBOL(ufshcd_shutdown);
 void ufshcd_remove(struct ufs_hba *hba)
 {
        scsi_remove_host(hba->host);
-       ufshcd_scsi_remove_wlus(hba);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
        ufshcd_hba_stop(hba);
index 58ecdff5065c27d2dc3b6c563f641675799435dc..4a574aa458557a14ecc15b0d96d69dc8fa1fc9d8 100644 (file)
@@ -392,8 +392,6 @@ struct ufs_hba {
         * "UFS device" W-LU.
         */
        struct scsi_device *sdev_ufs_device;
-       struct scsi_device *sdev_rpmb;
-       struct scsi_device *sdev_boot;
 
        enum ufs_dev_pwr_mode curr_dev_pwr_mode;
        enum uic_link_state uic_link_state;
index 72e12bad14b9c478a8025db3ef7d31601c083aa4..d0d5542efc06db7a74b46a6a7230a4ce65ba53d5 100644 (file)
@@ -376,9 +376,6 @@ static void pump_transfers(unsigned long data)
        chip = dws->cur_chip;
        spi = message->spi;
 
-       if (unlikely(!chip->clk_div))
-               chip->clk_div = dws->max_freq / chip->speed_hz;
-
        if (message->state == ERROR_STATE) {
                message->status = -EIO;
                goto early_exit;
@@ -419,7 +416,7 @@ static void pump_transfers(unsigned long data)
        if (transfer->speed_hz) {
                speed = chip->speed_hz;
 
-               if (transfer->speed_hz != speed) {
+               if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
                        speed = transfer->speed_hz;
 
                        /* clk_div doesn't support odd number */
@@ -581,7 +578,6 @@ static int dw_spi_setup(struct spi_device *spi)
                dev_err(&spi->dev, "No max speed HZ parameter\n");
                return -EINVAL;
        }
-       chip->speed_hz = spi->max_speed_hz;
 
        chip->tmode = 0; /* Tx & Rx */
        /* Default SPI mode is SCPOL = 0, SCPH = 0 */
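
Net effect of the three dw_spi hunks: chip->speed_hz and chip->clk_div are now
only ever set together, from the transfer path, and a zero clk_div forces the
first transfer to program the divider. Roughly (the even-divider rounding
follows the "clk_div doesn't support odd number" comment above; treat the
exact formula as illustrative):

	if (transfer->speed_hz != chip->speed_hz || !chip->clk_div) {
		chip->speed_hz = transfer->speed_hz;
		/* the divider must be even: round up */
		chip->clk_div = (dws->max_freq / chip->speed_hz + 1) & 0xfffe;
	}
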
index 39e2c0a55a2865acc6c50354cf90fa27263bc922..f63de781c72959c7c29b8fb2bb215f4e33b28f87 100644 (file)
@@ -562,9 +562,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 
        sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
        txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
-                                          sspi->word_width;
+                                          (sspi->word_width >> 1);
        rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
-                                          sspi->word_width;
+                                          (sspi->word_width >> 1);
 
        if (!(spi->mode & SPI_CS_HIGH))
                regval |= SIRFSOC_SPI_CS_IDLE_STAT;
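
The fix above writes word_width >> 1 rather than the raw byte count into the
FIFO control registers, which suggests the width field uses a compact encoding
instead of a byte count. A hypothetical helper spelling out that mapping (not
driver code):

	/* word_width is 1, 2 or 4 bytes; the register field appears to
	 * want 0, 1 or 2 respectively */
	static inline u32 sirfsoc_fifo_width_field(u32 word_width)
	{
		return word_width >> 1;	/* 1 -> 0, 2 -> 1, 4 -> 2 */
	}
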
index ebcb33df2eb22facb58cebc10277c3ab12925a42..50f20f243981e68b0d007ff714476d0a29a42a80 100644 (file)
@@ -615,13 +615,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
-                       sg_buf = page_address(vm_page) +
-                               ((size_t)buf & ~PAGE_MASK);
+                       sg_set_page(&sgt->sgl[i], vm_page,
+                                   min, offset_in_page(buf));
                } else {
                        sg_buf = buf;
+                       sg_set_buf(&sgt->sgl[i], sg_buf, min);
                }
 
-               sg_set_buf(&sgt->sgl[i], sg_buf, min);
 
                buf += min;
                len -= min;
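
The point of the hunk above, in isolation: for a vmalloc'd buffer the backing
page may have no usable linear address, so the scatterlist entry should carry
the page itself rather than an address derived from page_address(). Sketch of
the resulting branch (the vmalloc_to_page() NULL check and cleanup from the
hunk are omitted):

	if (vmalloced_buf) {
		struct page *vm_page = vmalloc_to_page(buf);

		sg_set_page(&sgt->sgl[i], vm_page, min, offset_in_page(buf));
	} else {
		sg_set_buf(&sgt->sgl[i], buf, min);
	}
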
index 9935e66935af191e25d1ee4fee4ff70564228f20..eddef9cd2e1662087a4595cc48382b88c121832f 100644 (file)
@@ -275,11 +275,11 @@ u8 rtw_sitesurvey_cmd(struct adapter  *padapter, struct ndis_802_11_ssid *ssid,
        if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
                rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ph2c == NULL)
                return _FAIL;
 
-       psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL);
+       psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
        if (psurveyPara == NULL) {
                kfree(ph2c);
                return _FAIL;
@@ -405,7 +405,7 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
        else
                RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
 
-       pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (pcmd == NULL) {
                res = _FAIL;
                RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
@@ -755,13 +755,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
        u8      res = _SUCCESS;
 
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ph2c == NULL) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
        if (pdrvextra_cmd_parm == NULL) {
                kfree(ph2c);
                res = _FAIL;
@@ -967,13 +967,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
        u8      res = _SUCCESS;
 
        if (enqueue) {
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
                if (ph2c == NULL) {
                        res = _FAIL;
                        goto exit;
                }
 
-               pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+               pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
                if (pdrvextra_cmd_parm == NULL) {
                        kfree(ph2c);
                        res = _FAIL;
@@ -1010,13 +1010,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
 
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ph2c == NULL) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
        if (pdrvextra_cmd_parm == NULL) {
                kfree(ph2c);
                res = _FAIL;
@@ -1088,13 +1088,13 @@ u8 rtw_ps_cmd(struct adapter *padapter)
 
        u8      res = _SUCCESS;
 
-       ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ppscmd == NULL) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
        if (pdrvextra_cmd_parm == NULL) {
                kfree(ppscmd);
                res = _FAIL;
index 5ba5099ec20d1af8fd4415b8dbeab8d3dd1fdcc8..70b1bc3e0e63333abaa5ee2a2f63e4778f3124c2 100644 (file)
@@ -4241,12 +4241,12 @@ void report_survey_event(struct adapter *padapter,
        pcmdpriv = &padapter->cmdpriv;
 
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (pcmd_obj == NULL)
                return;
 
        cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
-       pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
+       pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
        if (pevtcmd == NULL) {
                kfree(pcmd_obj);
                return;
@@ -4339,12 +4339,12 @@ void report_join_res(struct adapter *padapter, int res)
        struct mlme_ext_info    *pmlmeinfo = &(pmlmeext->mlmext_info);
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (pcmd_obj == NULL)
                return;
 
        cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
-       pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
+       pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
        if (pevtcmd == NULL) {
                kfree(pcmd_obj);
                return;
@@ -4854,11 +4854,11 @@ void survey_timer_hdl(void *function_context)
                        pmlmeext->scan_abort = false;/* reset */
                }
 
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
                if (ph2c == NULL)
                        goto exit_survey_timer_hdl;
 
-               psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL);
+               psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
                if (psurveyPara == NULL) {
                        kfree(ph2c);
                        goto exit_survey_timer_hdl;
index 33ccbbbd8ed6903fb8dd5ff61c26b9c89ca0e94c..d300369977fae5e51834ffd7f06962ea97db6195 100644 (file)
@@ -935,7 +935,7 @@ int rtw_check_bcn_info(struct adapter  *Adapter, u8 *pframe, u32 packet_len)
                return true;
        }
 
-       bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_KERNEL);
+       bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
 
        subtype = GetFrameSubType(pframe) >> 4;
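
All of these staging hunks apply a single rule: the command and event helpers
can be reached from timers and other atomic contexts, where GFP_KERNEL may
sleep and is therefore illegal. The trade-off, shown with a hypothetical
helper (not driver code):

	/* GFP_ATOMIC never sleeps but fails more readily under memory
	 * pressure, so every call site must handle the NULL return */
	struct cmd_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
	if (!obj)
		return _FAIL;
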
 
index 407a318b09dbe2837dc64573792e6f886cc88d62..2f87150a21b7e2c1b3f09dd5df10d3085c8fda83 100644 (file)
@@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
        {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
        {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
        {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
+       {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
        {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
        {}      /* Terminating entry */
 };
index 1ab0018271c5c622c0b7556fb92a3a1d9ad38e5b..ad09e51ffae4d097109241d9a19b97c97858109b 100644 (file)
@@ -50,15 +50,14 @@ struct cpufreq_cooling_device {
        unsigned int cpufreq_state;
        unsigned int cpufreq_val;
        struct cpumask allowed_cpus;
+       struct list_head node;
 };
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
 static unsigned int cpufreq_dev_count;
 
-/* notify_table passes value to the CPUFREQ_ADJUST callback function. */
-#define NOTIFY_INVALID NULL
-static struct cpufreq_cooling_device *notify_device;
+static LIST_HEAD(cpufreq_dev_list);
 
 /**
  * get_idr - function to get a unique id.
@@ -287,15 +286,12 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
 
        cpufreq_device->cpufreq_state = cooling_state;
        cpufreq_device->cpufreq_val = clip_freq;
-       notify_device = cpufreq_device;
 
        for_each_cpu(cpuid, mask) {
                if (is_cpufreq_valid(cpuid))
                        cpufreq_update_policy(cpuid);
        }
 
-       notify_device = NOTIFY_INVALID;
-
        return 0;
 }
 
@@ -316,21 +312,28 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 {
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;
+       struct cpufreq_cooling_device *cpufreq_dev;
 
-       if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
+       if (event != CPUFREQ_ADJUST)
                return 0;
 
-       if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
-               max_freq = notify_device->cpufreq_val;
-       else
-               return 0;
+       mutex_lock(&cooling_cpufreq_lock);
+       list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+               if (!cpumask_test_cpu(policy->cpu,
+                                       &cpufreq_dev->allowed_cpus))
+                       continue;
+
+               if (!cpufreq_dev->cpufreq_val)
+                       cpufreq_dev->cpufreq_val = get_cpu_frequency(
+                                       cpumask_any(&cpufreq_dev->allowed_cpus),
+                                       cpufreq_dev->cpufreq_state);
 
-       /* Never exceed user_policy.max */
-       if (max_freq > policy->user_policy.max)
-               max_freq = policy->user_policy.max;
+               max_freq = cpufreq_dev->cpufreq_val;
 
-       if (policy->max != max_freq)
-               cpufreq_verify_within_limits(policy, 0, max_freq);
+               if (policy->max != max_freq)
+                       cpufreq_verify_within_limits(policy, 0, max_freq);
+       }
+       mutex_unlock(&cooling_cpufreq_lock);
 
        return 0;
 }
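
Condensing the notifier rework above: instead of a single global handoff via
notify_device (racy when several cooling devices are active), every device
sits on cpufreq_dev_list and the CPUFREQ_ADJUST callback walks the list under
cooling_cpufreq_lock, lazily caching the clipped frequency on first use. The
resulting loop body, stripped of diff noise:

	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
		if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
			continue;
		if (!cpufreq_dev->cpufreq_val)	/* first notification: cache it */
			cpufreq_dev->cpufreq_val = get_cpu_frequency(
					cpumask_any(&cpufreq_dev->allowed_cpus),
					cpufreq_dev->cpufreq_state);
		cpufreq_verify_within_limits(policy, 0, cpufreq_dev->cpufreq_val);
	}
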
@@ -486,6 +489,7 @@ __cpufreq_cooling_register(struct device_node *np,
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
        cpufreq_dev_count++;
+       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
 
        mutex_unlock(&cooling_cpufreq_lock);
 
@@ -549,6 +553,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 
        cpufreq_dev = cdev->devdata;
        mutex_lock(&cooling_cpufreq_lock);
+       list_del(&cpufreq_dev->node);
        cpufreq_dev_count--;
 
        /* Unregister the notifier for the last cpufreq cooling device */
index 3f5ad25ddca811cf5a9c61509af9ac8c89550b28..b6be572704a4c7ff97055f1cb273ff3016399469 100644 (file)
@@ -417,13 +417,10 @@ void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
 
        th_zone = sensor_conf->pzone_data;
 
-       if (th_zone->therm_dev)
-               thermal_zone_device_unregister(th_zone->therm_dev);
+       thermal_zone_device_unregister(th_zone->therm_dev);
 
-       for (i = 0; i < th_zone->cool_dev_size; i++) {
-               if (th_zone->cool_dev[i])
-                       cpufreq_cooling_unregister(th_zone->cool_dev[i]);
-       }
+       for (i = 0; i < th_zone->cool_dev_size; ++i)
+               cpufreq_cooling_unregister(th_zone->cool_dev[i]);
 
        dev_info(sensor_conf->dev,
                "Exynos: Kernel Thermal management unregistered\n");
index 90163b384660247b343d9fc551dfe067128d6219..d1ec5804c0bb94cebeb22d5038b4861393047ba0 100644 (file)
@@ -275,6 +275,7 @@ int st_thermal_unregister(struct platform_device *pdev)
 }
 EXPORT_SYMBOL_GPL(st_thermal_unregister);
 
+#ifdef CONFIG_PM_SLEEP
 static int st_thermal_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -305,6 +306,8 @@ static int st_thermal_resume(struct device *dev)
 
        return 0;
 }
+#endif
+
 SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
 EXPORT_SYMBOL_GPL(st_thermal_pm_ops);
 
index 56982da4a9e9f77ac4fd062f95c00724cc7ade34..bf355050eab695f50c6220589faf69a0b9e3b6b6 100644 (file)
@@ -240,32 +240,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int of_serial_suspend(struct device *dev)
-{
-       struct of_serial_info *info = dev_get_drvdata(dev);
-
-       serial8250_suspend_port(info->line);
-       if (info->clk)
-               clk_disable_unprepare(info->clk);
-
-       return 0;
-}
-
-static int of_serial_resume(struct device *dev)
-{
-       struct of_serial_info *info = dev_get_drvdata(dev);
-
-       if (info->clk)
-               clk_prepare_enable(info->clk);
-
-       serial8250_resume_port(info->line);
-
-       return 0;
-}
-#endif
-static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume);
-
 /*
  * A few common types, add more as needed.
  */
@@ -297,7 +271,6 @@ static struct platform_driver of_platform_serial_driver = {
                .name = "of_serial",
                .owner = THIS_MODULE,
                .of_match_table = of_platform_serial_table,
-               .pm = &of_serial_pm_ops,
        },
        .probe = of_platform_serial_probe,
        .remove = of_platform_serial_remove,
index 39b4081b632df2ce600501add8c011c7dbde75b3..96fafed92b76b0972401a13b4eb67a5b1dfdddb1 100644 (file)
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Microsoft Wireless Laser Mouse 6000 Receiver */
+       { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Microsoft LifeCam-VX700 v2.0 */
        { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
index 711b23019d541f1fcc589e93a22d80ef677ec207..df38e7ef49761ce87f3532c37c556a1399ab8199 100644 (file)
@@ -791,6 +791,10 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
 
        trb = dwc->ep0_trb;
 
+       r = next_request(&ep0->request_list);
+       if (!r)
+               return;
+
        status = DWC3_TRB_SIZE_TRBSTS(trb->size);
        if (status == DWC3_TRBSTS_SETUP_PENDING) {
                dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
@@ -801,10 +805,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
                return;
        }
 
-       r = next_request(&ep0->request_list);
-       if (!r)
-               return;
-
        ur = &r->request;
 
        length = trb->size & DWC3_TRB_SIZE_MASK;
index 696160d48ae8521651f4f313ee9998288c245ab4..388cfd83b6b667a8e40dffc6c61d9257839f2f81 100644 (file)
@@ -22,7 +22,6 @@
 
 
 #include <linux/slab.h>
-#include <linux/device.h>
 #include <asm/unaligned.h>
 
 #include "xhci.h"
@@ -1149,9 +1148,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
                 * is enabled, so also enable remote wake here.
                 */
-               if (hcd->self.root_hub->do_remote_wakeup
-                               && device_may_wakeup(hcd->self.controller)) {
-
+               if (hcd->self.root_hub->do_remote_wakeup) {
                        if (t1 & PORT_CONNECT) {
                                t2 |= PORT_WKOC_E | PORT_WKDISC_E;
                                t2 &= ~PORT_WKCONN_E;
index 9a69b1f1b300889d56200ac35d6d1cf22195c588..142b601f95636fdff622bca8c4fb1a9aef87093b 100644 (file)
@@ -281,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
        if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
                pdev->no_d3cold = true;
 
-       return xhci_suspend(xhci);
+       return xhci_suspend(xhci, do_wakeup);
 }
 
 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
index 3d78b0cd674b4cd07485dfe493b3cd67d0253fdf..646300cbe5f75d34fabf3fd4d13d1100c52e97c0 100644 (file)
@@ -204,7 +204,15 @@ static int xhci_plat_suspend(struct device *dev)
        struct usb_hcd  *hcd = dev_get_drvdata(dev);
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-       return xhci_suspend(xhci);
+       /*
+        * xhci_suspend() needs `do_wakeup` to know whether the host is allowed
+        * to do wakeup during suspend. Since xhci_plat_suspend is currently
+        * only designed for system suspend, device_may_wakeup() is enough
+        * to determine whether the host is allowed to do wakeup. This needs
+        * revisiting if xhci_plat_suspend enlarges its scope, e.g. if it
+        * also covers runtime suspend.
+        */
+       return xhci_suspend(xhci, device_may_wakeup(dev));
 }
 
 static int xhci_plat_resume(struct device *dev)
index bc6fcbc16f61ec820ba93d5fb6700cfcbd0ae2a2..06433aec81d71511f0583a1099d42d9977f8da3b 100644 (file)
@@ -1067,9 +1067,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
                                false);
                xhci_ring_cmd_db(xhci);
        } else {
-               /* Clear our internal halted state and restart the ring(s) */
+               /* Clear our internal halted state */
                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
-               ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
 }
 
@@ -1823,22 +1822,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                ep->stopped_td = td;
                return 0;
        } else {
-               if (trb_comp_code == COMP_STALL) {
-                       /* The transfer is completed from the driver's
-                        * perspective, but we need to issue a set dequeue
-                        * command for this stalled endpoint to move the dequeue
-                        * pointer past the TD.  We can't do that here because
-                        * the halt condition must be cleared first.  Let the
-                        * USB class driver clear the stall later.
-                        */
-                       ep->stopped_td = td;
-                       ep->stopped_stream = ep_ring->stream_id;
-               } else if (xhci_requires_manual_halt_cleanup(xhci,
-                                       ep_ctx, trb_comp_code)) {
-                       /* Other types of errors halt the endpoint, but the
-                        * class driver doesn't call usb_reset_endpoint() unless
-                        * the error is -EPIPE.  Clear the halted status in the
-                        * xHCI hardware manually.
+               if (trb_comp_code == COMP_STALL ||
+                   xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+                                                     trb_comp_code)) {
+                       /* Issue a reset endpoint command to clear the host side
+                        * halt, followed by a set dequeue command to move the
+                        * dequeue pointer past the TD.
+                        * The class driver clears the device side halt later.
                         */
                        xhci_cleanup_halted_endpoint(xhci,
                                        slot_id, ep_index, ep_ring->stream_id,
@@ -1958,9 +1948,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                else
                        td->urb->actual_length = 0;
 
-               xhci_cleanup_halted_endpoint(xhci,
-                       slot_id, ep_index, 0, td, event_trb);
-               return finish_td(xhci, td, event_trb, event, ep, status, true);
+               return finish_td(xhci, td, event_trb, event, ep, status, false);
        }
        /*
         * Did we transfer any data, despite the errors that might have
@@ -2519,17 +2507,8 @@ cleanup:
                if (ret) {
                        urb = td->urb;
                        urb_priv = urb->hcpriv;
-                       /* Leave the TD around for the reset endpoint function
-                        * to use(but only if it's not a control endpoint,
-                        * since we already queued the Set TR dequeue pointer
-                        * command for stalled control endpoints).
-                        */
-                       if (usb_endpoint_xfer_control(&urb->ep->desc) ||
-                               (trb_comp_code != COMP_STALL &&
-                                       trb_comp_code != COMP_BABBLE))
-                               xhci_urb_free_priv(xhci, urb_priv);
-                       else
-                               kfree(urb_priv);
+
+                       xhci_urb_free_priv(xhci, urb_priv);
 
                        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
                        if ((urb->actual_length != urb->transfer_buffer_length &&
index 2a5d45b4cb15ef30d82294de6c5d8e015449a383..033b46c470bdff8120b1e903ee3debbb9b998218 100644 (file)
@@ -35,6 +35,8 @@
 #define DRIVER_AUTHOR "Sarah Sharp"
 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
 
+#define        PORT_WAKE_BITS  (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
+
 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
 static int link_quirk;
 module_param(link_quirk, int, S_IRUGO | S_IWUSR);
@@ -851,13 +853,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
        xhci_set_cmd_ring_deq(xhci);
 }
 
+static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
+{
+       int port_index;
+       __le32 __iomem **port_array;
+       unsigned long flags;
+       u32 t1, t2;
+
+       spin_lock_irqsave(&xhci->lock, flags);
+
+       /* disable usb3 ports Wake bits */
+       port_index = xhci->num_usb3_ports;
+       port_array = xhci->usb3_ports;
+       while (port_index--) {
+               t1 = readl(port_array[port_index]);
+               t1 = xhci_port_state_to_neutral(t1);
+               t2 = t1 & ~PORT_WAKE_BITS;
+               if (t1 != t2)
+                       writel(t2, port_array[port_index]);
+       }
+
+       /* disable usb2 ports Wake bits */
+       port_index = xhci->num_usb2_ports;
+       port_array = xhci->usb2_ports;
+       while (port_index--) {
+               t1 = readl(port_array[port_index]);
+               t1 = xhci_port_state_to_neutral(t1);
+               t2 = t1 & ~PORT_WAKE_BITS;
+               if (t1 != t2)
+                       writel(t2, port_array[port_index]);
+       }
+
+       spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
 /*
  * Stop HC (not bus-specific)
  *
  * This is called when the machine transition into S3/S4 mode.
  *
  */
-int xhci_suspend(struct xhci_hcd *xhci)
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 {
        int                     rc = 0;
        unsigned int            delay = XHCI_MAX_HALT_USEC;
@@ -868,6 +904,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
                        xhci->shared_hcd->state != HC_STATE_SUSPENDED)
                return -EINVAL;
 
+       /* Clear root port wake on bits if wakeup not allowed. */
+       if (!do_wakeup)
+               xhci_disable_port_wake_on_bits(xhci);
+
        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2912,68 +2952,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
        }
 }
 
-/* Deal with stalled endpoints.  The core should have sent the control message
- * to clear the halt condition.  However, we need to make the xHCI hardware
- * reset its sequence number, since a device will expect a sequence number of
- * zero after the halt condition is cleared.
+/* Called when clearing a halted device. The core should have sent the control
+ * message to clear the device halt condition. The host side of the halt should
+ * already be cleared with a reset endpoint command issued when the STALL tx
+ * event was received.
+ *
  * Context: in_interrupt
  */
+
 void xhci_endpoint_reset(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep)
 {
        struct xhci_hcd *xhci;
-       struct usb_device *udev;
-       unsigned int ep_index;
-       unsigned long flags;
-       int ret;
-       struct xhci_virt_ep *virt_ep;
-       struct xhci_command *command;
 
        xhci = hcd_to_xhci(hcd);
-       udev = (struct usb_device *) ep->hcpriv;
-       /* Called with a root hub endpoint (or an endpoint that wasn't added
-        * with xhci_add_endpoint()
-        */
-       if (!ep->hcpriv)
-               return;
-       ep_index = xhci_get_endpoint_index(&ep->desc);
-       virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
-       if (!virt_ep->stopped_td) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                       "Endpoint 0x%x not halted, refusing to reset.",
-                       ep->desc.bEndpointAddress);
-               return;
-       }
-       if (usb_endpoint_xfer_control(&ep->desc)) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                               "Control endpoint stall already handled.");
-               return;
-       }
 
-       command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
-       if (!command)
-               return;
-
-       xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                       "Queueing reset endpoint command");
-       spin_lock_irqsave(&xhci->lock, flags);
-       ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index);
        /*
-        * Can't change the ring dequeue pointer until it's transitioned to the
-        * stopped state, which is only upon a successful reset endpoint
-        * command.  Better hope that last command worked!
+        * We might need to implement the config ep cmd in the xHCI 4.8.1 note:
+        * The Reset Endpoint Command may only be issued to endpoints in the
+        * Halted state. If software wishes to reset the Data Toggle or Sequence
+        * Number of an endpoint that isn't in the Halted state, then software
+        * may issue a Configure Endpoint Command with the Drop and Add bits set
+        * for the target endpoint that is in the Stopped state.
         */
-       if (!ret) {
-               xhci_cleanup_stalled_ring(xhci, udev, ep_index);
-               kfree(virt_ep->stopped_td);
-               xhci_ring_cmd_db(xhci);
-       }
-       virt_ep->stopped_td = NULL;
-       virt_ep->stopped_stream = 0;
-       spin_unlock_irqrestore(&xhci->lock, flags);
 
-       if (ret)
-               xhci_warn(xhci, "FIXME allocate a new ring segment\n");
+       /* For now just print a debug message to follow the situation */
+       xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
+                ep->desc.bEndpointAddress);
 }
 
 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
index df76d642e7190bd04854a10024c06714dd7e48d0..d745715a1e2f53648b1e1c2b1288f9c963bb42be 100644 (file)
@@ -1746,7 +1746,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
 void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *));
 
 #ifdef CONFIG_PM
-int xhci_suspend(struct xhci_hcd *xhci);
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
 #else
 #define        xhci_suspend    NULL
index cfd009dc401826cc8e052325249c791bf8a959d6..6c4eb3cf5efd599653641e5d96d20b05610a6ed5 100644 (file)
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
        { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
        { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+       { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
index 0dad8ce5a60946431e41f38683971dfc8f1dab13..1ebb351b9e9a59c9dbd90ffda56cae1769de764e 100644 (file)
@@ -470,6 +470,39 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
index 6786b705ccf606ca47cb471d76b25c4b2981bf10..e52409c9be999f817cbdb627f4be8ec6f127cbe6 100644 (file)
 #define BAYER_CONTOUR_CABLE_PID        0x6001
 
 /*
- * The following are the values for the Matrix Orbital FTDI Range
- * Anything in this range will use an FT232RL.
+ * Matrix Orbital Intelligent USB displays.
+ * http://www.matrixorbital.com
  */
 #define MTXORB_VID                     0x1B3D
 #define MTXORB_FTDI_RANGE_0100_PID     0x0100
 #define MTXORB_FTDI_RANGE_01FD_PID     0x01FD
 #define MTXORB_FTDI_RANGE_01FE_PID     0x01FE
 #define MTXORB_FTDI_RANGE_01FF_PID     0x01FF
-
-
+#define MTXORB_FTDI_RANGE_4701_PID     0x4701
+#define MTXORB_FTDI_RANGE_9300_PID     0x9300
+#define MTXORB_FTDI_RANGE_9301_PID     0x9301
+#define MTXORB_FTDI_RANGE_9302_PID     0x9302
+#define MTXORB_FTDI_RANGE_9303_PID     0x9303
+#define MTXORB_FTDI_RANGE_9304_PID     0x9304
+#define MTXORB_FTDI_RANGE_9305_PID     0x9305
+#define MTXORB_FTDI_RANGE_9306_PID     0x9306
+#define MTXORB_FTDI_RANGE_9307_PID     0x9307
+#define MTXORB_FTDI_RANGE_9308_PID     0x9308
+#define MTXORB_FTDI_RANGE_9309_PID     0x9309
+#define MTXORB_FTDI_RANGE_930A_PID     0x930A
+#define MTXORB_FTDI_RANGE_930B_PID     0x930B
+#define MTXORB_FTDI_RANGE_930C_PID     0x930C
+#define MTXORB_FTDI_RANGE_930D_PID     0x930D
+#define MTXORB_FTDI_RANGE_930E_PID     0x930E
+#define MTXORB_FTDI_RANGE_930F_PID     0x930F
+#define MTXORB_FTDI_RANGE_9310_PID     0x9310
+#define MTXORB_FTDI_RANGE_9311_PID     0x9311
+#define MTXORB_FTDI_RANGE_9312_PID     0x9312
+#define MTXORB_FTDI_RANGE_9313_PID     0x9313
+#define MTXORB_FTDI_RANGE_9314_PID     0x9314
+#define MTXORB_FTDI_RANGE_9315_PID     0x9315
+#define MTXORB_FTDI_RANGE_9316_PID     0x9316
+#define MTXORB_FTDI_RANGE_9317_PID     0x9317
+#define MTXORB_FTDI_RANGE_9318_PID     0x9318
+#define MTXORB_FTDI_RANGE_9319_PID     0x9319
+#define MTXORB_FTDI_RANGE_931A_PID     0x931A
+#define MTXORB_FTDI_RANGE_931B_PID     0x931B
+#define MTXORB_FTDI_RANGE_931C_PID     0x931C
+#define MTXORB_FTDI_RANGE_931D_PID     0x931D
+#define MTXORB_FTDI_RANGE_931E_PID     0x931E
+#define MTXORB_FTDI_RANGE_931F_PID     0x931F
 
 /*
  * The Mobility Lab (TML)
index 93cb7cebda62760bcaae46f3710e7477ff59a507..077c714f1285171ee3b9e4c418e0df42f60cd42c 100644 (file)
@@ -311,24 +311,30 @@ static void       usa26_indat_callback(struct urb *urb)
                if ((data[0] & 0x80) == 0) {
                        /* no errors on individual bytes, only
                           possible overrun err */
-                       if (data[0] & RXERROR_OVERRUN)
-                               err = TTY_OVERRUN;
-                       else
-                               err = 0;
+                       if (data[0] & RXERROR_OVERRUN) {
+                               tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                       }
                        for (i = 1; i < urb->actual_length ; ++i)
-                               tty_insert_flip_char(&port->port, data[i], err);
+                               tty_insert_flip_char(&port->port, data[i],
+                                                               TTY_NORMAL);
                } else {
                        /* some bytes had errors, every byte has status */
                        dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
                        for (i = 0; i + 1 < urb->actual_length; i += 2) {
-                               int stat = data[i], flag = 0;
-                               if (stat & RXERROR_OVERRUN)
-                                       flag |= TTY_OVERRUN;
-                               if (stat & RXERROR_FRAMING)
-                                       flag |= TTY_FRAME;
-                               if (stat & RXERROR_PARITY)
-                                       flag |= TTY_PARITY;
+                               int stat = data[i];
+                               int flag = TTY_NORMAL;
+
+                               if (stat & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                /* XXX should handle break (0x10) */
+                               if (stat & RXERROR_PARITY)
+                                       flag = TTY_PARITY;
+                               else if (stat & RXERROR_FRAMING)
+                                       flag = TTY_FRAME;
+
                                tty_insert_flip_char(&port->port, data[i+1],
                                                flag);
                        }
@@ -649,14 +655,19 @@ static void       usa49_indat_callback(struct urb *urb)
                } else {
                        /* some bytes had errors, every byte has status */
                        for (i = 0; i + 1 < urb->actual_length; i += 2) {
-                               int stat = data[i], flag = 0;
-                               if (stat & RXERROR_OVERRUN)
-                                       flag |= TTY_OVERRUN;
-                               if (stat & RXERROR_FRAMING)
-                                       flag |= TTY_FRAME;
-                               if (stat & RXERROR_PARITY)
-                                       flag |= TTY_PARITY;
+                               int stat = data[i];
+                               int flag = TTY_NORMAL;
+
+                               if (stat & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                /* XXX should handle break (0x10) */
+                               if (stat & RXERROR_PARITY)
+                                       flag = TTY_PARITY;
+                               else if (stat & RXERROR_FRAMING)
+                                       flag = TTY_FRAME;
+
                                tty_insert_flip_char(&port->port, data[i+1],
                                                flag);
                        }
@@ -713,15 +724,19 @@ static void usa49wg_indat_callback(struct urb *urb)
                         */
                        for (x = 0; x + 1 < len &&
                                    i + 1 < urb->actual_length; x += 2) {
-                               int stat = data[i], flag = 0;
+                               int stat = data[i];
+                               int flag = TTY_NORMAL;
 
-                               if (stat & RXERROR_OVERRUN)
-                                       flag |= TTY_OVERRUN;
-                               if (stat & RXERROR_FRAMING)
-                                       flag |= TTY_FRAME;
-                               if (stat & RXERROR_PARITY)
-                                       flag |= TTY_PARITY;
+                               if (stat & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                /* XXX should handle break (0x10) */
+                               if (stat & RXERROR_PARITY)
+                                       flag = TTY_PARITY;
+                               else if (stat & RXERROR_FRAMING)
+                                       flag = TTY_FRAME;
+
                                tty_insert_flip_char(&port->port, data[i+1],
                                                     flag);
                                i += 2;
@@ -773,25 +788,31 @@ static void usa90_indat_callback(struct urb *urb)
                        if ((data[0] & 0x80) == 0) {
                                /* no errors on individual bytes, only
                                   possible overrun err*/
-                               if (data[0] & RXERROR_OVERRUN)
-                                       err = TTY_OVERRUN;
-                               else
-                                       err = 0;
+                               if (data[0] & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                for (i = 1; i < urb->actual_length ; ++i)
                                        tty_insert_flip_char(&port->port,
-                                                       data[i], err);
+                                                       data[i], TTY_NORMAL);
                        }  else {
                        /* some bytes had errors, every byte has status */
                                dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
                                for (i = 0; i + 1 < urb->actual_length; i += 2) {
-                                       int stat = data[i], flag = 0;
-                                       if (stat & RXERROR_OVERRUN)
-                                               flag |= TTY_OVERRUN;
-                                       if (stat & RXERROR_FRAMING)
-                                               flag |= TTY_FRAME;
-                                       if (stat & RXERROR_PARITY)
-                                               flag |= TTY_PARITY;
+                                       int stat = data[i];
+                                       int flag = TTY_NORMAL;
+
+                                       if (stat & RXERROR_OVERRUN) {
+                                               tty_insert_flip_char(
+                                                               &port->port, 0,
+                                                               TTY_OVERRUN);
+                                       }
                                        /* XXX should handle break (0x10) */
+                                       if (stat & RXERROR_PARITY)
+                                               flag = TTY_PARITY;
+                                       else if (stat & RXERROR_FRAMING)
+                                               flag = TTY_FRAME;
+
                                        tty_insert_flip_char(&port->port,
                                                        data[i+1], flag);
                                }
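
The same conversion is applied in all four keyspan callbacks, so the target
shape is worth stating once: an overrun is not a property of the byte that
made it through, so it is queued as its own NUL character with TTY_OVERRUN,
and each data byte then carries exactly one flag, parity taking precedence
over framing. Consolidated sketch (names as in the driver):

	int flag = TTY_NORMAL;

	if (stat & RXERROR_OVERRUN)
		tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
	/* XXX should handle break (0x10) */
	if (stat & RXERROR_PARITY)
		flag = TTY_PARITY;
	else if (stat & RXERROR_FRAMING)
		flag = TTY_FRAME;

	tty_insert_flip_char(&port->port, data[i + 1], flag);
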
index a7fe664b6b7d164e628c5b466e548efe7e10e7ec..70a098de429fc39934ef8808d3e6c5011f063352 100644 (file)
@@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
                        if (*tty_flag == TTY_NORMAL)
                                *tty_flag = TTY_FRAME;
                }
-               if (lsr & UART_LSR_OE){
+               if (lsr & UART_LSR_OE) {
                        port->icount.overrun++;
-                       if (*tty_flag == TTY_NORMAL)
-                               *tty_flag = TTY_OVERRUN;
+                       tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
                }
        }
 
@@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb)
        if ((len >= 4) &&
            (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
            ((packet[2] == 0x00) || (packet[2] == 0x01))) {
-               if (packet[2] == 0x00) {
+               if (packet[2] == 0x00)
                        ssu100_update_lsr(port, packet[3], &flag);
-                       if (flag == TTY_OVERRUN)
-                               tty_insert_flip_char(&port->port, 0,
-                                               TTY_OVERRUN);
-               }
                if (packet[2] == 0x01)
                        ssu100_update_msr(port, packet[3]);
 
index 2fefaf923e4a2ecc6e3fd39b32f05631478806ed..18a283d6de1c8bd18663b57bbf7499510c49fa2d 100644 (file)
@@ -103,3 +103,10 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
                "VL711",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
+
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
+               "Hitachi",
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
index 34a1b9dea6dda9824eda07022cb544db83066eed..da0bbb456d3fcf6fee384b9287566cb64c5ae04a 100644 (file)
@@ -104,7 +104,7 @@ obj-$(CONFIG_QNX6FS_FS)             += qnx6/
 obj-$(CONFIG_AUTOFS4_FS)       += autofs4/
 obj-$(CONFIG_ADFS_FS)          += adfs/
 obj-$(CONFIG_FUSE_FS)          += fuse/
-obj-$(CONFIG_OVERLAYFS_FS)     += overlayfs/
+obj-$(CONFIG_OVERLAY_FS)       += overlayfs/
 obj-$(CONFIG_UDF_FS)           += udf/
 obj-$(CONFIG_SUN_OPENPROMFS)   += openpromfs/
 obj-$(CONFIG_OMFS_FS)          += omfs/
index 84a751005f5b8ad0f0f5cd6e3c5ec67df5f7440c..14b93159ef83a140483bb31a4a6c70286d209ed8 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt;
 static const struct file_operations aio_ring_fops;
 static const struct address_space_operations aio_ctx_aops;
 
+/* Backing dev info for the aio fs:
+ * no dirty page accounting or writeback happens.
+ */
+static struct backing_dev_info aio_fs_backing_dev_info = {
+       .name           = "aiofs",
+       .state          = 0,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
+};
+
 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 {
        struct qstr this = QSTR_INIT("[aio]", 5);
@@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 
        inode->i_mapping->a_ops = &aio_ctx_aops;
        inode->i_mapping->private_data = ctx;
+       inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
        inode->i_size = PAGE_SIZE * nr_pages;
 
        path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@@ -220,6 +230,9 @@ static int __init aio_setup(void)
        if (IS_ERR(aio_mnt))
                panic("Failed to create aio fs mount.");
 
+       if (bdi_init(&aio_fs_backing_dev_info))
+               panic("Failed to init aio fs backing dev info.");
+
        kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
@@ -281,11 +294,6 @@ static const struct file_operations aio_ring_fops = {
        .mmap = aio_ring_mmap,
 };
 
-static int aio_set_page_dirty(struct page *page)
-{
-       return 0;
-}
-
 #if IS_ENABLED(CONFIG_MIGRATION)
 static int aio_migratepage(struct address_space *mapping, struct page *new,
                        struct page *old, enum migrate_mode mode)
@@ -357,7 +365,7 @@ out:
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
-       .set_page_dirty = aio_set_page_dirty,
+       .set_page_dirty = __set_page_dirty_no_writeback,
 #if IS_ENABLED(CONFIG_MIGRATION)
        .migratepage    = aio_migratepage,
 #endif
@@ -412,7 +420,6 @@ static int aio_setup_ring(struct kioctx *ctx)
                pr_debug("pid(%d) page[%d]->count=%d\n",
                         current->pid, i, page_count(page));
                SetPageUptodate(page);
-               SetPageDirty(page);
                unlock_page(page);
 
                ctx->ring_pages[i] = page;
index 19bc6162fb8e899cc51bd3d777fcbddf91589aa6..150822ee0a0b9f9668f071885c4f018b1c9f6bf3 100644 (file)
@@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 {
        int i;
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       /* lockdep really cares that we take all of these spinlocks
-        * in the right order.  If any of the locks in the path are not
-        * currently blocking, it is going to complain.  So, make really
-        * really sure by forcing the path to blocking before we clear
-        * the path blocking.
-        */
        if (held) {
                btrfs_set_lock_blocking_rw(held, held_rw);
                if (held_rw == BTRFS_WRITE_LOCK)
@@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
                        held_rw = BTRFS_READ_LOCK_BLOCKING;
        }
        btrfs_set_path_blocking(p);
-#endif
 
        for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
                if (p->nodes[i] && p->locks[i]) {
@@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
                }
        }
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (held)
                btrfs_clear_lock_blocking_rw(held, held_rw);
-#endif
 }
 
 /* this also releases the path */
@@ -2893,7 +2883,7 @@ cow_done:
                                        }
                                        p->locks[level] = BTRFS_WRITE_LOCK;
                                } else {
-                                       err = btrfs_try_tree_read_lock(b);
+                                       err = btrfs_tree_read_lock_atomic(b);
                                        if (!err) {
                                                btrfs_set_path_blocking(p);
                                                btrfs_tree_read_lock(b);
@@ -3025,7 +3015,7 @@ again:
                        }
 
                        level = btrfs_header_level(b);
-                       err = btrfs_try_tree_read_lock(b);
+                       err = btrfs_tree_read_lock_atomic(b);
                        if (!err) {
                                btrfs_set_path_blocking(p);
                                btrfs_tree_read_lock(b);
index 5665d2149249d1d83a260c74f21889e1d394a6c3..f8229ef1b46df098a3b04260c4f943db3e924d49 100644 (file)
@@ -127,6 +127,26 @@ again:
        atomic_inc(&eb->spinning_readers);
 }
 
+/*
+ * Take a spinning read lock.
+ * Returns 1 if we get the read lock and 0 if we don't;
+ * this won't wait for blocking writers.
+ */
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
+{
+       if (atomic_read(&eb->blocking_writers))
+               return 0;
+
+       read_lock(&eb->lock);
+       if (atomic_read(&eb->blocking_writers)) {
+               read_unlock(&eb->lock);
+               return 0;
+       }
+       atomic_inc(&eb->read_locks);
+       atomic_inc(&eb->spinning_readers);
+       return 1;
+}
+
 /*
  * returns 1 if we get the read lock and 0 if we don't
  * this won't wait for blocking writers
@@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
            atomic_read(&eb->blocking_readers))
                return 0;
 
-       if (!write_trylock(&eb->lock))
-               return 0;
-
+       write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
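
Usage, from the ctree.c hunks earlier in this batch: callers try the spinning
variant first and only escalate to a blocking read lock when a writer is
active:

	err = btrfs_tree_read_lock_atomic(b);
	if (!err) {
		btrfs_set_path_blocking(p);
		btrfs_tree_read_lock(b);
	}
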
index b81e0e9a48941891681eef385d10d071f6cbe51b..c44a9d5f5362b0dcb1c525eca16b57aead8a58ae 100644 (file)
@@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+
 
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
index 3ffef7f4e5cdd9d00ca454130070f4619a8707d6..5bc72b07fde22bcbb00451c7522333e2aa273323 100644 (file)
@@ -778,6 +778,7 @@ restart:
                        struct dentry *parent = lock_parent(dentry);
                        if (likely(!dentry->d_lockref.count)) {
                                __dentry_kill(dentry);
+                               dput(parent);
                                goto restart;
                        }
                        if (parent)
index fe839b9151161544cd5a4f3243918b25e995b599..d67a16f2a45df8fcce56b9ff3ec56f334d951c9c 100644 (file)
@@ -170,27 +170,6 @@ struct iso9660_options{
        s32 sbsector;
 };
 
-/*
- * Compute the hash for the isofs name corresponding to the dentry.
- */
-static int
-isofs_hash_common(struct qstr *qstr, int ms)
-{
-       const char *name;
-       int len;
-
-       len = qstr->len;
-       name = qstr->name;
-       if (ms) {
-               while (len && name[len-1] == '.')
-                       len--;
-       }
-
-       qstr->hash = full_name_hash(name, len);
-
-       return 0;
-}
-
 /*
  * Compute the hash for the isofs name corresponding to the dentry.
  */
@@ -263,6 +242,27 @@ isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry,
 }
 
 #ifdef CONFIG_JOLIET
+/*
+ * Compute the hash for the isofs name corresponding to the dentry.
+ */
+static int
+isofs_hash_common(struct qstr *qstr, int ms)
+{
+       const char *name;
+       int len;
+
+       len = qstr->len;
+       name = qstr->name;
+       if (ms) {
+               while (len && name[len-1] == '.')
+                       len--;
+       }
+
+       qstr->hash = full_name_hash(name, len);
+
+       return 0;
+}
+
 static int
 isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr)
 {
index ed2b1151b171f275b6a5ebeea1946860876fb391..7cbdf1b2e4abd7286b6033c66a9d999e735737c9 100644 (file)
@@ -774,8 +774,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
 {
        if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
                rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
-               dprintk("%s slot is busy\n", __func__);
-               return false;
+               /* Race breaker */
+               if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
+                       dprintk("%s slot is busy\n", __func__);
+                       return false;
+               }
+               rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
        }
        return true;
 }
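
The hunk above closes a lost-wakeup window: the slot holder can clear cl_cb_slot_busy and issue its wakeup between the first test_and_set_bit() and rpc_sleep_on(), leaving the task queued with nobody left to wake it. A commented restatement of the patched helper (a sketch, not the exact file contents):

static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
{
        if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
                /* Slot looked busy: queue this task for a wakeup. */
                rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);

                /* Race breaker: re-test now that we are on the queue. */
                if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
                        dprintk("%s slot is busy\n", __func__);
                        return false;   /* genuinely busy; holder wakes us */
                }
                /* We claimed the slot after all; take ourselves back off
                 * the wait queue instead of sleeping while holding it. */
                rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
        }
        return true;
}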
index 747f3b95bd11118aa477153fa97594127e2fbc1c..33a46a8dfaf73aaa65ecec7b4603b86ea4e49d83 100644 (file)
@@ -335,12 +335,15 @@ void              nfsd_lockd_shutdown(void);
        (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
-       (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL)
+#define NFSD4_2_SECURITY_ATTRS         FATTR4_WORD2_SECURITY_LABEL
 #else
-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0
+#define NFSD4_2_SECURITY_ATTRS         0
 #endif
 
+#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
+       (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
+       NFSD4_2_SECURITY_ATTRS)
+
 static inline u32 nfsd_suppattrs0(u32 minorversion)
 {
        return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
index e60125976873b23314035580041ab73ef2f147ac..34355818a2e03460759b144f8f9c284d5a810423 100644 (file)
@@ -1,4 +1,4 @@
-config OVERLAYFS_FS
+config OVERLAY_FS
        tristate "Overlay filesystem support"
        help
          An overlay filesystem combines two filesystems - an 'upper' filesystem
index 8f91889480d0515011c3ebe64beef3660ea82b28..900daed3e91d28f1901c78cab4e9c2fdddf50043 100644 (file)
@@ -2,6 +2,6 @@
 # Makefile for the overlay filesystem.
 #
 
-obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o
+obj-$(CONFIG_OVERLAY_FS) += overlay.o
 
-overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o
+overlay-objs := super.o inode.o dir.o readdir.o copy_up.o
index 15cd91ad9940db5f130883120cf3b9606efe0705..8ffc4b980f1b68641c17a7bfb67979f205c658b5 100644 (file)
@@ -284,8 +284,7 @@ out:
        return ERR_PTR(err);
 }
 
-static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry,
-                                               enum ovl_path_type type)
+static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry)
 {
        int err;
        struct dentry *ret = NULL;
@@ -294,8 +293,17 @@ static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry,
        err = ovl_check_empty_dir(dentry, &list);
        if (err)
                ret = ERR_PTR(err);
-       else if (type == OVL_PATH_MERGE)
-               ret = ovl_clear_empty(dentry, &list);
+       else {
+               /*
+                * If no upperdentry then skip clearing whiteouts.
+                *
+                * Can race with copy-up, since we don't hold the upperdir
+                * mutex.  Doesn't matter, since copy-up can't create a
+                * non-empty directory from an empty one.
+                */
+               if (ovl_dentry_upper(dentry))
+                       ret = ovl_clear_empty(dentry, &list);
+       }
 
        ovl_cache_free(&list);
 
@@ -487,8 +495,7 @@ out:
        return err;
 }
 
-static int ovl_remove_and_whiteout(struct dentry *dentry,
-                                  enum ovl_path_type type, bool is_dir)
+static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
 {
        struct dentry *workdir = ovl_workdir(dentry);
        struct inode *wdir = workdir->d_inode;
@@ -500,7 +507,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
        int err;
 
        if (is_dir) {
-               opaquedir = ovl_check_empty_and_clear(dentry, type);
+               opaquedir = ovl_check_empty_and_clear(dentry);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir))
                        goto out;
@@ -515,9 +522,10 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
        if (IS_ERR(whiteout))
                goto out_unlock;
 
-       if (type == OVL_PATH_LOWER) {
+       upper = ovl_dentry_upper(dentry);
+       if (!upper) {
                upper = lookup_one_len(dentry->d_name.name, upperdir,
-                                          dentry->d_name.len);
+                                      dentry->d_name.len);
                err = PTR_ERR(upper);
                if (IS_ERR(upper))
                        goto kill_whiteout;
@@ -529,7 +537,6 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
        } else {
                int flags = 0;
 
-               upper = ovl_dentry_upper(dentry);
                if (opaquedir)
                        upper = opaquedir;
                err = -ESTALE;
@@ -648,7 +655,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
                cap_raise(override_cred->cap_effective, CAP_CHOWN);
                old_cred = override_creds(override_cred);
 
-               err = ovl_remove_and_whiteout(dentry, type, is_dir);
+               err = ovl_remove_and_whiteout(dentry, is_dir);
 
                revert_creds(old_cred);
                put_cred(override_cred);
@@ -781,7 +788,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        }
 
        if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) {
-               opaquedir = ovl_check_empty_and_clear(new, new_type);
+               opaquedir = ovl_check_empty_and_clear(new);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir)) {
                        opaquedir = NULL;
index af2d18c9fcee19af345aa66c5bcf31407e316f21..07d74b24913bdee757377d103427883fb7d12e70 100644 (file)
@@ -235,26 +235,36 @@ out:
        return err;
 }
 
+static bool ovl_need_xattr_filter(struct dentry *dentry,
+                                 enum ovl_path_type type)
+{
+       return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode);
+}
+
 ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
                     void *value, size_t size)
 {
-       if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
-           ovl_is_private_xattr(name))
+       struct path realpath;
+       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+
+       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
                return -ENODATA;
 
-       return vfs_getxattr(ovl_dentry_real(dentry), name, value, size);
+       return vfs_getxattr(realpath.dentry, name, value, size);
 }
 
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
+       struct path realpath;
+       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
        ssize_t res;
        int off;
 
-       res = vfs_listxattr(ovl_dentry_real(dentry), list, size);
+       res = vfs_listxattr(realpath.dentry, list, size);
        if (res <= 0 || size == 0)
                return res;
 
-       if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE)
+       if (!ovl_need_xattr_filter(dentry, type))
                return res;
 
        /* filter out private xattrs */
@@ -279,17 +289,16 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
 {
        int err;
        struct path realpath;
-       enum ovl_path_type type;
+       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
 
        err = ovl_want_write(dentry);
        if (err)
                goto out;
 
-       if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
-           ovl_is_private_xattr(name))
+       err = -ENODATA;
+       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
                goto out_drop_write;
 
-       type = ovl_path_real(dentry, &realpath);
        if (type == OVL_PATH_LOWER) {
                err = vfs_getxattr(realpath.dentry, name, NULL, 0);
                if (err < 0)
index 2a7ef4f8e2a6c37e033b771413fc66f75b567556..ab1e3dcbed9523d05b3bf4d73a5b685532e0acaa 100644 (file)
@@ -274,11 +274,11 @@ static int ovl_dir_mark_whiteouts(struct dentry *dir,
        return 0;
 }
 
-static inline int ovl_dir_read_merged(struct path *upperpath,
-                                     struct path *lowerpath,
-                                     struct list_head *list)
+static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
 {
        int err;
+       struct path lowerpath;
+       struct path upperpath;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .list = list,
@@ -286,25 +286,28 @@ static inline int ovl_dir_read_merged(struct path *upperpath,
                .is_merge = false,
        };
 
-       if (upperpath->dentry) {
-               err = ovl_dir_read(upperpath, &rdd);
+       ovl_path_lower(dentry, &lowerpath);
+       ovl_path_upper(dentry, &upperpath);
+
+       if (upperpath.dentry) {
+               err = ovl_dir_read(&upperpath, &rdd);
                if (err)
                        goto out;
 
-               if (lowerpath->dentry) {
-                       err = ovl_dir_mark_whiteouts(upperpath->dentry, &rdd);
+               if (lowerpath.dentry) {
+                       err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
                        if (err)
                                goto out;
                }
        }
-       if (lowerpath->dentry) {
+       if (lowerpath.dentry) {
                /*
                 * Insert lowerpath entries before upperpath ones, this allows
                 * offsets to be reasonably constant
                 */
                list_add(&rdd.middle, rdd.list);
                rdd.is_merge = true;
-               err = ovl_dir_read(lowerpath, &rdd);
+               err = ovl_dir_read(&lowerpath, &rdd);
                list_del(&rdd.middle);
        }
 out:
@@ -329,8 +332,6 @@ static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
 {
        int res;
-       struct path lowerpath;
-       struct path upperpath;
        struct ovl_dir_cache *cache;
 
        cache = ovl_dir_cache(dentry);
@@ -347,10 +348,7 @@ static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
        cache->refcount = 1;
        INIT_LIST_HEAD(&cache->entries);
 
-       ovl_path_lower(dentry, &lowerpath);
-       ovl_path_upper(dentry, &upperpath);
-
-       res = ovl_dir_read_merged(&upperpath, &lowerpath, &cache->entries);
+       res = ovl_dir_read_merged(dentry, &cache->entries);
        if (res) {
                ovl_cache_free(&cache->entries);
                kfree(cache);
@@ -452,10 +450,10 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
        /*
         * Need to check if we started out being a lower dir, but got copied up
         */
-       if (!od->is_upper && ovl_path_type(dentry) == OVL_PATH_MERGE) {
+       if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
                struct inode *inode = file_inode(file);
 
-               realfile =lockless_dereference(od->upperfile);
+               realfile = lockless_dereference(od->upperfile);
                if (!realfile) {
                        struct path upperpath;
 
@@ -538,14 +536,9 @@ const struct file_operations ovl_dir_operations = {
 int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
 {
        int err;
-       struct path lowerpath;
-       struct path upperpath;
        struct ovl_cache_entry *p;
 
-       ovl_path_upper(dentry, &upperpath);
-       ovl_path_lower(dentry, &lowerpath);
-
-       err = ovl_dir_read_merged(&upperpath, &lowerpath, list);
+       err = ovl_dir_read_merged(dentry, list);
        if (err)
                return err;
 
index 08b704cebfc4f8819944e09b05f5f7de35659b92..f16d318b71f8bbe4e77f8a3214e616101848e49c 100644 (file)
@@ -24,7 +24,7 @@ MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
 MODULE_DESCRIPTION("Overlay filesystem");
 MODULE_LICENSE("GPL");
 
-#define OVERLAYFS_SUPER_MAGIC 0x794c764f
+#define OVERLAYFS_SUPER_MAGIC 0x794c7630
 
 struct ovl_config {
        char *lowerdir;
@@ -84,12 +84,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
 
 static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
 {
-       struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry);
-       /*
-        * Make sure to order reads to upperdentry wrt ovl_dentry_update()
-        */
-       smp_read_barrier_depends();
-       return upperdentry;
+       return lockless_dereference(oe->__upperdentry);
 }
 
 void ovl_path_upper(struct dentry *dentry, struct path *path)
@@ -462,11 +457,34 @@ static const match_table_t ovl_tokens = {
        {OPT_ERR,                       NULL}
 };
 
+static char *ovl_next_opt(char **s)
+{
+       char *sbegin = *s;
+       char *p;
+
+       if (sbegin == NULL)
+               return NULL;
+
+       for (p = sbegin; *p; p++) {
+               if (*p == '\\') {
+                       p++;
+                       if (!*p)
+                               break;
+               } else if (*p == ',') {
+                       *p = '\0';
+                       *s = p + 1;
+                       return sbegin;
+               }
+       }
+       *s = NULL;
+       return sbegin;
+}
+
 static int ovl_parse_opt(char *opt, struct ovl_config *config)
 {
        char *p;
 
-       while ((p = strsep(&opt, ",")) != NULL) {
+       while ((p = ovl_next_opt(&opt)) != NULL) {
                int token;
                substring_t args[MAX_OPT_ARGS];
 
@@ -554,15 +572,34 @@ out_dput:
        goto out_unlock;
 }
 
+static void ovl_unescape(char *s)
+{
+       char *d = s;
+
+       for (;; s++, d++) {
+               if (*s == '\\')
+                       s++;
+               *d = *s;
+               if (!*s)
+                       break;
+       }
+}
+
 static int ovl_mount_dir(const char *name, struct path *path)
 {
        int err;
+       char *tmp = kstrdup(name, GFP_KERNEL);
+
+       if (!tmp)
+               return -ENOMEM;
 
-       err = kern_path(name, LOOKUP_FOLLOW, path);
+       ovl_unescape(tmp);
+       err = kern_path(tmp, LOOKUP_FOLLOW, path);
        if (err) {
-               pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+               pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err);
                err = -EINVAL;
        }
+       kfree(tmp);
        return err;
 }
 
@@ -776,11 +813,11 @@ static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags,
 
 static struct file_system_type ovl_fs_type = {
        .owner          = THIS_MODULE,
-       .name           = "overlayfs",
+       .name           = "overlay",
        .mount          = ovl_mount,
        .kill_sb        = kill_anon_super,
 };
-MODULE_ALIAS_FS("overlayfs");
+MODULE_ALIAS_FS("overlay");
 
 static int __init ovl_init(void)
 {
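
ovl_next_opt() and ovl_unescape() together let mount options contain literal commas: a backslash protects the next character during option splitting and is stripped afterwards. A standalone userspace sketch of the same parsing (hypothetical demo, not kernel code):

#include <stdio.h>

static char *next_opt(char **s)         /* mirrors ovl_next_opt() */
{
        char *sbegin = *s, *p;

        if (!sbegin)
                return NULL;
        for (p = sbegin; *p; p++) {
                if (*p == '\\') {
                        p++;            /* skip the escaped character */
                        if (!*p)
                                break;
                } else if (*p == ',') {
                        *p = '\0';
                        *s = p + 1;
                        return sbegin;
                }
        }
        *s = NULL;
        return sbegin;
}

static void unescape(char *s)           /* mirrors ovl_unescape() */
{
        char *d = s;

        for (;; s++, d++) {
                if (*s == '\\')
                        s++;
                *d = *s;
                if (!*s)
                        break;
        }
}

int main(void)
{
        char buf[] = "lowerdir=/tmp/lo\\,wer,upperdir=/tmp/up";
        char *opt = buf, *p;

        while ((p = next_opt(&opt)) != NULL) {
                unescape(p);
                printf("option: %s\n", p);
        }
        return 0;               /* prints two options, comma preserved */
}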
index a929f86d0dddd52816d6fdfc41905b8fab76da88..d72b5b35f15edd965b89de2c4759bd9bc964f14f 100644 (file)
@@ -60,7 +60,7 @@
 #define ESC1_CLK_SRC                   43
 #define HDMI_CLK_SRC                   44
 #define VSYNC_CLK_SRC                  45
-#define RBCPR_CLK_SRC                  46
+#define MMSS_RBCPR_CLK_SRC             46
 #define RBBMTIMER_CLK_SRC              47
 #define MAPLE_CLK_SRC                  48
 #define VDP_CLK_SRC                    49
index be21af149f119394c68bd8018a8de39f2db067ee..2839c639f0920942d1e835dd464598432e22e974 100644 (file)
@@ -352,7 +352,6 @@ struct clk_divider {
 #define CLK_DIVIDER_READ_ONLY          BIT(5)
 
 extern const struct clk_ops clk_divider_ops;
-extern const struct clk_ops clk_divider_ro_ops;
 struct clk *clk_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
index 808dcb8cc04fbeee82f00bab944ff45e8087078a..0a8ce762a47fded2031d83ec3535feaed062806a 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/netdevice.h>
 #include <uapi/linux/if_bridge.h>
+#include <linux/bitops.h>
 
 struct br_ip {
        union {
@@ -32,11 +33,41 @@ struct br_ip_list {
        struct br_ip addr;
 };
 
+#define BR_HAIRPIN_MODE                BIT(0)
+#define BR_BPDU_GUARD          BIT(1)
+#define BR_ROOT_BLOCK          BIT(2)
+#define BR_MULTICAST_FAST_LEAVE        BIT(3)
+#define BR_ADMIN_COST          BIT(4)
+#define BR_LEARNING            BIT(5)
+#define BR_FLOOD               BIT(6)
+#define BR_AUTO_MASK           (BR_FLOOD | BR_LEARNING)
+#define BR_PROMISC             BIT(7)
+#define BR_PROXYARP            BIT(8)
+#define BR_LEARNING_SYNC       BIT(9)
+
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
 typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
+#if IS_ENABLED(CONFIG_BRIDGE)
+int br_fdb_external_learn_add(struct net_device *dev,
+                             const unsigned char *addr, u16 vid);
+int br_fdb_external_learn_del(struct net_device *dev,
+                             const unsigned char *addr, u16 vid);
+#else
+static inline int br_fdb_external_learn_add(struct net_device *dev,
+                                           const unsigned char *addr, u16 vid)
+{
+       return 0;
+}
+static inline int br_fdb_external_learn_del(struct net_device *dev,
+                                           const unsigned char *addr, u16 vid)
+{
+       return 0;
+}
+#endif
+
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
 int br_multicast_list_adjacent(struct net_device *dev,
                               struct list_head *br_ip_list);
index 8bbd7bc1043d9c4d26ffff38b35b8540093fdb71..03fa332ad2a8cec4e26c212b9333e56f0c6d6169 100644 (file)
@@ -72,7 +72,7 @@ struct iio_event_data {
 
 #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
 
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
 
 #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
 
index ea53b04993f22745028d402e0238a30c91595afb..a6059bdf7b03baa4955c069637f686b3d709d819 100644 (file)
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-bool kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
        struct hlist_node link;
index 2cb772495f7afe91b8d970c0e3a0b6d1dbb29974..29c92ee9ed56d8a23fc0a587a889ebb60e8354b5 100644 (file)
@@ -754,13 +754,13 @@ struct netdev_fcoe_hbainfo {
 };
 #endif
 
-#define MAX_PHYS_PORT_ID_LEN 32
+#define MAX_PHYS_ITEM_ID_LEN 32
 
-/* This structure holds a unique identifier to identify the
- * physical port used by a netdevice.
+/* This structure holds a unique identifier to identify some
+ * physical item (port for example) used by a netdevice.
  */
-struct netdev_phys_port_id {
-       unsigned char id[MAX_PHYS_PORT_ID_LEN];
+struct netdev_phys_item_id {
+       unsigned char id[MAX_PHYS_ITEM_ID_LEN];
        unsigned char id_len;
 };
 
@@ -951,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *
  * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
  *                   struct net_device *dev,
- *                   const unsigned char *addr, u16 flags)
+ *                   const unsigned char *addr, u16 vid, u16 flags)
  *     Adds an FDB entry to dev for addr.
  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
  *                   struct net_device *dev,
- *                   const unsigned char *addr)
+ *                   const unsigned char *addr, u16 vid)
 *     Deletes the FDB entry from dev corresponding to addr.
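
With the vid argument added to both NDOs, a driver-side implementation would now be shaped roughly like this (foo_* names invented for illustration):

static int foo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                       struct net_device *dev,
                       const unsigned char *addr, u16 vid, u16 flags)
{
        /* program addr/vid into the device's forwarding table */
        return foo_hw_fdb_insert(netdev_priv(dev), addr, vid, flags);
}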
  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
  *                    struct net_device *dev, struct net_device *filter_dev,
@@ -976,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
  *
  * int (*ndo_get_phys_port_id)(struct net_device *dev,
- *                            struct netdev_phys_port_id *ppid);
+ *                            struct netdev_phys_item_id *ppid);
  *     Called to get ID of physical port of this device. If driver does
  *     not implement this, it is assumed that the hw is not able to have
  *     multiple net devices on single physical port.
@@ -1018,6 +1018,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     performing GSO on a packet. The device returns true if it is
  *     able to GSO the packet, false otherwise. If the return value is
  *     false the stack will do software GSO.
+ *
+ * int (*ndo_switch_parent_id_get)(struct net_device *dev,
+ *                                struct netdev_phys_item_id *psid);
+ *     Called to get an ID of the switch chip this port is part of.
+ *     If driver implements this, it indicates that it represents a port
+ *     of a switch chip.
+ * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
+ *     Called to notify switch device port of bridge port STP
+ *     state change.
  */
 struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
@@ -1128,11 +1137,13 @@ struct net_device_ops {
                                               struct nlattr *tb[],
                                               struct net_device *dev,
                                               const unsigned char *addr,
+                                              u16 vid,
                                               u16 flags);
        int                     (*ndo_fdb_del)(struct ndmsg *ndm,
                                               struct nlattr *tb[],
                                               struct net_device *dev,
-                                              const unsigned char *addr);
+                                              const unsigned char *addr,
+                                              u16 vid);
        int                     (*ndo_fdb_dump)(struct sk_buff *skb,
                                                struct netlink_callback *cb,
                                                struct net_device *dev,
@@ -1150,7 +1161,7 @@ struct net_device_ops {
        int                     (*ndo_change_carrier)(struct net_device *dev,
                                                      bool new_carrier);
        int                     (*ndo_get_phys_port_id)(struct net_device *dev,
-                                                       struct netdev_phys_port_id *ppid);
+                                                       struct netdev_phys_item_id *ppid);
        void                    (*ndo_add_vxlan_port)(struct  net_device *dev,
                                                      sa_family_t sa_family,
                                                      __be16 port);
@@ -1169,6 +1180,12 @@ struct net_device_ops {
        int                     (*ndo_get_lock_subclass)(struct net_device *dev);
        bool                    (*ndo_gso_check) (struct sk_buff *skb,
                                                  struct net_device *dev);
+#ifdef CONFIG_NET_SWITCHDEV
+       int                     (*ndo_switch_parent_id_get)(struct net_device *dev,
+                                                           struct netdev_phys_item_id *psid);
+       int                     (*ndo_switch_port_stp_update)(struct net_device *dev,
+                                                             u8 state);
+#endif
 };
 
 /**
@@ -2868,7 +2885,7 @@ void dev_set_group(struct net_device *, int);
 int dev_set_mac_address(struct net_device *, struct sockaddr *);
 int dev_change_carrier(struct net_device *, bool new_carrier);
 int dev_get_phys_port_id(struct net_device *dev,
-                        struct netdev_phys_port_id *ppid);
+                        struct netdev_phys_item_id *ppid);
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                    struct netdev_queue *txq, int *ret);
index 5be8db45e368b23eb4c0d7b16b92f9db49290998..4c8ac5fcc224e2ab4c6af62cd01e130b2bf7dbb1 100644 (file)
@@ -331,6 +331,7 @@ struct pci_dev {
        unsigned int    is_added:1;
        unsigned int    is_busmaster:1; /* device is busmaster */
        unsigned int    no_msi:1;       /* device may not use msi */
+       unsigned int    no_64bit_msi:1; /* device may only use 32-bit MSIs */
        unsigned int    block_cfg_access:1;     /* config space access is blocked */
        unsigned int    broken_parity_status:1; /* Device generates false positive parity */
        unsigned int    irq_reroute_variant:2;  /* device needs IRQ rerouting variant */
index d5c89e0dd0e6725c614b491c78b5bfafe9cc46f4..51ce60c35f4c69a6df45d1e104d56d5e47798a78 100644 (file)
@@ -133,7 +133,13 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
        /* paired with smp_store_release() in percpu_ref_reinit() */
        smp_read_barrier_depends();
 
-       if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
+       /*
+        * Theoretically, the following could test just ATOMIC; however,
+        * then we'd have to mask off DEAD separately as DEAD may be
+        * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
+        * implies ATOMIC anyway.  Test them together.
+        */
+       if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
                return false;
 
        *percpu_countp = (unsigned long __percpu *)percpu_ptr;
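
The combined test works because the flag bits live in the low bits of percpu_count_ptr and DEAD is only ever set together with ATOMIC. At the time the flags were defined roughly as follows (sketch from include/linux/percpu-refcount.h; exact wording may differ):

enum {
        __PERCPU_REF_ATOMIC     = 1LU << 0,
        __PERCPU_REF_DEAD       = 1LU << 1,
        /* DEAD implies ATOMIC, so one branch on the combined mask
         * covers both states. */
        __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

        __PERCPU_REF_FLAG_BITS  = 2,
};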
index 6cacbce1a06c54700de7017a008163cb0d4ee9b6..3b0419072f88f2f5addb46b265cf7e9b4282537d 100644 (file)
@@ -94,12 +94,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
                            struct nlattr *tb[],
                            struct net_device *dev,
                            const unsigned char *addr,
-                            u16 flags);
+                           u16 vid,
+                           u16 flags);
 extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
                            struct nlattr *tb[],
                            struct net_device *dev,
-                           const unsigned char *addr);
+                           const unsigned char *addr,
+                           u16 vid);
 
 extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                  struct net_device *dev, u16 mode);
+                                  struct net_device *dev, u16 mode,
+                                  u32 flags, u32 mask);
 #endif /* __LINUX_RTNETLINK_H */
index 78c299f40baccc13f2f099e188ffb3a5dcdc0c26..7691ad5b47714263e356f2c2c05a7eb006e6c6c4 100644 (file)
@@ -2651,13 +2651,16 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
 }
 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
                                     struct iovec *iov);
-int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
-                                const struct iovec *from, int from_offset,
-                                int len);
-int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
-                          int offset, size_t count);
+static inline int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
+                           struct msghdr *msg)
+{
+       return skb_copy_and_csum_datagram_iovec(skb, hlen, msg->msg_iov);
+}
+int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
+                                struct iov_iter *from, int len);
 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
                           struct iov_iter *to, int size);
+int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
@@ -2682,6 +2685,16 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
 
+static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
+{
+       return memcpy_fromiovec(data, msg->msg_iov, len);
+}
+
+static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
+{
+       return memcpy_toiovec(msg->msg_iov, data, len);
+}
+
 struct skb_checksum_ops {
        __wsum (*update)(const void *mem, int len, __wsum wsum);
        __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
index 4282778694006034bccca4ce3fbeba9be29537ec..0d87674fb7758736d9cdbf466fd717c5665f535e 100644 (file)
@@ -103,14 +103,14 @@ struct vsock_transport {
        int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
                             struct msghdr *msg, size_t len, int flags);
        int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
-                            struct iovec *, size_t len);
+                            struct msghdr *, size_t len);
        bool (*dgram_allow)(u32 cid, u32 port);
 
        /* STREAM. */
        /* TODO: stream_bind() */
-       ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *,
+       ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *,
                                  size_t len, int flags);
-       ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *,
+       ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *,
                                  size_t len);
        s64 (*stream_has_data)(struct vsock_sock *);
        s64 (*stream_has_space)(struct vsock_sock *);
index 6465bae80a4f8ee451d175b3dc8022340d418709..e339a9513e2963ea9f5bfbf77a2851836d36cddc 100644 (file)
@@ -151,4 +151,20 @@ static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
                                 (__force __be32)to, pseudohdr);
 }
 
+static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+                                   int start, int offset)
+{
+       __sum16 *psum = (__sum16 *)(ptr + offset);
+       __wsum delta;
+
+       /* Subtract out checksum up to start */
+       csum = csum_sub(csum, csum_partial(ptr, start, 0));
+
+       /* Set derived checksum in packet */
+       delta = csum_sub(csum_fold(csum), *psum);
+       *psum = csum_fold(csum);
+
+       return delta;
+}
+
 #endif
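
remcsum_adjust() leans on the linearity of the Internet (ones-complement) checksum. Writing C for the running sum over the whole region at ptr and P for the partial sum over the first start bytes, the helper computes, roughly:

        C'    = csum_sub(C, P)          /* sum over the bytes from start onward */
        *psum = csum_fold(C')           /* value the field at offset must hold */
        delta = csum_sub(csum_fold(C'), psum_old)
                                        /* correction the caller feeds back into
                                         * any enclosing checksum, e.g. skb->csum */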
index fe7994c48b75685174134e7817bbb20ef375f890..b2828a06a5a63355f1aa2be27e74a472cc99fc62 100644 (file)
@@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
                         unsigned short type, unsigned char protocol,
                         struct net *net);
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
+                   int *addr_len);
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {
index 320f47b64a7a234794a3e7f6ac3f5795eadcfd77..e5cff6811b302edf4eea7d89cf94b073c2a8cd49 100644 (file)
@@ -150,7 +150,7 @@ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
                     unsigned char *node);
 void ipxrtr_del_routes(struct ipx_interface *intrfc);
 int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
-                       struct iovec *iov, size_t len, int noblock);
+                       struct msghdr *msg, size_t len, int noblock);
 int ipxrtr_route_skb(struct sk_buff *skb);
 struct ipx_route *ipxrtr_lookup(__be32 net);
 int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
index 72a31db47ded2837f967247cc17840aed90197ab..487ef34bbd63ff1cfe511c7ee8b1501593a14de3 100644 (file)
@@ -219,7 +219,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *,
                                      const struct sctp_chunk *,
                                      __u32 tsn);
 struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *,
-                                       const struct msghdr *, size_t msg_len);
+                                       struct msghdr *, size_t msg_len);
 struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
                                   const struct sctp_chunk *,
                                   const __u8 *,
index 806e3b5b33510dab4900dc66816a07bca8a51c26..2bb2fcf5b11f0387c81b860ad2d3a6607da19a7d 100644 (file)
@@ -531,7 +531,7 @@ struct sctp_datamsg {
 
 struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
                                            struct sctp_sndrcvinfo *,
-                                           struct msghdr *, int len);
+                                           struct iov_iter *);
 void sctp_datamsg_free(struct sctp_datamsg *);
 void sctp_datamsg_put(struct sctp_datamsg *);
 void sctp_chunk_fail(struct sctp_chunk *, int error);
@@ -647,8 +647,8 @@ struct sctp_chunk {
 
 void sctp_chunk_hold(struct sctp_chunk *);
 void sctp_chunk_put(struct sctp_chunk *);
-int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
-                         struct iovec *data);
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
+                         struct iov_iter *from);
 void sctp_chunk_free(struct sctp_chunk *);
 void  *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
new file mode 100644 (file)
index 0000000..8a6d164
--- /dev/null
+++ b/include/net/switchdev.h
@@ -0,0 +1,37 @@
+/*
+ * include/net/switchdev.h - Switch device API
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _LINUX_SWITCHDEV_H_
+#define _LINUX_SWITCHDEV_H_
+
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NET_SWITCHDEV
+
+int netdev_switch_parent_id_get(struct net_device *dev,
+                               struct netdev_phys_item_id *psid);
+int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
+
+#else
+
+static inline int netdev_switch_parent_id_get(struct net_device *dev,
+                                             struct netdev_phys_item_id *psid)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int netdev_switch_port_stp_update(struct net_device *dev,
+                                               u8 state)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* _LINUX_SWITCHDEV_H_ */
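
A driver opts into the new API by providing the two NDOs (guarded by CONFIG_NET_SWITCHDEV in struct net_device_ops); all ports of one chip should report the same parent ID so upper layers can group them. A hypothetical driver-side sketch (the foo_* names and fields are invented for illustration):

static int foo_switch_parent_id_get(struct net_device *dev,
                                    struct netdev_phys_item_id *psid)
{
        struct foo_port *port = netdev_priv(dev);       /* hypothetical priv */

        psid->id_len = sizeof(port->chip_id);
        memcpy(psid->id, &port->chip_id, psid->id_len);
        return 0;
}

static int foo_switch_port_stp_update(struct net_device *dev, u8 state)
{
        /* push the BR_STATE_* value into the switch ASIC */
        return foo_hw_set_stp_state(netdev_priv(dev), state);
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_switch_parent_id_get       = foo_switch_parent_id_get,
        .ndo_switch_port_stp_update     = foo_switch_port_stp_update,
};

The br_set_state() change later in this series is the consumer side: the bridge calls netdev_switch_port_stp_update() on every STP state transition and ignores -EOPNOTSUPP from software-only ports.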
index e862497f75568d11cd4deb4f5f5a06712f63d6de..8bb00a27e219e902c9bdc2ac52f0a7c3ed53f005 100644 (file)
@@ -184,6 +184,8 @@ struct snd_pcm_ops {
 #define SNDRV_PCM_FMTBIT_DSD_U8                _SNDRV_PCM_FMTBIT(DSD_U8)
 #define SNDRV_PCM_FMTBIT_DSD_U16_LE    _SNDRV_PCM_FMTBIT(DSD_U16_LE)
 #define SNDRV_PCM_FMTBIT_DSD_U32_LE    _SNDRV_PCM_FMTBIT(DSD_U32_LE)
+#define SNDRV_PCM_FMTBIT_DSD_U16_BE    _SNDRV_PCM_FMTBIT(DSD_U16_BE)
+#define SNDRV_PCM_FMTBIT_DSD_U32_BE    _SNDRV_PCM_FMTBIT(DSD_U32_BE)
 
 #ifdef SNDRV_LITTLE_ENDIAN
 #define SNDRV_PCM_FMTBIT_S16           SNDRV_PCM_FMTBIT_S16_LE
index da17e456908d2d16d1500499858296a042b3186c..296a556454e344b2cad71bc508b1fd047ebdf380 100644 (file)
@@ -105,6 +105,7 @@ struct __fdb_entry {
 
 #define BRIDGE_MODE_VEB                0       /* Default loopback mode */
 #define BRIDGE_MODE_VEPA       1       /* 802.1Qbg defined VEPA mode */
+#define BRIDGE_MODE_SWDEV      2       /* Full switch device offload */
 
 /* Bridge management nested attributes
  * [IFLA_AF_SPEC] = {
index 36bddc233633be972e152f3b5c0bd5a1c74387fb..f7d0d2d7173aea9840ffe2a60d43021a9fed742a 100644 (file)
@@ -145,6 +145,7 @@ enum {
        IFLA_CARRIER,
        IFLA_PHYS_PORT_ID,
        IFLA_CARRIER_CHANGES,
+       IFLA_PHYS_SWITCH_ID,
        __IFLA_MAX
 };
 
@@ -244,6 +245,7 @@ enum {
        IFLA_BRPORT_LEARNING,   /* mac learning */
        IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
        IFLA_BRPORT_PROXYARP,   /* proxy ARP */
+       IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
index 4a1d7e96dfe3d7ccf8669428a943ed53af37828e..f3d77f9f1e0bb582b1f1ca02fd8bd99ff60729dd 100644 (file)
@@ -35,11 +35,11 @@ enum {
  */
 
 #define NTF_USE                0x01
-#define NTF_PROXY      0x08    /* == ATF_PUBL */
-#define NTF_ROUTER     0x80
-
 #define NTF_SELF       0x02
 #define NTF_MASTER     0x04
+#define NTF_PROXY      0x08    /* == ATF_PUBL */
+#define NTF_EXT_LEARNED        0x10
+#define NTF_ROUTER     0x80
 
 /*
  *     Neighbor Cache Entry States.
index 6ee586728df97a0fc335a3c314e0828ac970023f..941d32f007dc250afb73a8045044d68b2646eecb 100644 (file)
@@ -220,7 +220,9 @@ typedef int __bitwise snd_pcm_format_t;
 #define        SNDRV_PCM_FORMAT_DSD_U8         ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */
 #define        SNDRV_PCM_FORMAT_DSD_U16_LE     ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */
 #define        SNDRV_PCM_FORMAT_DSD_U32_LE     ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */
-#define        SNDRV_PCM_FORMAT_LAST           SNDRV_PCM_FORMAT_DSD_U32_LE
+#define        SNDRV_PCM_FORMAT_DSD_U16_BE     ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
+#define        SNDRV_PCM_FORMAT_DSD_U32_BE     ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
+#define        SNDRV_PCM_FORMAT_LAST           SNDRV_PCM_FORMAT_DSD_U32_BE
 
 #ifdef SNDRV_LITTLE_ENDIAN
 #define        SNDRV_PCM_FORMAT_S16            SNDRV_PCM_FORMAT_S16_LE
index 1d0af8a2c6469bda46438dbd8383cbf535d65077..ed8f2cde34c57acce554ee08d79625c3b260ada8 100644 (file)
@@ -1640,7 +1640,6 @@ bool uprobe_deny_signal(void)
                if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
                        utask->state = UTASK_SSTEP_TRAPPED;
                        set_tsk_thread_flag(t, TIF_UPROBE);
-                       set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
                }
        }
 
index 67ad4e7f506a2509a0493138662c0fec7dc4d7fa..c65dac8c97cdd5dd72f636455dcecc2742d9706d 100644 (file)
@@ -75,6 +75,10 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task
        return 0;
 }
 
+static void update_curr_idle(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -101,4 +105,5 @@ const struct sched_class idle_sched_class = {
 
        .prio_changed           = prio_changed_idle,
        .switched_to            = switched_to_idle,
+       .update_curr            = update_curr_idle,
 };
index 67426e529f59c044eef35c88c8d906cab641bb64..79ffec45a6acd9415d31d90906b529a6bc0d752b 100644 (file)
@@ -102,6 +102,10 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task)
        return 0;
 }
 
+static void update_curr_stop(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU stop tasks:
  */
@@ -128,4 +132,5 @@ const struct sched_class stop_sched_class = {
 
        .prio_changed           = prio_changed_stop,
        .switched_to            = switched_to_stop,
+       .update_curr            = update_curr_stop,
 };
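
These empty hooks exist so core code can invoke update_curr through the class pointer unconditionally; with every scheduling class providing one, even as a no-op, call sites need no NULL check. The enabled call pattern, roughly:

        /* e.g. when sampling a task's runtime (sketch): */
        rq->curr->sched_class->update_curr(rq);         /* never NULL now */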
index 99815b5454bf4d68fdc36153951e434f7219b4d0..ff9ffc17fa0e1fc438e9e4ebec20376ddd6ec969 100644 (file)
@@ -228,6 +228,7 @@ source "net/vmw_vsock/Kconfig"
 source "net/netlink/Kconfig"
 source "net/mpls/Kconfig"
 source "net/hsr/Kconfig"
+source "net/switchdev/Kconfig"
 
 config RPS
        boolean
index 7ed1970074b07bfcf0803f468f62efba7a9018ac..95fc694e4ddc088ca419a745a8567f700a1b535a 100644 (file)
@@ -73,3 +73,6 @@ obj-$(CONFIG_OPENVSWITCH)     += openvswitch/
 obj-$(CONFIG_VSOCKETS) += vmw_vsock/
 obj-$(CONFIG_NET_MPLS_GSO)     += mpls/
 obj-$(CONFIG_HSR)              += hsr/
+ifneq ($(CONFIG_NET_SWITCHDEV),)
+obj-y                          += switchdev/
+endif
index 425942db17f64820248d1d830b9ecdd12ebdf8e6..0d0766ea5ab104c5bba47f3b8ca32b1858e9c94d 100644 (file)
@@ -1659,7 +1659,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 
        SOCK_DEBUG(sk, "SK %p: Copy user data (%Zd bytes).\n", sk, len);
 
-       err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+       err = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (err) {
                kfree_skb(skb);
                err = -EFAULT;
index 9cd1ccae9a11914a2c082ad2fdab1c7898f55707..f59112944c917e845c919b90449098aa7a3a1a7e 100644 (file)
@@ -570,15 +570,16 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 }
 
 int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
-               size_t total_len)
+               size_t size)
 {
        struct sock *sk = sock->sk;
        DEFINE_WAIT(wait);
        struct atm_vcc *vcc;
        struct sk_buff *skb;
        int eff, error;
-       const void __user *buff;
-       int size;
+       struct iov_iter from;
+
+       iov_iter_init(&from, WRITE, m->msg_iov, m->msg_iovlen, size);
 
        lock_sock(sk);
        if (sock->state != SS_CONNECTED) {
@@ -589,12 +590,6 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                error = -EISCONN;
                goto out;
        }
-       if (m->msg_iovlen != 1) {
-               error = -ENOSYS; /* fix this later @@@ */
-               goto out;
-       }
-       buff = m->msg_iov->iov_base;
-       size = m->msg_iov->iov_len;
        vcc = ATM_SD(sock);
        if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
            test_bit(ATM_VF_CLOSE, &vcc->flags) ||
@@ -607,7 +602,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                error = 0;
                goto out;
        }
-       if (size < 0 || size > vcc->qos.txtp.max_sdu) {
+       if (size > vcc->qos.txtp.max_sdu) {
                error = -EMSGSIZE;
                goto out;
        }
@@ -639,7 +634,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                goto out;
        skb->dev = NULL; /* for paths shared with net_device interfaces */
        ATM_SKB(skb)->atm_options = vcc->atm_options;
-       if (copy_from_user(skb_put(skb, size), buff, size)) {
+       if (copy_from_iter(skb_put(skb, size), size, &from) != size) {
                kfree_skb(skb);
                error = -EFAULT;
                goto out;
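
The vcc_sendmsg() conversion is the pattern repeated across this series: build an iov_iter over the msghdr's iovec once, then let copy_from_iter() walk every segment, which is what makes the old single-iovec -ENOSYS restriction unnecessary. A minimal sketch of the new shape (error handling trimmed):

static int copy_msg_to_skb(struct sk_buff *skb, struct msghdr *m, size_t size)
{
        struct iov_iter from;

        iov_iter_init(&from, WRITE, m->msg_iov, m->msg_iovlen, size);
        if (copy_from_iter(skb_put(skb, size), size, &from) != size)
                return -EFAULT;         /* partial copy: treat as fault */
        return 0;
}

For the simple fixed-length cases, the memcpy_from_msg()/memcpy_to_msg() helpers added above serve the same purpose in a single call.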
index f4f835e1937859da05ed48b18e813985727ce67a..ca049a7c9287d703f789b5842472a768152dd7ca 100644 (file)
@@ -1549,7 +1549,7 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
        skb_reserve(skb, size - len);
 
        /* User data follows immediately after the AX.25 data */
-       if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+       if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                err = -EFAULT;
                kfree_skb(skb);
                goto out;
index 5e2cd25359781784018890be26b8196ff801dd55..2c245fdf319a60022328e8f3918251db526f88f8 100644 (file)
@@ -947,7 +947,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        if (!skb)
                goto done;
 
-       if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+       if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                err = -EFAULT;
                goto drop;
        }
index cbeef5f62f3b25380fbde84c853bca2b30aab820..f3e4a16fb1570e665a5c4177ffa69de10df4dbbe 100644 (file)
@@ -5767,7 +5767,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
        if (!buf)
                return -ENOMEM;
 
-       if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
+       if (memcpy_from_msg(buf, msg, msglen)) {
                err = -EFAULT;
                goto done;
        }
index 8bbbb5ec468c373f86c6caafe9187f3684775915..2348176401a0b19ad3b5e9129999381f43da50a0 100644 (file)
@@ -588,7 +588,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                }
                skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
 
-               err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+               err = memcpy_from_msg(skb_put(skb, size), msg, size);
                if (err) {
                        kfree_skb(skb);
                        if (sent == 0)
index 7ee9e4ab00f882f827f7b63361dc82a24ca72a23..30e5ea3f1ad311388bf7f95aec8f70cb88e8fab9 100644 (file)
@@ -285,7 +285,7 @@ static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
        if (!skb)
                return err;
 
-       if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+       if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                kfree_skb(skb);
                return -EFAULT;
        }
index 6f6c95cfe8f20958ce0d14522526a478549264ef..cc36e59db7d75203d89017e8d458b72370df33da 100644 (file)
@@ -90,7 +90,7 @@ static void fdb_rcu_free(struct rcu_head *head)
  * are then updated with the new information.
  * Called under RTNL.
  */
-static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
+static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
 {
        int err;
        struct net_bridge_port *p;
@@ -118,7 +118,7 @@ undo:
  * the ports with needed information.
  * Called under RTNL.
  */
-static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr)
+static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
 {
        struct net_bridge_port *p;
 
@@ -133,7 +133,7 @@ static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr)
 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
        if (f->is_static)
-               fdb_del_hw(br, f->addr.addr);
+               fdb_del_hw_addr(br, f->addr.addr);
 
        hlist_del_rcu(&f->hlist);
        fdb_notify(br, f, RTM_DELNEIGH);
@@ -481,6 +481,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
                fdb->is_local = 0;
                fdb->is_static = 0;
                fdb->added_by_user = 0;
+               fdb->added_by_external_learn = 0;
                fdb->updated = fdb->used = jiffies;
                hlist_add_head_rcu(&fdb->hlist, head);
        }
@@ -514,7 +515,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                return -ENOMEM;
 
        fdb->is_local = fdb->is_static = 1;
-       fdb_add_hw(br, addr);
+       fdb_add_hw_addr(br, addr);
        fdb_notify(br, fdb, RTM_NEWNEIGH);
        return 0;
 }
@@ -613,7 +614,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
        ndm->ndm_family  = AF_BRIDGE;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
-       ndm->ndm_flags   = 0;
+       ndm->ndm_flags   = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0;
        ndm->ndm_type    = 0;
        ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
        ndm->ndm_state   = fdb_to_nud(fdb);
@@ -754,19 +755,19 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
                        fdb->is_local = 1;
                        if (!fdb->is_static) {
                                fdb->is_static = 1;
-                               fdb_add_hw(br, addr);
+                               fdb_add_hw_addr(br, addr);
                        }
                } else if (state & NUD_NOARP) {
                        fdb->is_local = 0;
                        if (!fdb->is_static) {
                                fdb->is_static = 1;
-                               fdb_add_hw(br, addr);
+                               fdb_add_hw_addr(br, addr);
                        }
                } else {
                        fdb->is_local = 0;
                        if (fdb->is_static) {
                                fdb->is_static = 0;
-                               fdb_del_hw(br, addr);
+                               fdb_del_hw_addr(br, addr);
                        }
                }
 
@@ -805,33 +806,17 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
 /* Add new permanent fdb entry with RTM_NEWNEIGH */
 int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
               struct net_device *dev,
-              const unsigned char *addr, u16 nlh_flags)
+              const unsigned char *addr, u16 vid, u16 nlh_flags)
 {
        struct net_bridge_port *p;
        int err = 0;
        struct net_port_vlans *pv;
-       unsigned short vid = VLAN_N_VID;
 
        if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
                pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
                return -EINVAL;
        }
 
-       if (tb[NDA_VLAN]) {
-               if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
-                       pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
-                       return -EINVAL;
-               }
-
-               vid = nla_get_u16(tb[NDA_VLAN]);
-
-               if (!vid || vid >= VLAN_VID_MASK) {
-                       pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
-                               vid);
-                       return -EINVAL;
-               }
-       }
-
        if (is_zero_ether_addr(addr)) {
                pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
                return -EINVAL;
@@ -845,7 +830,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        }
 
        pv = nbp_get_vlan_info(p);
-       if (vid != VLAN_N_VID) {
+       if (vid) {
                if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
                        pr_info("bridge: RTM_NEWNEIGH with unconfigured "
                                "vlan %d on port %s\n", vid, dev->name);
@@ -903,27 +888,12 @@ static int __br_fdb_delete(struct net_bridge_port *p,
 /* Remove neighbor entry with RTM_DELNEIGH */
 int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                  struct net_device *dev,
-                 const unsigned char *addr)
+                 const unsigned char *addr, u16 vid)
 {
        struct net_bridge_port *p;
        int err;
        struct net_port_vlans *pv;
-       unsigned short vid = VLAN_N_VID;
 
-       if (tb[NDA_VLAN]) {
-               if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
-                       pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
-                       return -EINVAL;
-               }
-
-               vid = nla_get_u16(tb[NDA_VLAN]);
-
-               if (!vid || vid >= VLAN_VID_MASK) {
-                       pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
-                               vid);
-                       return -EINVAL;
-               }
-       }
        p = br_port_get_rtnl(dev);
        if (p == NULL) {
                pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
@@ -932,7 +902,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
        }
 
        pv = nbp_get_vlan_info(p);
-       if (vid != VLAN_N_VID) {
+       if (vid) {
                if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
                        pr_info("bridge: RTM_DELNEIGH with unconfigured "
                                "vlan %d on port %s\n", vid, dev->name);
@@ -1014,3 +984,91 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
                }
        }
 }
+
+int br_fdb_external_learn_add(struct net_device *dev,
+                             const unsigned char *addr, u16 vid)
+{
+       struct net_bridge_port *p;
+       struct net_bridge *br;
+       struct hlist_head *head;
+       struct net_bridge_fdb_entry *fdb;
+       int err = 0;
+
+       rtnl_lock();
+
+       p = br_port_get_rtnl(dev);
+       if (!p) {
+               pr_info("bridge: %s not a bridge port\n", dev->name);
+               err = -EINVAL;
+               goto err_rtnl_unlock;
+       }
+
+       br = p->br;
+
+       spin_lock_bh(&br->hash_lock);
+
+       head = &br->hash[br_mac_hash(addr, vid)];
+       fdb = fdb_find(head, addr, vid);
+       if (!fdb) {
+               fdb = fdb_create(head, p, addr, vid);
+               if (!fdb) {
+                       err = -ENOMEM;
+                       goto err_unlock;
+               }
+               fdb->added_by_external_learn = 1;
+               fdb_notify(br, fdb, RTM_NEWNEIGH);
+       } else if (fdb->added_by_external_learn) {
+               /* Refresh entry */
+               fdb->updated = fdb->used = jiffies;
+       } else if (!fdb->added_by_user) {
+               /* Take over SW learned entry */
+               fdb->added_by_external_learn = 1;
+               fdb->updated = jiffies;
+               fdb_notify(br, fdb, RTM_NEWNEIGH);
+       }
+
+err_unlock:
+       spin_unlock_bh(&br->hash_lock);
+err_rtnl_unlock:
+       rtnl_unlock();
+
+       return err;
+}
+EXPORT_SYMBOL(br_fdb_external_learn_add);
+
+int br_fdb_external_learn_del(struct net_device *dev,
+                             const unsigned char *addr, u16 vid)
+{
+       struct net_bridge_port *p;
+       struct net_bridge *br;
+       struct hlist_head *head;
+       struct net_bridge_fdb_entry *fdb;
+       int err = 0;
+
+       rtnl_lock();
+
+       p = br_port_get_rtnl(dev);
+       if (!p) {
+               pr_info("bridge: %s not a bridge port\n", dev->name);
+               err = -EINVAL;
+               goto err_rtnl_unlock;
+       }
+
+       br = p->br;
+
+       spin_lock_bh(&br->hash_lock);
+
+       head = &br->hash[br_mac_hash(addr, vid)];
+       fdb = fdb_find(head, addr, vid);
+       if (fdb && fdb->added_by_external_learn)
+               fdb_delete(br, fdb);
+       else
+               err = -ENOENT;
+
+       spin_unlock_bh(&br->hash_lock);
+err_rtnl_unlock:
+       rtnl_unlock();
+
+       return err;
+}
+EXPORT_SYMBOL(br_fdb_external_learn_del);
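
Both entry points take rtnl_lock() themselves, so a switch driver calls them from process context without holding RTNL, typically from its hardware learn/age event handling. A hypothetical caller (names invented):

static void foo_handle_fdb_event(struct net_device *port_dev,
                                 const unsigned char *mac, u16 vid,
                                 bool aged_out)
{
        int err;

        if (aged_out)
                err = br_fdb_external_learn_del(port_dev, mac, vid);
        else
                err = br_fdb_external_learn_add(port_dev, mac, vid);
        if (err && err != -ENOENT)
                netdev_warn(port_dev, "bridge fdb sync failed: %d\n", err);
}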
index 86c239b06f6e136f571ca0e74ccf3f9065ed095a..9f5eb55a4d3a7e4b51cc3fa3fa0783a52e6f98fa 100644 (file)
@@ -281,6 +281,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_MODE]      = { .type = NLA_U8 },
        [IFLA_BRPORT_GUARD]     = { .type = NLA_U8 },
        [IFLA_BRPORT_PROTECT]   = { .type = NLA_U8 },
+       [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING]  = { .type = NLA_U8 },
        [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
 };
index 8f3f081402586a3dddee01adc838b77f514db904..aea3d1339b3f3d248a9d5517725dbe3732c03d8e 100644 (file)
@@ -98,9 +98,10 @@ struct net_bridge_fdb_entry
        unsigned long                   updated;
        unsigned long                   used;
        mac_addr                        addr;
-       unsigned char                   is_local;
-       unsigned char                   is_static;
-       unsigned char                   added_by_user;
+       unsigned char                   is_local:1,
+                                       is_static:1,
+                                       added_by_user:1,
+                                       added_by_external_learn:1;
        __u16                           vlan_id;
 };
 
@@ -163,16 +164,6 @@ struct net_bridge_port
        struct rcu_head                 rcu;
 
        unsigned long                   flags;
-#define BR_HAIRPIN_MODE                0x00000001
-#define BR_BPDU_GUARD           0x00000002
-#define BR_ROOT_BLOCK          0x00000004
-#define BR_MULTICAST_FAST_LEAVE        0x00000008
-#define BR_ADMIN_COST          0x00000010
-#define BR_LEARNING            0x00000020
-#define BR_FLOOD               0x00000040
-#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
-#define BR_PROMISC             0x00000080
-#define BR_PROXYARP            0x00000100
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        struct bridge_mcast_own_query   ip4_own_query;
@@ -404,9 +395,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                   const unsigned char *addr, u16 vid, bool added_by_user);
 
 int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
-                 struct net_device *dev, const unsigned char *addr);
+                 struct net_device *dev, const unsigned char *addr, u16 vid);
 int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
-              const unsigned char *addr, u16 nlh_flags);
+              const unsigned char *addr, u16 vid, u16 nlh_flags);
 int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                struct net_device *dev, struct net_device *fdev, int idx);
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
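
Two things happen in br_private.h. The three existing unsigned char flags on net_bridge_fdb_entry become 1-bit bitfields so the new added_by_external_learn flag fits without growing the structure, and the BR_* port-flag definitions are dropped because they move to a header visible outside net/bridge/ (the rtnetlink hunks below use BR_HAIRPIN_MODE and friends directly). To illustrate the first point in plain userspace C:

#include <stdio.h>

/* Four 1-bit flags pack into a single byte, so adding
 * added_by_external_learn costs no space compared with the old
 * three separate unsigned chars. */
struct fdb_flags {
	unsigned char is_local:1,
		      is_static:1,
		      added_by_user:1,
		      added_by_external_learn:1;
};

int main(void)
{
	printf("sizeof(struct fdb_flags) = %zu\n", sizeof(struct fdb_flags));
	return 0;	/* prints 1 */
}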
index 2b047bcf42a4eb1c450c0283af9bea2676052836..fb3ebe6155134b4532ad4c85b2fea2260f0d6140 100644 (file)
@@ -12,6 +12,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/rculist.h>
+#include <net/switchdev.h>
 
 #include "br_private.h"
 #include "br_private_stp.h"
@@ -38,7 +39,13 @@ void br_log_state(const struct net_bridge_port *p)
 
 void br_set_state(struct net_bridge_port *p, unsigned int state)
 {
+       int err;
+
        p->state = state;
+       err = netdev_switch_port_stp_update(p->dev, state);
+       if (err && err != -EOPNOTSUPP)
+               br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
+                               (unsigned int) p->port_no, p->dev->name);
 }
 
 /* called under bridge lock */
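
br_set_state() now mirrors every STP state change down to the hardware via netdev_switch_port_stp_update(), treating -EOPNOTSUPP as "no offload available" rather than a failure. On the driver side this lands in the new switchdev ndo introduced by this merge; a rough sketch of what a port driver might provide (the example_* type and helper are invented, and the exact hook signature should be checked against the switchdev patches):

#include <linux/netdevice.h>

struct example_port {
	int hw_port;	/* stand-in for real driver state */
};

/* Stand-in for the register writes that would program the ASIC. */
static int example_hw_set_stp_state(struct example_port *port, u8 state)
{
	return 0;
}

/* Program the per-port STP state (BR_STATE_DISABLED, LISTENING,
 * LEARNING, FORWARDING or BLOCKING) into the hardware. */
static int example_port_stp_update(struct net_device *dev, u8 state)
{
	struct example_port *port = netdev_priv(dev);

	return example_hw_set_stp_state(port, state);
}

static const struct net_device_ops example_netdev_ops = {
	/* ... the usual ndo hooks ... */
	.ndo_switch_port_stp_update = example_port_stp_update,
};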
index fbcd156099fbbaea571f694883e4b69c8fa5f979..ac618b0b8a4f52933a238b83234fc09c1187b310 100644 (file)
@@ -418,7 +418,7 @@ unlock:
                }
                release_sock(sk);
                chunk = min_t(unsigned int, skb->len, size);
-               if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+               if (memcpy_to_msg(msg, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        if (copied == 0)
                                copied = -EFAULT;
@@ -566,7 +566,7 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
        skb_reserve(skb, cf_sk->headroom);
 
-       ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+       ret = memcpy_from_msg(skb_put(skb, len), msg, len);
 
        if (ret)
                goto err;
@@ -641,7 +641,7 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                 */
                size = min_t(int, size, skb_tailroom(skb));
 
-               err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+               err = memcpy_from_msg(skb_put(skb, size), msg, size);
                if (err) {
                        kfree_skb(skb);
                        goto out_err;
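
From here on, every memcpy_fromiovec()/memcpy_toiovec() call site in this merge (caif above; can, dccp, decnet, 802.15.4, ipx, irda, iucv, l2tp, llc, netlink, netrom, nfc and more below) switches to memcpy_from_msg()/memcpy_to_msg(). The new helpers take the struct msghdr itself, hiding how the payload is described so the ongoing iov_iter conversion can change the representation underneath without touching every protocol again. At this point in the series they are thin wrappers along these lines (the real definitions live in include/linux/skbuff.h, if memory serves):

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return memcpy_fromiovec(data, msg->msg_iov, len);
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return memcpy_toiovec(msg->msg_iov, data, len);
}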
index dcb75c0e66c1b69979a88c65efe43084046f8bc2..01671187e3fe4a862000f6cd463463f8b4042561 100644 (file)
@@ -858,8 +858,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 
                /* update can_frames content */
                for (i = 0; i < msg_head->nframes; i++) {
-                       err = memcpy_fromiovec((u8 *)&op->frames[i],
-                                              msg->msg_iov, CFSIZ);
+                       err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
 
                        if (op->frames[i].can_dlc > 8)
                                err = -EINVAL;
@@ -894,8 +893,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                        op->frames = &op->sframe;
 
                for (i = 0; i < msg_head->nframes; i++) {
-                       err = memcpy_fromiovec((u8 *)&op->frames[i],
-                                              msg->msg_iov, CFSIZ);
+                       err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
 
                        if (op->frames[i].can_dlc > 8)
                                err = -EINVAL;
@@ -1024,9 +1022,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 
                if (msg_head->nframes) {
                        /* update can_frames content */
-                       err = memcpy_fromiovec((u8 *)op->frames,
-                                              msg->msg_iov,
-                                              msg_head->nframes * CFSIZ);
+                       err = memcpy_from_msg((u8 *)op->frames, msg,
+                                             msg_head->nframes * CFSIZ);
                        if (err < 0)
                                return err;
 
@@ -1072,8 +1069,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                }
 
                if (msg_head->nframes) {
-                       err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
-                                              msg_head->nframes * CFSIZ);
+                       err = memcpy_from_msg((u8 *)op->frames, msg,
+                                             msg_head->nframes * CFSIZ);
                        if (err < 0) {
                                if (op->frames != &op->sframe)
                                        kfree(op->frames);
@@ -1209,7 +1206,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
 
        can_skb_reserve(skb);
 
-       err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
+       err = memcpy_from_msg(skb_put(skb, CFSIZ), msg, CFSIZ);
        if (err < 0) {
                kfree_skb(skb);
                return err;
@@ -1285,7 +1282,7 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        /* read message head information */
 
-       ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
+       ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
        if (ret < 0)
                return ret;
 
@@ -1558,7 +1555,7 @@ static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (skb->len < size)
                size = skb->len;
 
-       err = memcpy_toiovec(msg->msg_iov, skb->data, size);
+       err = memcpy_to_msg(msg, skb->data, size);
        if (err < 0) {
                skb_free_datagram(sk, skb);
                return err;
index 081e81fd017fa53f7a6ed3afd341601b43377531..dfdcffbb1070651f9d813bcc0b075de511479c35 100644 (file)
@@ -703,7 +703,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
 
-       err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+       err = memcpy_from_msg(skb_put(skb, size), msg, size);
        if (err < 0)
                goto free_skb;
 
@@ -750,7 +750,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
        else
                size = skb->len;
 
-       err = memcpy_toiovec(msg->msg_iov, skb->data, size);
+       err = memcpy_to_msg(msg, skb->data, size);
        if (err < 0) {
                skb_free_datagram(sk, skb);
                return err;
index 26391a3fe3e5b384cd8b87ba5d41d9f16eaf42c4..b6e303b0f01fa818e507ef442c87fc4c5803dbd5 100644 (file)
@@ -480,18 +480,16 @@ short_copy:
 EXPORT_SYMBOL(skb_copy_datagram_iter);
 
 /**
- *     skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ *     skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
  *     @skb: buffer to copy
  *     @offset: offset in the buffer to start copying to
- *     @from: io vector to copy to
- *     @from_offset: offset in the io vector to start copying from
+ *     @from: the copy source
 *     @len: amount of data to copy to buffer from the source
  *
  *     Returns 0 or -EFAULT.
- *     Note: the iovec is not modified during the copy.
  */
-int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
-                                const struct iovec *from, int from_offset,
+int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
+                                struct iov_iter *from,
                                 int len)
 {
        int start = skb_headlen(skb);
@@ -502,13 +500,11 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
        if (copy > 0) {
                if (copy > len)
                        copy = len;
-               if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
-                                       copy))
+               if (copy_from_iter(skb->data + offset, copy, from) != copy)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
-               from_offset += copy;
        }
 
        /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -520,24 +516,19 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
 
                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
-                       int err;
-                       u8  *vaddr;
-                       struct page *page = skb_frag_page(frag);
+                       size_t copied;
 
                        if (copy > len)
                                copy = len;
-                       vaddr = kmap(page);
-                       err = memcpy_fromiovecend(vaddr + frag->page_offset +
-                                                 offset - start,
-                                                 from, from_offset, copy);
-                       kunmap(page);
-                       if (err)
+                       copied = copy_page_from_iter(skb_frag_page(frag),
+                                         frag->page_offset + offset - start,
+                                         copy, from);
+                       if (copied != copy)
                                goto fault;
 
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
-                       from_offset += copy;
                }
                start = end;
        }
@@ -551,16 +542,13 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
-                       if (skb_copy_datagram_from_iovec(frag_iter,
-                                                        offset - start,
-                                                        from,
-                                                        from_offset,
-                                                        copy))
+                       if (skb_copy_datagram_from_iter(frag_iter,
+                                                       offset - start,
+                                                       from, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
-                       from_offset += copy;
                }
                start = end;
        }
@@ -570,78 +558,61 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
 fault:
        return -EFAULT;
 }
-EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+EXPORT_SYMBOL(skb_copy_datagram_from_iter);
 
 /**
- *     zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
+ *     zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
  *     @skb: buffer to copy
- *     @from: io vector to copy from
- *     @offset: offset in the io vector to start copying from
- *     @count: amount of vectors to copy to buffer from
+ *     @from: the source to copy from
  *
  *     The function will first copy up to headlen, and then pin the userspace
  *     pages and build frags through them.
  *
  *     Returns 0, -EFAULT or -EMSGSIZE.
- *     Note: the iovec is not modified during the copy
  */
-int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
-                                 int offset, size_t count)
+int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
 {
-       int len = iov_length(from, count) - offset;
+       int len = iov_iter_count(from);
        int copy = min_t(int, skb_headlen(skb), len);
-       int size;
-       int i = 0;
+       int frag = 0;
 
        /* copy up to skb headlen */
-       if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy))
+       if (skb_copy_datagram_from_iter(skb, 0, from, copy))
                return -EFAULT;
 
-       if (len == copy)
-               return 0;
-
-       offset += copy;
-       while (count--) {
-               struct page *page[MAX_SKB_FRAGS];
-               int num_pages;
-               unsigned long base;
+       while (iov_iter_count(from)) {
+               struct page *pages[MAX_SKB_FRAGS];
+               size_t start;
+               ssize_t copied;
                unsigned long truesize;
+               int n = 0;
 
-               /* Skip over from offset and copied */
-               if (offset >= from->iov_len) {
-                       offset -= from->iov_len;
-                       ++from;
-                       continue;
-               }
-               len = from->iov_len - offset;
-               base = (unsigned long)from->iov_base + offset;
-               size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
-               if (i + size > MAX_SKB_FRAGS)
+               if (frag == MAX_SKB_FRAGS)
                        return -EMSGSIZE;
-               num_pages = get_user_pages_fast(base, size, 0, &page[i]);
-               if (num_pages != size) {
-                       release_pages(&page[i], num_pages, 0);
+
+               copied = iov_iter_get_pages(from, pages, ~0U,
+                                           MAX_SKB_FRAGS - frag, &start);
+               if (copied < 0)
                        return -EFAULT;
-               }
-               truesize = size * PAGE_SIZE;
-               skb->data_len += len;
-               skb->len += len;
+
+               iov_iter_advance(from, copied);
+
+               truesize = PAGE_ALIGN(copied + start);
+               skb->data_len += copied;
+               skb->len += copied;
                skb->truesize += truesize;
                atomic_add(truesize, &skb->sk->sk_wmem_alloc);
-               while (len) {
-                       int off = base & ~PAGE_MASK;
-                       int size = min_t(int, len, PAGE_SIZE - off);
-                       skb_fill_page_desc(skb, i, page[i], off, size);
-                       base += size;
-                       len -= size;
-                       i++;
+               while (copied) {
+                       int size = min_t(int, copied, PAGE_SIZE - start);
+                       skb_fill_page_desc(skb, frag++, pages[n], start, size);
+                       start = 0;
+                       copied -= size;
+                       n++;
                }
-               offset = 0;
-               ++from;
        }
        return 0;
 }
-EXPORT_SYMBOL(zerocopy_sg_from_iovec);
+EXPORT_SYMBOL(zerocopy_sg_from_iter);
 
 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      u8 __user *to, int len,
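
The iovec-based datagram copy and the zerocopy builder are rewritten on top of iov_iter, which removes all of the manual offset bookkeeping. The interesting part of zerocopy_sg_from_iter() is the new loop: iov_iter_get_pages() pins up to MAX_SKB_FRAGS - frag user pages in one call, reporting how many bytes it covered (copied) and the offset of the data within the first page (start); the inner while then slices that run into one frag per page. A userspace model of just that slicing arithmetic:

#include <stdio.h>

#define PAGE_SIZE 4096L

/* Model of the frag-slicing loop in zerocopy_sg_from_iter():
 * "copied" bytes were pinned starting "start" bytes into the first
 * page; emit one (page, offset, size) triple per page, exactly as
 * skb_fill_page_desc() receives them above. Example values only. */
int main(void)
{
	long start = 1000, copied = 10000;
	int n = 0, frag = 0;

	while (copied) {
		long size = copied < PAGE_SIZE - start ? copied
						       : PAGE_SIZE - start;
		printf("frag %d: page %d, off %ld, size %ld\n",
		       frag++, n++, start, size);
		start = 0;
		copied -= size;
	}
	return 0;
}

Note also that truesize becomes PAGE_ALIGN(copied + start): the socket is charged for the whole pinned pages, not just the bytes actually referenced.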
index ac4836241a965a71952469ba054f87d8dfca2d32..0814a560e5f3458714ffa316bcbf34c6af4a79c5 100644 (file)
@@ -5846,7 +5846,7 @@ EXPORT_SYMBOL(dev_change_carrier);
  *     Get device physical port ID
  */
 int dev_get_phys_port_id(struct net_device *dev,
-                        struct netdev_phys_port_id *ppid)
+                        struct netdev_phys_item_id *ppid)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
 
index 1a24602cd54e63caa38e40cdedf8009944e0824b..999341244434309b6275fe6230a9f1bf7c57d2b2 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/capability.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
+#include <net/switchdev.h>
 #include <linux/if_arp.h>
 #include <linux/slab.h>
 #include <linux/nsproxy.h>
@@ -404,7 +405,7 @@ static ssize_t phys_port_id_show(struct device *dev,
                return restart_syscall();
 
        if (dev_isalive(netdev)) {
-               struct netdev_phys_port_id ppid;
+               struct netdev_phys_item_id ppid;
 
                ret = dev_get_phys_port_id(netdev, &ppid);
                if (!ret)
@@ -416,6 +417,28 @@ static ssize_t phys_port_id_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(phys_port_id);
 
+static ssize_t phys_switch_id_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct net_device *netdev = to_net_dev(dev);
+       ssize_t ret = -EINVAL;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       if (dev_isalive(netdev)) {
+               struct netdev_phys_item_id ppid;
+
+               ret = netdev_switch_parent_id_get(netdev, &ppid);
+               if (!ret)
+                       ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
+       }
+       rtnl_unlock();
+
+       return ret;
+}
+static DEVICE_ATTR_RO(phys_switch_id);
+
 static struct attribute *net_class_attrs[] = {
        &dev_attr_netdev_group.attr,
        &dev_attr_type.attr,
@@ -441,6 +464,7 @@ static struct attribute *net_class_attrs[] = {
        &dev_attr_tx_queue_len.attr,
        &dev_attr_gro_flush_timeout.attr,
        &dev_attr_phys_port_id.attr,
+       &dev_attr_phys_switch_id.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(net_class);
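
The new read-only phys_switch_id attribute sits next to phys_port_id and exposes netdev_switch_parent_id_get() to userspace: ports belonging to the same switch ASIC report the same ID, which is how tooling can group them. Reading it is an ordinary sysfs read, and on devices without switchdev support the driver's error simply propagates out of the read. For example:

#include <stdio.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";
	char path[256], id[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/phys_switch_id",
		 ifname);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	/* The ID is formatted as a hex string by the %*phN above; the
	 * read itself fails on devices without switchdev support. */
	if (!fgets(id, sizeof(id), f)) {
		perror(path);
		fclose(f);
		return 1;
	}
	printf("%s is on switch %s", ifname, id);
	fclose(f);
	return 0;
}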
index a6882686ca3a10fc3be7ced6299dc7385ffd239d..61cb7e7cc3c7725cf8435f61578268328a76e3d9 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/mutex.h>
 #include <linux/if_addr.h>
 #include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
 #include <linux/pci.h>
 #include <linux/etherdevice.h>
 
@@ -43,6 +44,7 @@
 
 #include <linux/inet.h>
 #include <linux/netdevice.h>
+#include <net/switchdev.h>
 #include <net/ip.h>
 #include <net/protocol.h>
 #include <net/arp.h>
@@ -868,7 +870,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
               + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
               + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
-              + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
+              + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+              + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
 }
 
 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -952,7 +955,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
 {
        int err;
-       struct netdev_phys_port_id ppid;
+       struct netdev_phys_item_id ppid;
 
        err = dev_get_phys_port_id(dev, &ppid);
        if (err) {
@@ -967,6 +970,24 @@ static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
        return 0;
 }
 
+static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
+{
+       int err;
+       struct netdev_phys_item_id psid;
+
+       err = netdev_switch_parent_id_get(dev, &psid);
+       if (err) {
+               if (err == -EOPNOTSUPP)
+                       return 0;
+               return err;
+       }
+
+       if (nla_put(skb, IFLA_PHYS_SWITCH_ID, psid.id_len, psid.id))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                            int type, u32 pid, u32 seq, u32 change,
                            unsigned int flags, u32 ext_filter_mask)
@@ -1039,6 +1060,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
        if (rtnl_phys_port_id_fill(skb, dev))
                goto nla_put_failure;
 
+       if (rtnl_phys_switch_id_fill(skb, dev))
+               goto nla_put_failure;
+
        attr = nla_reserve(skb, IFLA_STATS,
                        sizeof(struct rtnl_link_stats));
        if (attr == NULL)
@@ -1196,8 +1220,9 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_PROMISCUITY]      = { .type = NLA_U32 },
        [IFLA_NUM_TX_QUEUES]    = { .type = NLA_U32 },
        [IFLA_NUM_RX_QUEUES]    = { .type = NLA_U32 },
-       [IFLA_PHYS_PORT_ID]     = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
+       [IFLA_PHYS_PORT_ID]     = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
        [IFLA_CARRIER_CHANGES]  = { .type = NLA_U32 },  /* ignored */
+       [IFLA_PHYS_SWITCH_ID]   = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -2312,7 +2337,7 @@ errout:
 int ndo_dflt_fdb_add(struct ndmsg *ndm,
                     struct nlattr *tb[],
                     struct net_device *dev,
-                    const unsigned char *addr,
+                    const unsigned char *addr, u16 vid,
                     u16 flags)
 {
        int err = -EINVAL;
@@ -2338,6 +2363,28 @@ int ndo_dflt_fdb_add(struct ndmsg *ndm,
 }
 EXPORT_SYMBOL(ndo_dflt_fdb_add);
 
+static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid)
+{
+       u16 vid = 0;
+
+       if (vlan_attr) {
+               if (nla_len(vlan_attr) != sizeof(u16)) {
+                       pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n");
+                       return -EINVAL;
+               }
+
+               vid = nla_get_u16(vlan_attr);
+
+               if (!vid || vid >= VLAN_VID_MASK) {
+                       pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n",
+                               vid);
+                       return -EINVAL;
+               }
+       }
+       *p_vid = vid;
+       return 0;
+}
+
 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
@@ -2345,6 +2392,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlattr *tb[NDA_MAX+1];
        struct net_device *dev;
        u8 *addr;
+       u16 vid;
        int err;
 
        err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
@@ -2370,6 +2418,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        addr = nla_data(tb[NDA_LLADDR]);
 
+       err = fdb_vid_parse(tb[NDA_VLAN], &vid);
+       if (err)
+               return err;
+
        err = -EOPNOTSUPP;
 
        /* Support fdb on master device the net/bridge default case */
@@ -2378,7 +2430,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
                struct net_device *br_dev = netdev_master_upper_dev_get(dev);
                const struct net_device_ops *ops = br_dev->netdev_ops;
 
-               err = ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags);
+               err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
+                                      nlh->nlmsg_flags);
                if (err)
                        goto out;
                else
@@ -2389,9 +2442,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
        if ((ndm->ndm_flags & NTF_SELF)) {
                if (dev->netdev_ops->ndo_fdb_add)
                        err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
+                                                          vid,
                                                           nlh->nlmsg_flags);
                else
-                       err = ndo_dflt_fdb_add(ndm, tb, dev, addr,
+                       err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
                                               nlh->nlmsg_flags);
 
                if (!err) {
@@ -2409,7 +2463,7 @@ out:
 int ndo_dflt_fdb_del(struct ndmsg *ndm,
                     struct nlattr *tb[],
                     struct net_device *dev,
-                    const unsigned char *addr)
+                    const unsigned char *addr, u16 vid)
 {
        int err = -EINVAL;
 
@@ -2438,6 +2492,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct net_device *dev;
        int err = -EINVAL;
        __u8 *addr;
+       u16 vid;
 
        if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
@@ -2465,6 +2520,10 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        addr = nla_data(tb[NDA_LLADDR]);
 
+       err = fdb_vid_parse(tb[NDA_VLAN], &vid);
+       if (err)
+               return err;
+
        err = -EOPNOTSUPP;
 
        /* Support fdb on master device the net/bridge default case */
@@ -2474,7 +2533,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
                const struct net_device_ops *ops = br_dev->netdev_ops;
 
                if (ops->ndo_fdb_del)
-                       err = ops->ndo_fdb_del(ndm, tb, dev, addr);
+                       err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
 
                if (err)
                        goto out;
@@ -2485,9 +2544,10 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
        /* Embedded bridge, macvlan, and any other device support */
        if (ndm->ndm_flags & NTF_SELF) {
                if (dev->netdev_ops->ndo_fdb_del)
-                       err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
+                       err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
+                                                          vid);
                else
-                       err = ndo_dflt_fdb_del(ndm, tb, dev, addr);
+                       err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
 
                if (!err) {
                        rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
@@ -2627,12 +2687,22 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
        return skb->len;
 }
 
+static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
+                              unsigned int attrnum, unsigned int flag)
+{
+       if (mask & flag)
+               return nla_put_u8(skb, attrnum, !!(flags & flag));
+       return 0;
+}
+
 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                           struct net_device *dev, u16 mode)
+                           struct net_device *dev, u16 mode,
+                           u32 flags, u32 mask)
 {
        struct nlmsghdr *nlh;
        struct ifinfomsg *ifm;
        struct nlattr *br_afspec;
+       struct nlattr *protinfo;
        u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
        struct net_device *br_dev = netdev_master_upper_dev_get(dev);
 
@@ -2671,6 +2741,33 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        }
        nla_nest_end(skb, br_afspec);
 
+       protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
+       if (!protinfo)
+               goto nla_put_failure;
+
+       if (brport_nla_put_flag(skb, flags, mask,
+                               IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
+           brport_nla_put_flag(skb, flags, mask,
+                               IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
+           brport_nla_put_flag(skb, flags, mask,
+                               IFLA_BRPORT_FAST_LEAVE,
+                               BR_MULTICAST_FAST_LEAVE) ||
+           brport_nla_put_flag(skb, flags, mask,
+                               IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
+           brport_nla_put_flag(skb, flags, mask,
+                               IFLA_BRPORT_LEARNING, BR_LEARNING) ||
+           brport_nla_put_flag(skb, flags, mask,
+                               IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
+           brport_nla_put_flag(skb, flags, mask,
+                               IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
+           brport_nla_put_flag(skb, flags, mask,
+                               IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
+               nla_nest_cancel(skb, protinfo);
+               goto nla_put_failure;
+       }
+
+       nla_nest_end(skb, protinfo);
+
        return nlmsg_end(skb, nlh);
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -2685,13 +2782,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
        int idx = 0;
        u32 portid = NETLINK_CB(cb->skb).portid;
        u32 seq = cb->nlh->nlmsg_seq;
-       struct nlattr *extfilt;
        u32 filter_mask = 0;
 
-       extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
-                                 IFLA_EXT_MASK);
-       if (extfilt)
-               filter_mask = nla_get_u32(extfilt);
+       if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
+               struct nlattr *extfilt;
+
+               extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
+                                         IFLA_EXT_MASK);
+               if (extfilt) {
+                       if (nla_len(extfilt) < sizeof(filter_mask))
+                               return -EINVAL;
+
+                       filter_mask = nla_get_u32(extfilt);
+               }
+       }
 
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
@@ -2798,6 +2902,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_spec) {
                nla_for_each_nested(attr, br_spec, rem) {
                        if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                               if (nla_len(attr) < sizeof(flags))
+                                       return -EINVAL;
+
                                have_flags = true;
                                flags = nla_get_u16(attr);
                                break;
@@ -2868,6 +2975,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_spec) {
                nla_for_each_nested(attr, br_spec, rem) {
                        if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                               if (nla_len(attr) < sizeof(flags))
+                                       return -EINVAL;
+
                                have_flags = true;
                                flags = nla_get_u16(attr);
                                break;
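
Summing up the rtnetlink side: a VLAN ID parsed from NDA_VLAN by the new fdb_vid_parse() is threaded through ndo_fdb_add/ndo_fdb_del and their ndo_dflt_* fallbacks, ndo_dflt_bridge_getlink() additionally dumps the per-port flags in a nested IFLA_PROTINFO (emitting, via brport_nla_put_flag(), only the attributes whose flag is present in the mask), and the getlink/setlink/dellink paths grow nla_len() checks so undersized attributes are rejected instead of read past. A minimal raw-netlink sketch of the userspace side of the FDB change, roughly what "bridge fdb add ... vlan 100 master" sends (needs CAP_NET_ADMIN; error handling trimmed, and "swp1" is a hypothetical port name):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/if_ether.h>
#include <linux/neighbour.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <sys/socket.h>

static struct {
	struct nlmsghdr nlh;
	struct ndmsg ndm;
	char attrs[64];
} req;

static void add_attr(unsigned short type, const void *data,
		     unsigned short len)
{
	struct rtattr *rta = (struct rtattr *)
		((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
			    RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	unsigned short vid = 100;	/* 1..4094, see fdb_vid_parse() */
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nlh.nlmsg_type = RTM_NEWNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	req.ndm.ndm_family = PF_BRIDGE;
	req.ndm.ndm_state = NUD_PERMANENT;
	req.ndm.ndm_flags = NTF_MASTER;	/* route via the bridge master */
	req.ndm.ndm_ifindex = if_nametoindex("swp1");

	add_attr(NDA_LLADDR, mac, ETH_ALEN);
	add_attr(NDA_VLAN, &vid, sizeof(vid));	/* parsed by fdb_vid_parse() */

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&nladdr, sizeof(nladdr)) < 0)
		perror("sendto");
	close(fd);
	return 0;
}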
index 8e6ae9422a7bf05543de4ba8f279e40de3c55c8b..19f0387390879cca24873e736f6879245e56e575 100644 (file)
@@ -781,7 +781,7 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                goto out_release;
 
        skb_reserve(skb, sk->sk_prot->max_header);
-       rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+       rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc != 0)
                goto out_discard;
 
index 25733d53814763c85e49a612df3bbcdb202992d6..810228646de38f9fe26eb2c75a84fbc000840f7b 100644 (file)
@@ -1760,7 +1760,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
                if ((chunk + copied) > size)
                        chunk = size - copied;
 
-               if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+               if (memcpy_to_msg(msg, skb->data, chunk)) {
                        rv = -EFAULT;
                        break;
                }
@@ -2032,7 +2032,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
 
                skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
 
-               if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+               if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                        err = -EFAULT;
                        goto out;
                }
index b8555ec71387f1c98f2403ee22926cff846225a9..2c7a93e7167e97bc22b949812550a46e2f48be9d 100644 (file)
@@ -276,7 +276,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        if (err < 0)
                goto out_skb;
 
-       err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+       err = memcpy_from_msg(skb_put(skb, size), msg, size);
        if (err < 0)
                goto out_skb;
 
index 21c38945ab8b0b2ddef710b609f58d4ebecaaf5c..61e9d2972947e7827a2af431ec5000b70dc9d99f 100644 (file)
@@ -150,7 +150,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
 
-       err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+       err = memcpy_from_msg(skb_put(skb, size), msg, size);
        if (err < 0)
                goto out_skb;
 
index 3a096bb2d59658f6078c42ef68dfa1d932adda5d..a44773c8346c13c24535448f7e33105c894ac279 100644 (file)
@@ -1386,6 +1386,17 @@ out:
        return pp;
 }
 
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+{
+       if (sk->sk_family == AF_INET)
+               return ip_recv_error(sk, msg, len, addr_len);
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6)
+               return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
+#endif
+       return -EINVAL;
+}
+
 static int inet_gro_complete(struct sk_buff *skb, int nhoff)
 {
        __be16 newlen = htons(skb->len - nhoff);
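
inet_recv_error() centralizes the MSG_ERRQUEUE family dispatch. ping_recvmsg() (below) loses its open-coded version, and tcp_recvmsg() gets an actual fix: it previously called ip_recv_error() even on an AF_INET6 socket. Userspace is unaffected; the pattern is still to enable IP_RECVERR (or IPV6_RECVERR) and drain the queue with recvmsg(MSG_ERRQUEUE), e.g.:

#include <errno.h>
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	char ctrl[512];
	struct msghdr msg = {
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};

	/* Ask the kernel to queue ICMP errors for this socket... */
	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

	/* ...and drain them later. For TCP and ping sockets this
	 * recvmsg() is now served by inet_recv_error(), which picks
	 * the v4 or v6 handler from sk->sk_family. */
	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0 &&
	    errno == EAGAIN)
		printf("error queue empty\n");
	return 0;
}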
index 3dfe9828e7ef73cd6239324ea35e4e0c14415fa0..b986298a7ba39908290ccd808a24947d776b91b4 100644 (file)
@@ -64,15 +64,13 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
 }
 
 static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
-                                 void *data, int hdrlen, u8 ipproto)
+                                 void *data, size_t hdrlen, u8 ipproto)
 {
        __be16 *pd = data;
-       u16 start = ntohs(pd[0]);
-       u16 offset = ntohs(pd[1]);
-       u16 poffset = 0;
-       u16 plen;
-       __wsum csum, delta;
-       __sum16 *psum;
+       size_t start = ntohs(pd[0]);
+       size_t offset = ntohs(pd[1]);
+       size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+       __wsum delta;
 
        if (skb->remcsum_offload) {
                /* Already processed in GRO path */
@@ -80,35 +78,15 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
                return guehdr;
        }
 
-       if (start > skb->len - hdrlen ||
-           offset > skb->len - hdrlen - sizeof(u16))
-               return NULL;
-
-       if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
-               __skb_checksum_complete(skb);
-
-       plen = hdrlen + offset + sizeof(u16);
        if (!pskb_may_pull(skb, plen))
                return NULL;
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];
 
-       if (ipproto == IPPROTO_IP && sizeof(struct iphdr) < plen) {
-               struct iphdr *ip = (struct iphdr *)(skb->data + hdrlen);
-
-               /* If next header happens to be IP we can skip that for the
-                * checksum calculation since the IP header checksum is zero
-                * if correct.
-                */
-               poffset = ip->ihl * 4;
-       }
-
-       csum = csum_sub(skb->csum, skb_checksum(skb, poffset + hdrlen,
-                                               start - poffset - hdrlen, 0));
+       if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
+               __skb_checksum_complete(skb);
 
-       /* Set derived checksum in packet */
-       psum = (__sum16 *)(skb->data + hdrlen + offset);
-       delta = csum_sub(csum_fold(csum), *psum);
-       *psum = csum_fold(csum);
+       delta = remcsum_adjust((void *)guehdr + hdrlen,
+                              skb->csum, start, offset);
 
        /* Adjust skb->csum since we changed the packet */
        skb->csum = csum_add(skb->csum, delta);
@@ -158,9 +136,6 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 
        ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
 
-       /* Pull UDP header now, skb->data points to guehdr */
-       __skb_pull(skb, sizeof(struct udphdr));
-
        /* Pull csum through the guehdr now. This can be used if
         * there is a remote checksum offload.
         */
@@ -188,7 +163,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);
 
-       __skb_pull(skb, hdrlen);
+       __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);
 
        return -guehdr->proto_ctype;
@@ -248,24 +223,17 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                                      size_t hdrlen, u8 ipproto)
 {
        __be16 *pd = data;
-       u16 start = ntohs(pd[0]);
-       u16 offset = ntohs(pd[1]);
-       u16 poffset = 0;
-       u16 plen;
-       void *ptr;
-       __wsum csum, delta;
-       __sum16 *psum;
+       size_t start = ntohs(pd[0]);
+       size_t offset = ntohs(pd[1]);
+       size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+       __wsum delta;
 
        if (skb->remcsum_offload)
                return guehdr;
 
-       if (start > skb_gro_len(skb) - hdrlen ||
-           offset > skb_gro_len(skb) - hdrlen - sizeof(u16) ||
-           !NAPI_GRO_CB(skb)->csum_valid || skb->remcsum_offload)
+       if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;
 
-       plen = hdrlen + offset + sizeof(u16);
-
        /* Pull checksum that will be written */
        if (skb_gro_header_hard(skb, off + plen)) {
                guehdr = skb_gro_header_slow(skb, off + plen, off);
@@ -273,26 +241,8 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                        return NULL;
        }
 
-       ptr = (void *)guehdr + hdrlen;
-
-       if (ipproto == IPPROTO_IP &&
-           (hdrlen + sizeof(struct iphdr) < plen)) {
-               struct iphdr *ip = (struct iphdr *)(ptr + hdrlen);
-
-               /* If next header happens to be IP we can skip
-                * that for the checksum calculation since the
-                * IP header checksum is zero if correct.
-                */
-               poffset = ip->ihl * 4;
-       }
-
-       csum = csum_sub(NAPI_GRO_CB(skb)->csum,
-                       csum_partial(ptr + poffset, start - poffset, 0));
-
-       /* Set derived checksum in packet */
-       psum = (__sum16 *)(ptr + offset);
-       delta = csum_sub(csum_fold(csum), *psum);
-       *psum = csum_fold(csum);
+       delta = remcsum_adjust((void *)guehdr + hdrlen,
+                              NAPI_GRO_CB(skb)->csum, start, offset);
 
        /* Adjust skb->csum since we changed the packet */
        skb->csum = csum_add(skb->csum, delta);
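
Both the regular and GRO remote-checksum-offload paths in fou collapse their open-coded arithmetic into remcsum_adjust() and drop the IPPROTO_IP shortcut that skipped the (zero-sum when correct) IP header; the explicit bounds checks are replaced by computing plen up front and letting pskb_may_pull()/the GRO header pull enforce it. The helper itself is introduced elsewhere in this merge; its shape is roughly the following (hedged sketch, see include/net/checksum.h for the real definition):

/* "csum" covers the packet from "ptr" onward; fold everything from
 * "start" on into the 16-bit field at "offset" and return the delta
 * by which the packet checksum changed. */
static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
				    int start, int offset)
{
	__sum16 *psum = (__sum16 *)(ptr + offset);
	__wsum delta;

	/* Subtract out the checksum of the bytes before "start". */
	csum = csum_sub(csum, csum_partial(ptr, start, 0));

	/* Write the derived checksum into the packet and report how
	 * much the overall checksum moved (callers add this delta to
	 * skb->csum, as both hunks above do). */
	delta = csum_sub((__force __wsum)csum_fold(csum),
			 (__force __wsum)*psum);
	*psum = csum_fold(csum);

	return delta;
}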
index 3e861011e4a31e57b21c3fa507c178d3693a7970..1a7e979e80ba356f685ecfe020b98855f19db0a3 100644 (file)
@@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .validate       = vti_tunnel_validate,
        .newlink        = vti_newlink,
        .changelink     = vti_changelink,
+       .dellink        = ip_tunnel_dellink,
        .get_size       = vti_get_size,
        .fill_info      = vti_fill_info,
 };
index ce2920f5bef391c8e0c500ba8b9d17c415621274..8dd4ae0424fcd75269b3dfb4950aff0723f22155 100644 (file)
@@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
                                             &ipv6_hdr(skb)->daddr))
                                continue;
 #endif
+               } else {
+                       continue;
                }
 
                if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
@@ -660,7 +662,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
         *      Fetch the ICMP header provided by the userland.
         *      iovec is modified! The ICMP header is consumed.
         */
-       if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len))
+       if (memcpy_from_msg(user_icmph, msg, icmph_len))
                return -EFAULT;
 
        if (family == AF_INET) {
@@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (flags & MSG_OOB)
                goto out;
 
-       if (flags & MSG_ERRQUEUE) {
-               if (family == AF_INET) {
-                       return ip_recv_error(sk, msg, len, addr_len);
-#if IS_ENABLED(CONFIG_IPV6)
-               } else if (family == AF_INET6) {
-                       return pingv6_ops.ipv6_recv_error(sk, msg, len,
-                                                         addr_len);
-#endif
-               }
-       }
+       if (flags & MSG_ERRQUEUE)
+               return inet_recv_error(sk, msg, len, addr_len);
 
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
index c239f4740d10b10b67ef4fa44c831851fb9e1dcf..dc13a3657e8e1b81ba0cb1fcd5386a9d0b106168 100644 (file)
@@ -1349,7 +1349,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
 
                if (len > 0) {
                        if (!(flags & MSG_TRUNC))
-                               err = memcpy_toiovec(msg->msg_iov, &c, 1);
+                               err = memcpy_to_msg(msg, &c, 1);
                        len = 1;
                } else
                        msg->msg_flags |= MSG_TRUNC;
@@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        u32 urg_hole = 0;
 
        if (unlikely(flags & MSG_ERRQUEUE))
-               return ip_recv_error(sk, msg, len, addr_len);
+               return inet_recv_error(sk, msg, len, addr_len);
 
        if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
            (sk->sk_state == TCP_ESTABLISHED))
index d22a31f27ab4f16d40d5a168cbb7b01dbe20a2c5..69de1a1c05c9fc575b171f85711a1e3c5aede8c0 100644 (file)
@@ -4368,7 +4368,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
        if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
                goto err_free;
 
-       if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
+       if (memcpy_from_msg(skb_put(skb, size), msg, size))
                goto err_free;
 
        TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
index 2c6a955fd5c32bf102534a6d4f8a661cf5641331..33f5ff068c7958515e0f63792883a58fb5d6a341 100644 (file)
@@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
        if (th->rst)
                return;
 
-       if (skb_rtable(skb)->rt_type != RTN_LOCAL)
+       /* If sk not NULL, it means we did a successful lookup and incoming
+        * route had to be correct. prequeue might have dropped our dst.
+        */
+       if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;
 
        /* Swap the send and the receive. */
index 4a16b9129079791e160634f4dfdcd0302beab568..b2d606833ce4bd725b3b1929d481d4c49d5ebcd2 100644 (file)
@@ -1284,9 +1284,8 @@ try_again:
                err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
                                            msg, copied);
        else {
-               err = skb_copy_and_csum_datagram_iovec(skb,
-                                                      sizeof(struct udphdr),
-                                                      msg->msg_iov);
+               err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
+                                                    msg);
 
                if (err == -EINVAL)
                        goto csum_copy_err;
index 9eac3a7fefa39b5f57086c74934dd5e8867020d3..f7c8bbeb27b704c0106f714d5a0677c27d3346e0 100644 (file)
@@ -5380,10 +5380,8 @@ static void __net_exit addrconf_exit_net(struct net *net)
        __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
        __addrconf_sysctl_unregister(net->ipv6.devconf_all);
 #endif
-       if (!net_eq(net, &init_net)) {
-               kfree(net->ipv6.devconf_dflt);
-               kfree(net->ipv6.devconf_all);
-       }
+       kfree(net->ipv6.devconf_dflt);
+       kfree(net->ipv6.devconf_all);
 }
 
 static struct pernet_operations addrconf_ops = {
index f6e2533c1145e5111b15411f91fa05697ed7a2e0..13cda4c6313bad7ecca5b20b5be7c8ac28bcf1ab 100644 (file)
@@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
 
                skb->protocol = gre_proto;
                /* WCCP version 1 and 2 protocol decoding.
-                * - Change protocol to IP
+                * - Change protocol to IPv6
                 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
                 */
                if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
-                       skb->protocol = htons(ETH_P_IP);
+                       skb->protocol = htons(ETH_P_IPV6);
                        if ((*(h + offset) & 0xF0) != 0x40)
                                offset += 4;
                }
index fd76ce938c32ffd7fc1c007d7e208c0d92a2de8c..46d452a56d3e1ce9b1a0f9f8d8edfede3675bf21 100644 (file)
@@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        int nhoff;
 
        if (unlikely(skb_shinfo(skb)->gso_type &
-                    ~(SKB_GSO_UDP |
+                    ~(SKB_GSO_TCPV4 |
+                      SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
                       SKB_GSO_GRE |
index b04ed72c454247886d7d99ae5c9e36e949b48cd1..8db6c98fe21858f4b3f630af277a0137e438aa8d 100644 (file)
@@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
        uh->source = src_port;
 
        uh->len = htons(skb->len);
-       uh->check = 0;
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
                            | IPSKB_REROUTED);
        skb_dst_set(skb, dst);
 
-       udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr,
-                     &sk->sk_v6_daddr, skb->len);
+       udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len);
 
        __skb_push(skb, sizeof(*ip6h));
        skb_reset_network_header(skb);
index 83082168b0565cae0b758a649940a59a7f3ac6b6..16a7e81e3f9987ae321776791344ae77f632d39e 100644 (file)
@@ -911,6 +911,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev,
        return vti6_tnl_create2(dev);
 }
 
+static void vti6_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       if (dev != ip6n->fb_tnl_dev)
+               unregister_netdevice_queue(dev, head);
+}
+
 static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
                           struct nlattr *data[])
 {
@@ -986,6 +995,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
        .setup          = vti6_dev_setup,
        .validate       = vti6_validate,
        .newlink        = vti6_newlink,
+       .dellink        = vti6_dellink,
        .changelink     = vti6_changelink,
        .get_size       = vti6_get_size,
        .fill_info      = vti6_fill_info,
@@ -1026,6 +1036,7 @@ static int __net_init vti6_init_net(struct net *net)
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
        dev_net_set(ip6n->fb_tnl_dev, net);
+       ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;
 
        err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
        if (err < 0)
index 0cbcf98f2cabe2adfacb1f07372524099e56391b..8baa53e17a3044031417976feca82a183eb9cdb7 100644 (file)
@@ -492,7 +492,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
                        goto csum_copy_err;
                err = skb_copy_datagram_msg(skb, 0, msg, copied);
        } else {
-               err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
+               err = skb_copy_and_csum_datagram_msg(skb, 0, msg);
                if (err == -EINVAL)
                        goto csum_copy_err;
        }
index 1985b4933a6bbc6a24ae502f601afac29896fab8..d06af89162f12fcd6b13e18acc1130206e65816f 100644 (file)
@@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
        if (th->rst)
                return;
 
-       if (!ipv6_unicast_destination(skb))
+       /* If sk not NULL, it means we did a successful lookup and incoming
+        * route had to be correct. prequeue might have dropped our dst.
+        */
+       if (!sk && !ipv6_unicast_destination(skb))
                return;
 
 #ifdef CONFIG_TCP_MD5SIG
index dbc0b042bdd6b09eeb3adf5fd81b8bb8bf727515..7cfb5d745a2d4c6b3b3b524f947f69a2a692209c 100644 (file)
@@ -428,7 +428,7 @@ try_again:
                err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
                                            msg, copied);
        else {
-               err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
+               err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
                if (err == -EINVAL)
                        goto csum_copy_err;
        }
index 97dc4320ac1526be1ec32a2840490d4b4bbb020a..f11ad1d95e0e6e88294003fee338743863daa443 100644 (file)
@@ -1745,8 +1745,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
                memcpy(usipx->sipx_node, ipxs->dest_addr.node, IPX_NODE_LEN);
        }
 
-       rc = ipxrtr_route_packet(sk, usipx, msg->msg_iov, len,
-                                flags & MSG_DONTWAIT);
+       rc = ipxrtr_route_packet(sk, usipx, msg, len, flags & MSG_DONTWAIT);
        if (rc >= 0)
                rc = len;
 out:
index 67e7ad3d46b1fb4489a175836351607e7f5ae741..3e2a32a9f3bda13459d974fed70bb8dcc4c54fea 100644 (file)
@@ -165,7 +165,7 @@ int ipxrtr_route_skb(struct sk_buff *skb)
  * Route an outgoing frame from a socket.
  */
 int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
-                       struct iovec *iov, size_t len, int noblock)
+                       struct msghdr *msg, size_t len, int noblock)
 {
        struct sk_buff *skb;
        struct ipx_sock *ipxs = ipx_sk(sk);
@@ -229,7 +229,7 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
        memcpy(ipx->ipx_dest.node, usipx->sipx_node, IPX_NODE_LEN);
        ipx->ipx_dest.sock              = usipx->sipx_port;
 
-       rc = memcpy_fromiovec(skb_put(skb, len), iov, len);
+       rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc) {
                kfree_skb(skb);
                goto out_put;
index e8c409055922924c6f7a868e10b892ae523a73a9..568edc72d7371f2b01dffa9af9cc114fad5d7967 100644 (file)
@@ -1319,7 +1319,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
        skb_reserve(skb, self->max_header_size + 16);
        skb_reset_transport_header(skb);
        skb_put(skb, len);
-       err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+       err = memcpy_from_msg(skb_transport_header(skb), msg, len);
        if (err) {
                kfree_skb(skb);
                goto out_err;
@@ -1466,7 +1466,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
                }
 
                chunk = min_t(unsigned int, skb->len, size);
-               if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+               if (memcpy_to_msg(msg, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        if (copied == 0)
                                copied = -EFAULT;
@@ -1569,7 +1569,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
 
        pr_debug("%s(), appending user data\n", __func__);
        skb_put(skb, len);
-       err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+       err = memcpy_from_msg(skb_transport_header(skb), msg, len);
        if (err) {
                kfree_skb(skb);
                goto out;
@@ -1678,7 +1678,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
 
        pr_debug("%s(), appending user data\n", __func__);
        skb_put(skb, len);
-       err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+       err = memcpy_from_msg(skb_transport_header(skb), msg, len);
        if (err) {
                kfree_skb(skb);
                goto out;
index 057b5647ef925ef5fbc0edcded57226ca511b8b8..1cd3f810723985efcbd13431e078b9885de2e2f7 100644 (file)
@@ -1122,7 +1122,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        }
        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
-       if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+       if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                err = -EFAULT;
                goto fail;
        }
index e5883091a8c6b4bd985ebaaa5b4c0b1e401b652c..f8ac939d52b4b83ce3720e274f33461347029661 100644 (file)
@@ -3611,7 +3611,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
                goto out;
 
        err = -EFAULT;
-       if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len))
+       if (memcpy_from_msg(skb_put(skb,len), msg, len))
                goto out;
 
        hdr = pfkey_get_base_msg(skb, &err);
index a6cc1fed2b529a9e97248d1a6e47585067c4e24b..05dfc8aa36afc83b61e2cefeb76738ded8a4db2d 100644 (file)
@@ -441,7 +441,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
        *((__be32 *) skb_put(skb, 4)) = 0;
 
        /* Copy user data into skb */
-       rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+       rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc < 0) {
                kfree_skb(skb);
                goto error;
index c559bcdf4679eb4e68123bc4d4f31cabf6b035f9..cc7a828fc914d7e05a9495b70df4b1411e0de60e 100644 (file)
@@ -346,8 +346,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
        skb_put(skb, 2);
 
        /* Copy user data into skb */
-       error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
-                                total_len);
+       error = memcpy_from_msg(skb_put(skb, total_len), m, total_len);
        if (error < 0) {
                kfree_skb(skb);
                goto error_put_sess_tun;
index af662669f9513e4b643448d201846ca3fc647ccd..2c0b83ce43bda478f6c56ebdc0951a54658c9098 100644 (file)
@@ -921,7 +921,7 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
        skb->dev      = llc->dev;
        skb->protocol = llc_proto_type(addr->sllc_arphrd);
        skb_reserve(skb, hdrlen);
-       rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied);
+       rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
        if (rc)
                goto out;
        if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
index 9ef88c8dd68ae89771263d6e922905f34bfe8293..a11674806707e18fb9d86860ad0717dfda0b0da2 100644 (file)
@@ -611,16 +611,12 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
-
-       /* We have to check the DYING flag after unlink to prevent
-        * a race against nf_ct_get_next_corpse() possibly called from
-        * user context, else we insert an already 'dead' hash, blocking
-        * further use of that particular connection -JM.
-        */
-       nf_ct_del_from_dying_or_unconfirmed_list(ct);
+       /* We have to check the DYING flag inside the lock to prevent
+        * a race against nf_ct_get_next_corpse() possibly called from
+        * user context, else we insert an already 'dead' hash, blocking
+        * further use of that particular connection -JM */
 
        if (unlikely(nf_ct_is_dying(ct))) {
-               nf_ct_add_to_dying_list(ct);
                nf_conntrack_double_unlock(hash, reply_hash);
                local_bh_enable();
                return NF_ACCEPT;
@@ -640,6 +636,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
 
+       nf_ct_del_from_dying_or_unconfirmed_list(ct);
+
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
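
The conntrack reorder above is subtle: the entry is now unlinked from the
dying/unconfirmed list only after both the DYING check and the hash clash
scan have passed, so neither error path has to re-add it (which is why the
nf_ct_add_to_dying_list() call disappears). A minimal user-space sketch of
the corrected control flow, with stand-in names rather than the kernel
helpers:

    /* Sketch of the corrected __nf_conntrack_confirm() ordering.
     * Plain C model; the lock, list and hash helpers are stand-ins,
     * not kernel calls. */
    #include <stdbool.h>
    #include <stdio.h>

    struct conn { bool dying; bool clashes; };

    static int confirm(struct conn *ct)
    {
            /* the hash lock would be taken here */
            if (ct->dying)          /* checked while still on its list, */
                    return -1;      /* so there is nothing to undo      */
            if (ct->clashes)
                    return -1;      /* likewise: entry never unlinked   */
            /* only now is the entry removed from dying/unconfirmed and
             * inserted into the hash, so no error path re-adds it */
            printf("confirmed\n");
            return 0;
    }

    int main(void)
    {
            struct conn c = { .dying = false, .clashes = false };
            return confirm(&c);
    }
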
index e1aad6eeac147c04231e0762e91675414c02e4b5..63aa5c8acf1285f6864a11dcf572eb022d7fe624 100644 (file)
@@ -2325,7 +2325,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        NETLINK_CB(skb).flags   = netlink_skb_flags;
 
        err = -EFAULT;
-       if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+       if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                kfree_skb(skb);
                goto out;
        }
index 7e13f6afcd1fb774264a3fa890603fc1c633baaf..69f1d5e9959f23646e82686cf5d17e1685c34c4f 100644 (file)
@@ -1113,7 +1113,7 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
        skb_put(skb, len);
 
        /* User data follows immediately after the NET/ROM transport header */
-       if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
+       if (memcpy_from_msg(skb_transport_header(skb), msg, len)) {
                kfree_skb(skb);
                err = -EFAULT;
                goto out;
index a3ad69a4c648c76779bf496153bfa7eb01f5354c..c4da0c2d8a14dc8c7e676dc0ca3b4900ab31318f 100644 (file)
@@ -665,7 +665,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
        if (msg_data == NULL)
                return -ENOMEM;
 
-       if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) {
+       if (memcpy_from_msg(msg_data, msg, len)) {
                kfree(msg_data);
                return -EFAULT;
        }
@@ -731,7 +731,7 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
        if (msg_data == NULL)
                return -ENOMEM;
 
-       if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) {
+       if (memcpy_from_msg(msg_data, msg, len)) {
                kfree(msg_data);
                return -EFAULT;
        }
index 9d7d2b7ba5e413ac81e038eb099de6be5d81cfe4..373e138c0ab6687686c084ac6daad55af7b1dc72 100644 (file)
@@ -231,7 +231,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
        if (skb == NULL)
                return rc;
 
-       rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+       rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc < 0) {
                kfree_skb(skb);
                return rc;
index 58af58026dd2889d7565f2ecc46506f0400f6d4a..efa84450113698a4aa36f29704569afe0d11f126 100644 (file)
@@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync)
                __unregister_prot_hook(sk, sync);
 }
 
-static inline __pure struct page *pgv_to_page(void *addr)
+static inline struct page * __pure pgv_to_page(void *addr)
 {
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
@@ -1676,7 +1676,7 @@ retry:
                        if (len < hhlen)
                                skb_reset_network_header(skb);
                }
-               err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+               err = memcpy_from_msg(skb_put(skb, len), msg, len);
                if (err)
                        goto out_free;
                goto retry;
@@ -2408,6 +2408,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        unsigned short gso_type = 0;
        int hlen, tlen;
        int extra_len = 0;
+       struct iov_iter from;
+       ssize_t n;
+
+       iov_iter_init(&from, WRITE, msg->msg_iov, msg->msg_iovlen, len);
 
        /*
         *      Get and verify the address.
@@ -2446,9 +2450,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 
                len -= vnet_hdr_len;
 
-               err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
-                                      vnet_hdr_len);
-               if (err < 0)
+               err = -EFAULT;
+               n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &from);
+               if (n != vnet_hdr_len)
                        goto out_unlock;
 
                if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -2518,7 +2522,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        }
 
        /* Returns -EFAULT on error */
-       err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
+       err = skb_copy_datagram_from_iter(skb, offset, &from, len);
        if (err)
                goto out_free;
 
@@ -2950,8 +2954,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
                        vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
                } /* else everything is zero */
 
-               err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
-                                    vnet_hdr_len);
+               err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
                if (err < 0)
                        goto out_free;
        }
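
The af_packet hunks show the general shape of the memcpy_fromiovec() to
iov_iter migration: packet_snd() now builds one iterator up front, and both
the vnet header copy and the payload copy advance the same cursor instead of
each rescanning the raw iovec array. A rough user-space model of that cursor
(hypothetical helper names, not the kernel API):

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    struct iter { const struct iovec *iov; size_t nr, seg_off; };

    static size_t copy_from(struct iter *it, void *dst, size_t len)
    {
            size_t done = 0;

            while (done < len && it->nr) {
                    size_t avail = it->iov->iov_len - it->seg_off;
                    size_t n = avail < len - done ? avail : len - done;

                    memcpy((char *)dst + done,
                           (char *)it->iov->iov_base + it->seg_off, n);
                    done += n;
                    it->seg_off += n;
                    if (it->seg_off == it->iov->iov_len) {
                            it->iov++;
                            it->nr--;
                            it->seg_off = 0;
                    }
            }
            return done;    /* compared against len, as packet_snd() does */
    }

    int main(void)
    {
            char hdr[4] = "HDRX", pay[5] = "DATA";
            struct iovec iov[2] = { { hdr, 4 }, { pay, 5 } };
            struct iter it = { iov, 2, 0 };
            char vnet[4], body[6];

            copy_from(&it, vnet, 4);        /* consumes the header...  */
            copy_from(&it, body, 5);        /* ...payload resumes here */
            printf("%.4s %s\n", vnet, body);
            return 0;
    }
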
index 0918bc21eae6483f4f98cbdcbf9063b0b2cc10fd..26054b4b467c63d3bce92b8226ec767ad65ad960 100644 (file)
@@ -109,7 +109,7 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
                return err;
        skb_reserve(skb, MAX_PHONET_HEADER);
 
-       err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len);
+       err = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
        if (err < 0) {
                kfree_skb(skb);
                return err;
index 9cd069dfaf6518700575b05b0b56e811b8f6078c..5d3f2b7507d45a9b78ba0fd412cc3a8f57ad9178 100644 (file)
@@ -1141,7 +1141,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
                return err;
 
        skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
-       err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+       err = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (err < 0)
                goto outfree;
 
index 7280ab8810c203ed574131c4c9b8a5ea10cf292b..c36d713229e0f5c5a1b43fe227a1e04480ba100d 100644 (file)
@@ -316,8 +316,7 @@ int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
 void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
 void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
 void rds_ib_inc_free(struct rds_incoming *inc);
-int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
-                            size_t size);
+int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
 void rds_ib_recv_tasklet_fn(unsigned long data);
 void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
index d67de453c35aae7257bd29e9791adc2595f6a7d7..1b981a4e42c214d575a838b096da368a7f0316c6 100644 (file)
@@ -472,15 +472,12 @@ static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache
        return head;
 }
 
-int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
-                           size_t size)
+int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 {
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
-       struct iovec *iov = first_iov;
        unsigned long to_copy;
        unsigned long frag_off = 0;
-       unsigned long iov_off = 0;
        int copied = 0;
        int ret;
        u32 len;
@@ -489,37 +486,25 @@ int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        len = be32_to_cpu(inc->i_hdr.h_len);
 
-       while (copied < size && copied < len) {
+       while (iov_iter_count(to) && copied < len) {
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
-               while (iov_off == iov->iov_len) {
-                       iov_off = 0;
-                       iov++;
-               }
-
-               to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
-               to_copy = min_t(size_t, to_copy, size - copied);
+               to_copy = min_t(unsigned long, iov_iter_count(to),
+                               RDS_FRAG_SIZE - frag_off);
                to_copy = min_t(unsigned long, to_copy, len - copied);
 
-               rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
-                        "[%p, %u] + %lu\n",
-                        to_copy, iov->iov_base, iov->iov_len, iov_off,
-                        sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);
-
                /* XXX needs + offset for multiple recvs per page */
-               ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
-                                           frag->f_sg.offset + frag_off,
-                                           iov->iov_base + iov_off,
-                                           to_copy);
-               if (ret) {
-                       copied = ret;
-                       break;
-               }
+               rds_stats_add(s_copy_to_user, to_copy);
+               ret = copy_page_to_iter(sg_page(&frag->f_sg),
+                                       frag->f_sg.offset + frag_off,
+                                       to_copy,
+                                       to);
+               if (ret != to_copy)
+                       return -EFAULT;
 
-               iov_off += to_copy;
                frag_off += to_copy;
                copied += to_copy;
        }
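
Note the error-handling change that comes with copy_page_to_iter(): the old
rds_page_copy_to_user() returned 0 or a negative errno, while the iterator
helpers return the number of bytes actually copied, so a short copy is
detected by comparing against the requested length. The same convention in
miniature (illustrative names only):

    #include <stdio.h>
    #include <string.h>

    #define EFAULT 14

    /* may copy less than asked, like copy_page_to_iter() on a fault */
    static size_t copy_to_buf(char *dst, size_t room,
                              const char *src, size_t len)
    {
            size_t n = len < room ? len : room;

            memcpy(dst, src, n);
            return n;
    }

    static int frag_to_user(char *dst, size_t room)
    {
            const char frag[] = "0123456789ABCDEF";  /* one fragment */
            size_t to_copy = sizeof(frag) - 1;

            if (copy_to_buf(dst, room, frag, to_copy) != to_copy)
                    return -EFAULT;         /* short copy == fault */
            return (int)to_copy;
    }

    int main(void)
    {
            char big[32], small[8];

            printf("big: %d\n", frag_to_user(big, sizeof(big)));       /* 16  */
            printf("small: %d\n", frag_to_user(small, sizeof(small))); /* -14 */
            return 0;
    }
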
index 04ce3b193f79022b7b585906a60432696fcb5dd5..cbe6674e31ee52f6c9fd4380e3f0942127ede7c9 100644 (file)
@@ -325,8 +325,7 @@ int rds_iw_recv(struct rds_connection *conn);
 int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
                       gfp_t page_gfp, int prefill);
 void rds_iw_inc_free(struct rds_incoming *inc);
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
-                            size_t size);
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
 void rds_iw_recv_tasklet_fn(unsigned long data);
 void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
index aa8bf6786008afdb6de72221d4678628f93469a0..a66d1794b2d0472e511a179ae9872c2766fb8dd8 100644 (file)
@@ -303,15 +303,12 @@ void rds_iw_inc_free(struct rds_incoming *inc)
        BUG_ON(atomic_read(&rds_iw_allocation) < 0);
 }
 
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
-                           size_t size)
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 {
        struct rds_iw_incoming *iwinc;
        struct rds_page_frag *frag;
-       struct iovec *iov = first_iov;
        unsigned long to_copy;
        unsigned long frag_off = 0;
-       unsigned long iov_off = 0;
        int copied = 0;
        int ret;
        u32 len;
@@ -320,37 +317,25 @@ int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
        frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
        len = be32_to_cpu(inc->i_hdr.h_len);
 
-       while (copied < size && copied < len) {
+       while (iov_iter_count(to) && copied < len) {
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
-               while (iov_off == iov->iov_len) {
-                       iov_off = 0;
-                       iov++;
-               }
-
-               to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
-               to_copy = min_t(size_t, to_copy, size - copied);
+               to_copy = min_t(unsigned long, iov_iter_count(to),
+                               RDS_FRAG_SIZE - frag_off);
                to_copy = min_t(unsigned long, to_copy, len - copied);
 
-               rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
-                        "[%p, %lu] + %lu\n",
-                        to_copy, iov->iov_base, iov->iov_len, iov_off,
-                        frag->f_page, frag->f_offset, frag_off);
-
                /* XXX needs + offset for multiple recvs per page */
-               ret = rds_page_copy_to_user(frag->f_page,
-                                           frag->f_offset + frag_off,
-                                           iov->iov_base + iov_off,
-                                           to_copy);
-               if (ret) {
-                       copied = ret;
-                       break;
-               }
+               rds_stats_add(s_copy_to_user, to_copy);
+               ret = copy_page_to_iter(frag->f_page,
+                                       frag->f_offset + frag_off,
+                                       to_copy,
+                                       to);
+               if (ret != to_copy)
+                       return -EFAULT;
 
-               iov_off += to_copy;
                frag_off += to_copy;
                copied += to_copy;
        }
index aba232f9f3081968081edb0ed6ece382e954b653..ff2202218187530378b270b0df4838286cb9fc14 100644 (file)
@@ -264,75 +264,54 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        return rm;
 }
 
-int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
-                                              size_t total_len)
+int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
 {
        unsigned long to_copy;
-       unsigned long iov_off;
        unsigned long sg_off;
-       struct iovec *iov;
        struct scatterlist *sg;
        int ret = 0;
 
-       rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
+       rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));
 
        /*
         * now allocate and copy in the data payload.
         */
        sg = rm->data.op_sg;
-       iov = first_iov;
-       iov_off = 0;
        sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
 
-       while (total_len) {
+       while (iov_iter_count(from)) {
                if (!sg_page(sg)) {
-                       ret = rds_page_remainder_alloc(sg, total_len,
+                       ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
                                                       GFP_HIGHUSER);
                        if (ret)
-                               goto out;
+                               return ret;
                        rm->data.op_nents++;
                        sg_off = 0;
                }
 
-               while (iov_off == iov->iov_len) {
-                       iov_off = 0;
-                       iov++;
-               }
-
-               to_copy = min(iov->iov_len - iov_off, sg->length - sg_off);
-               to_copy = min_t(size_t, to_copy, total_len);
-
-               rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to "
-                        "sg [%p, %u, %u] + %lu\n",
-                        to_copy, iov->iov_base, iov->iov_len, iov_off,
-                        (void *)sg_page(sg), sg->offset, sg->length, sg_off);
+               to_copy = min_t(unsigned long, iov_iter_count(from),
+                               sg->length - sg_off);
 
-               ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off,
-                                             iov->iov_base + iov_off,
-                                             to_copy);
-               if (ret)
-                       goto out;
+               rds_stats_add(s_copy_from_user, to_copy);
+               ret = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
+                                         to_copy, from);
+               if (ret != to_copy)
+                       return -EFAULT;
 
-               iov_off += to_copy;
-               total_len -= to_copy;
                sg_off += to_copy;
 
                if (sg_off == sg->length)
                        sg++;
        }
 
-out:
        return ret;
 }
 
-int rds_message_inc_copy_to_user(struct rds_incoming *inc,
-                                struct iovec *first_iov, size_t size)
+int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 {
        struct rds_message *rm;
-       struct iovec *iov;
        struct scatterlist *sg;
        unsigned long to_copy;
-       unsigned long iov_off;
        unsigned long vec_off;
        int copied;
        int ret;
@@ -341,36 +320,20 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
        rm = container_of(inc, struct rds_message, m_inc);
        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 
-       iov = first_iov;
-       iov_off = 0;
        sg = rm->data.op_sg;
        vec_off = 0;
        copied = 0;
 
-       while (copied < size && copied < len) {
-               while (iov_off == iov->iov_len) {
-                       iov_off = 0;
-                       iov++;
-               }
-
-               to_copy = min(iov->iov_len - iov_off, sg->length - vec_off);
-               to_copy = min_t(size_t, to_copy, size - copied);
+       while (iov_iter_count(to) && copied < len) {
+               to_copy = min(iov_iter_count(to), sg->length - vec_off);
                to_copy = min_t(unsigned long, to_copy, len - copied);
 
-               rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to "
-                        "sg [%p, %u, %u] + %lu\n",
-                        to_copy, iov->iov_base, iov->iov_len, iov_off,
-                        sg_page(sg), sg->offset, sg->length, vec_off);
-
-               ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
-                                           iov->iov_base + iov_off,
-                                           to_copy);
-               if (ret) {
-                       copied = ret;
-                       break;
-               }
+               rds_stats_add(s_copy_to_user, to_copy);
+               ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
+                                       to_copy, to);
+               if (ret != to_copy)
+                       return -EFAULT;
 
-               iov_off += to_copy;
                vec_off += to_copy;
                copied += to_copy;
 
index 48f8ffc60f8f1cee8e63448fe3aa7aaac0d5d4c1..c2a5eef41343c816f70e6dc16fc7a2fa9ea2d684 100644 (file)
@@ -431,8 +431,7 @@ struct rds_transport {
        int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
        int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
        int (*recv)(struct rds_connection *conn);
-       int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
-                               size_t size);
+       int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
        void (*inc_free)(struct rds_incoming *inc);
 
        int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
@@ -657,8 +656,7 @@ rds_conn_connecting(struct rds_connection *conn)
 /* message.c */
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
 struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
-int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
-                                              size_t total_len);
+int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
 void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
                                 __be16 dport, u64 seq);
@@ -667,8 +665,7 @@ int rds_message_add_extension(struct rds_header *hdr,
 int rds_message_next_extension(struct rds_header *hdr,
                               unsigned int *pos, void *buf, unsigned int *buflen);
 int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
-int rds_message_inc_copy_to_user(struct rds_incoming *inc,
-                                struct iovec *first_iov, size_t size);
+int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_message_inc_free(struct rds_incoming *inc);
 void rds_message_addref(struct rds_message *rm);
 void rds_message_put(struct rds_message *rm);
index bd82522534fc52f1705c7833d1b443c4ca695398..47d7b1029b33cc7d6a630f42ec1ff65d4fdd42db 100644 (file)
@@ -404,6 +404,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
        struct rds_incoming *inc = NULL;
+       struct iov_iter to;
 
        /* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
        timeo = sock_rcvtimeo(sk, nonblock);
@@ -449,8 +450,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
                         &inc->i_conn->c_faddr,
                         ntohs(inc->i_hdr.h_sport));
-               ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
-                                                            size);
+               iov_iter_init(&to, READ, msg->msg_iov, msg->msg_iovlen, size);
+               ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &to);
                if (ret < 0)
                        break;
 
index 0a64541020b0b52edb0cf197a55ff12cc36ccb8e..4de62ead1c711f75f225cd484a13437baa73b87e 100644 (file)
@@ -934,7 +934,9 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        int queued = 0, allocated_mr = 0;
        int nonblock = msg->msg_flags & MSG_DONTWAIT;
        long timeo = sock_sndtimeo(sk, nonblock);
+       struct iov_iter from;
 
+       iov_iter_init(&from, WRITE, msg->msg_iov, msg->msg_iovlen, payload_len);
        /* Mirror the Linux UDP handling of BSD error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
        if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
@@ -982,7 +984,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                        ret = -ENOMEM;
                        goto out;
                }
-               ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
+               ret = rds_message_copy_from_user(rm, &from);
                if (ret)
                        goto out;
        }
index 65637491f72857954207dc7fa974335110d44ef4..0dbdd37162da34ea33eddbd610d90aa78b32b93c 100644 (file)
@@ -69,8 +69,7 @@ void rds_tcp_recv_exit(void);
 void rds_tcp_data_ready(struct sock *sk);
 int rds_tcp_recv(struct rds_connection *conn);
 void rds_tcp_inc_free(struct rds_incoming *inc);
-int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
-                            size_t size);
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 
 /* tcp_send.c */
 void rds_tcp_xmit_prepare(struct rds_connection *conn);
index 9ae6e0a264ec0c52d2d495da66d1acdfb36f0917..fbc5ef88bc0e692ea4cf9cb59f6163905510928e 100644 (file)
@@ -59,50 +59,30 @@ void rds_tcp_inc_free(struct rds_incoming *inc)
 /*
  * this is pretty lame, but, whatever.
  */
-int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
-                            size_t size)
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 {
        struct rds_tcp_incoming *tinc;
-       struct iovec *iov, tmp;
        struct sk_buff *skb;
-       unsigned long to_copy, skb_off;
        int ret = 0;
 
-       if (size == 0)
+       if (!iov_iter_count(to))
                goto out;
 
        tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
-       iov = first_iov;
-       tmp = *iov;
 
        skb_queue_walk(&tinc->ti_skb_list, skb) {
-               skb_off = 0;
-               while (skb_off < skb->len) {
-                       while (tmp.iov_len == 0) {
-                               iov++;
-                               tmp = *iov;
-                       }
-
-                       to_copy = min(tmp.iov_len, size);
+               unsigned long to_copy, skb_off;
+               for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
+                       to_copy = iov_iter_count(to);
                        to_copy = min(to_copy, skb->len - skb_off);
 
-                       rdsdebug("ret %d size %zu skb %p skb_off %lu "
-                                "skblen %d iov_base %p iov_len %zu cpy %lu\n",
-                                ret, size, skb, skb_off, skb->len,
-                                tmp.iov_base, tmp.iov_len, to_copy);
-
-                       /* modifies tmp as it copies */
-                       if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
-                                                   to_copy)) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
+                       if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
+                               return -EFAULT;
 
                        rds_stats_add(s_copy_to_user, to_copy);
-                       size -= to_copy;
                        ret += to_copy;
-                       skb_off += to_copy;
-                       if (size == 0)
+
+                       if (!iov_iter_count(to))
                                goto out;
                }
        }
index 9b600c20a7a326e357d4917f149d1f2276bee597..43bac7c4dd9e62b136b97ad2fa99d81dbd39bf11 100644 (file)
@@ -1121,7 +1121,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
        skb_reset_transport_header(skb);
        skb_put(skb, len);
 
-       err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+       err = memcpy_from_msg(skb_transport_header(skb), msg, len);
        if (err) {
                kfree_skb(skb);
                return err;
index cbd7e1fd23b41bb7ba0bb348c3a1a287652cca93..9b05924cc386ecc2cdb9816be27e439637fb37b3 100644 (file)
@@ -481,12 +481,11 @@ begin:
                if (likely(rate))
                        do_div(len, rate);
                /* Since socket rate can change later,
-                * clamp the delay to 125 ms.
-                * TODO: maybe segment the too big skb, as in commit
-                * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
+                * clamp the delay to 1 second.
+                * Really, providers of too-big packets should be fixed!
                 */
-               if (unlikely(len > 125 * NSEC_PER_MSEC)) {
-                       len = 125 * NSEC_PER_MSEC;
+               if (unlikely(len > NSEC_PER_SEC)) {
+                       len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }
 
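
For scale on the sch_fq change: the delay being clamped is
bytes * NSEC_PER_SEC / rate, so a 64 KB GSO packet on a 1 Mbit/s flow wants
roughly 524 ms of pacing. The old 125 ms cap silently truncated that (the
flow sent too fast); the new 1 s cap leaves it intact:

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            uint64_t bytes = 65536, rate = 125000; /* 1 Mbit/s = 125 kB/s */
            uint64_t len = bytes * NSEC_PER_SEC / rate;

            if (len > NSEC_PER_SEC)         /* the new clamp */
                    len = NSEC_PER_SEC;
            printf("delay: %llu ns\n", (unsigned long long)len); /* 524288000 */
            return 0;
    }
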
index 158701da2d31c8c862fcffd59404b7c08d3b610a..a3380917f1973dba6c43507bc16b4f3fbc090c8a 100644 (file)
@@ -164,7 +164,7 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu
  */
 struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                                            struct sctp_sndrcvinfo *sinfo,
-                                           struct msghdr *msgh, int msg_len)
+                                           struct iov_iter *from)
 {
        int max, whole, i, offset, over, err;
        int len, first_len;
@@ -172,6 +172,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        struct sctp_chunk *chunk;
        struct sctp_datamsg *msg;
        struct list_head *pos, *temp;
+       size_t msg_len = iov_iter_count(from);
        __u8 frag;
 
        msg = sctp_datamsg_new(GFP_KERNEL);
@@ -279,12 +280,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                        goto errout;
                }
 
-               err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
+               err = sctp_user_addto_chunk(chunk, len, from);
                if (err < 0)
                        goto errout_chunk_free;
 
-               offset += len;
-
                /* Put the chunk->skb back into the form expected by send.  */
                __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
                           - (__u8 *)chunk->skb->data);
@@ -317,7 +316,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                        goto errout;
                }
 
-               err = sctp_user_addto_chunk(chunk, offset, over, msgh->msg_iov);
+               err = sctp_user_addto_chunk(chunk, over, from);
 
                /* Put the chunk->skb back into the form expected by send.  */
                __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
index 9f32741abb1c7b142265297dc2fac78b74b3d195..e49e231cef529ecaf4bf2bc3e6a168b8f8b6fc06 100644 (file)
@@ -1001,7 +1001,7 @@ no_mem:
 
 /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error.  */
 struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
-                                       const struct msghdr *msg,
+                                       struct msghdr *msg,
                                        size_t paylen)
 {
        struct sctp_chunk *retval;
@@ -1018,7 +1018,7 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
                if (!payload)
                        goto err_payload;
 
-               err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
+               err = memcpy_from_msg(payload, msg, paylen);
                if (err < 0)
                        goto err_copy;
        }
@@ -1491,26 +1491,26 @@ static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
  * chunk is not big enough.
  * Returns a kernel err value.
  */
-int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
-                         struct iovec *data)
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
+                         struct iov_iter *from)
 {
-       __u8 *target;
-       int err = 0;
+       void *target;
+       ssize_t copied;
 
        /* Make room in chunk for data.  */
        target = skb_put(chunk->skb, len);
 
        /* Copy data (whole iovec) into chunk */
-       if ((err = memcpy_fromiovecend(target, data, off, len)))
-               goto out;
+       copied = copy_from_iter(target, len, from);
+       if (copied != len)
+               return -EFAULT;
 
        /* Adjust the chunk length field.  */
        chunk->chunk_hdr->length =
                htons(ntohs(chunk->chunk_hdr->length) + len);
        chunk->chunk_end = skb_tail_pointer(chunk->skb);
 
-out:
-       return err;
+       return 0;
 }
 
 /* Helper function to assign a TSN if needed.  This assumes that both
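
This sctp_user_addto_chunk() rewrite also explains the offset bookkeeping
removed from chunk.c above: copy_from_iter() advances the iterator itself,
so each chunk's copy resumes exactly where the previous one stopped and the
caller no longer passes an offset. The pattern, modeled in user space
(illustrative names):

    #include <stdio.h>
    #include <string.h>

    #define CHUNK 10

    struct iter { const char *p; size_t left; };

    static size_t copy_from_iter(char *dst, size_t len, struct iter *it)
    {
            size_t n = len < it->left ? len : it->left;

            memcpy(dst, it->p, n);
            it->p += n;             /* cursor advances by itself:   */
            it->left -= n;          /* no offset kept by the caller */
            return n;
    }

    int main(void)
    {
            const char msg[] = "split me into fixed size chunks";
            struct iter from = { msg, sizeof(msg) - 1 };
            char chunk[CHUNK + 1];

            while (from.left) {
                    size_t len = from.left < CHUNK ? from.left : CHUNK;

                    if (copy_from_iter(chunk, len, &from) != len)
                            return 1;       /* short copy -> -EFAULT above */
                    chunk[len] = '\0';
                    printf("[%s]\n", chunk);
            }
            return 0;
    }
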
index 85e0b653edd73617a763b382018b7788538a78a9..0397ac9fd98c2601da7c15822599607235cae14c 100644 (file)
@@ -1609,6 +1609,9 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        __u16 sinfo_flags = 0;
        long timeo;
        int err;
+       struct iov_iter from;
+
+       iov_iter_init(&from, WRITE, msg->msg_iov, msg->msg_iovlen, msg_len);
 
        err = 0;
        sp = sctp_sk(sk);
@@ -1947,7 +1950,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        }
 
        /* Break the message into multiple chunks of maximum size. */
-       datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
+       datamsg = sctp_datamsg_from_user(asoc, sinfo, &from);
        if (IS_ERR(datamsg)) {
                err = PTR_ERR(datamsg);
                goto out_free;
index 3f959c681885ba41ccdf6300bb27a35745ec11be..f9c052d508f008c4fb74567ea8cbad13f305d497 100644 (file)
@@ -1019,17 +1019,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
        xid = *p++;
        calldir = *p;
 
-       if (bc_xprt)
-               req = xprt_lookup_rqst(bc_xprt, xid);
-
-       if (!req) {
-               printk(KERN_NOTICE
-                       "%s: Got unrecognized reply: "
-                       "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
-                       __func__, ntohl(calldir),
-                       bc_xprt, ntohl(xid));
+       if (!bc_xprt)
                return -EAGAIN;
-       }
+       spin_lock_bh(&bc_xprt->transport_lock);
+       req = xprt_lookup_rqst(bc_xprt, xid);
+       if (!req)
+               goto unlock_notfound;
 
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
        /*
@@ -1040,11 +1035,21 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
        dst = &req->rq_private_buf.head[0];
        src = &rqstp->rq_arg.head[0];
        if (dst->iov_len < src->iov_len)
-               return -EAGAIN; /* whatever; just giving up. */
+               goto unlock_eagain; /* whatever; just giving up. */
        memcpy(dst->iov_base, src->iov_base, src->iov_len);
        xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
        rqstp->rq_arg.len = 0;
+       spin_unlock_bh(&bc_xprt->transport_lock);
        return 0;
+unlock_notfound:
+       printk(KERN_NOTICE
+               "%s: Got unrecognized reply: "
+               "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
+               __func__, ntohl(calldir),
+               bc_xprt, ntohl(xid));
+unlock_eagain:
+       spin_unlock_bh(&bc_xprt->transport_lock);
+       return -EAGAIN;
 }
 
 static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
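
The svcsock change is a locking fix as much as a cleanup: xprt_lookup_rqst()
and xprt_complete_rqst() now run under a single hold of transport_lock, and
the early returns become goto targets so every path releases it exactly
once. The generic shape of that pattern, with a pthread mutex standing in
for the xprt lock:

    #include <pthread.h>
    #include <stdio.h>

    #define EAGAIN 11

    static pthread_mutex_t xprt_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending[4] = { 0, 7, 0, 0 };     /* toy request table */

    static int lookup_and_complete(int xid)
    {
            int ret = -EAGAIN;

            pthread_mutex_lock(&xprt_lock);
            if (xid < 0 || xid >= 4 || !pending[xid])
                    goto unlock;            /* single unlock path */
            pending[xid] = 0;               /* "complete" the request */
            ret = 0;
    unlock:
            pthread_mutex_unlock(&xprt_lock);
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", lookup_and_complete(1),   /* found: 0  */
                   lookup_and_complete(3));             /* not: -11  */
            return 0;
    }
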
diff --git a/net/switchdev/Kconfig b/net/switchdev/Kconfig
new file mode 100644 (file)
index 0000000..1557545
--- /dev/null
@@ -0,0 +1,13 @@
+#
+# Configuration for Switch device support
+#
+
+config NET_SWITCHDEV
+       boolean "Switch (and switch-ish) device support (EXPERIMENTAL)"
+       depends on INET
+       ---help---
+         This module provides glue between core networking code and device
+         drivers in order to support hardware switch chips in the very generic
+         sense of the word "switch". This includes devices supporting L2/L3,
+         but also various flow-offloading chips, including switches embedded
+         into SR-IOV NICs.
diff --git a/net/switchdev/Makefile b/net/switchdev/Makefile
new file mode 100644 (file)
index 0000000..5ed63ed
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the Switch device API
+#
+
+obj-$(CONFIG_NET_SWITCHDEV) += switchdev.o
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
new file mode 100644 (file)
index 0000000..d162b21
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * net/switchdev/switchdev.c - Switch device API
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <net/switchdev.h>
+
+/**
+ *     netdev_switch_parent_id_get - Get ID of a switch
+ *     @dev: port device
+ *     @psid: switch ID
+ *
+ *     Get ID of a switch this port is part of.
+ */
+int netdev_switch_parent_id_get(struct net_device *dev,
+                               struct netdev_phys_item_id *psid)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (!ops->ndo_switch_parent_id_get)
+               return -EOPNOTSUPP;
+       return ops->ndo_switch_parent_id_get(dev, psid);
+}
+EXPORT_SYMBOL(netdev_switch_parent_id_get);
+
+/**
+ *     netdev_switch_port_stp_update - Notify switch device port of STP
+ *                                     state change
+ *     @dev: port device
+ *     @state: port STP state
+ *
+ *     Notify switch device port of bridge port STP state change.
+ */
+int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (!ops->ndo_switch_port_stp_update)
+               return -EOPNOTSUPP;
+       WARN_ON(!ops->ndo_switch_parent_id_get);
+       return ops->ndo_switch_port_stp_update(dev, state);
+}
+EXPORT_SYMBOL(netdev_switch_port_stp_update);
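
Both exported helpers follow the same shape: a thin guard around an optional
ndo callback that returns -EOPNOTSUPP when the driver has not opted in. A
self-contained model of that ops-table dispatch (struct and field names here
are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    #define EOPNOTSUPP 95

    struct phys_item_id { unsigned char id[32]; int len; };

    struct dev_ops {            /* stands in for net_device_ops */
            int (*parent_id_get)(struct phys_item_id *psid);
            int (*stp_update)(unsigned char state);
    };

    /* a driver that supports the switch ID query but not STP updates */
    static int my_parent_id_get(struct phys_item_id *psid)
    {
            memcpy(psid->id, "\x00\x11\x22", 3);
            psid->len = 3;
            return 0;
    }

    static struct dev_ops ops = { .parent_id_get = my_parent_id_get };

    static int switch_parent_id_get(struct phys_item_id *psid)
    {
            if (!ops.parent_id_get)
                    return -EOPNOTSUPP;     /* same guard as the wrapper */
            return ops.parent_id_get(psid);
    }

    static int switch_stp_update(unsigned char state)
    {
            if (!ops.stp_update)
                    return -EOPNOTSUPP;
            return ops.stp_update(state);
    }

    int main(void)
    {
            struct phys_item_id id;

            printf("id: %d, stp: %d\n",
                   switch_parent_id_get(&id),   /* 0   */
                   switch_stp_update(1));       /* -95 */
            return 0;
    }
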
index b8a13caad59a518dddd2a8a50ad3a9a04f363cae..333e4592772ced97b3a636c0b7a854f795a140cd 100644 (file)
@@ -7,8 +7,8 @@ obj-$(CONFIG_TIPC) := tipc.o
 tipc-y += addr.o bcast.o bearer.o config.o \
           core.o link.o discover.o msg.o  \
           name_distr.o  subscr.o name_table.o net.o  \
-          netlink.o node.o node_subscr.o \
-          socket.o log.o eth_media.o server.o
+          netlink.o node.o socket.o log.o eth_media.o \
+          server.o
 
 tipc-$(CONFIG_TIPC_MEDIA_IB)   += ib_media.o
 tipc-$(CONFIG_SYSCTL)          += sysctl.o
index 556b26ad4b1efbb34b9a85f160298fb0d36d889e..f0761c771734fc0fd94dc85c18559830c921b661 100644 (file)
@@ -217,12 +217,13 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
  */
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
-       struct sk_buff *buf;
+       struct sk_buff *skb;
 
-       buf = bcl->first_out;
-       while (buf && less_eq(buf_seqno(buf), after))
-               buf = buf->next;
-       tipc_link_retransmit(bcl, buf, mod(to - after));
+       skb_queue_walk(&bcl->outqueue, skb) {
+               if (more(buf_seqno(skb), after))
+                       break;
+       }
+       tipc_link_retransmit(bcl, skb, mod(to - after));
 }
 
 /**
@@ -245,14 +246,14 @@ void tipc_bclink_wakeup_users(void)
  */
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 {
-       struct sk_buff *crs;
+       struct sk_buff *skb, *tmp;
        struct sk_buff *next;
        unsigned int released = 0;
 
        tipc_bclink_lock();
        /* Bail out if tx queue is empty (no clean up is required) */
-       crs = bcl->first_out;
-       if (!crs)
+       skb = skb_peek(&bcl->outqueue);
+       if (!skb)
                goto exit;
 
        /* Determine which messages need to be acknowledged */
@@ -271,43 +272,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
                 * Bail out if specified sequence number does not correspond
                 * to a message that has been sent and not yet acknowledged
                 */
-               if (less(acked, buf_seqno(crs)) ||
+               if (less(acked, buf_seqno(skb)) ||
                    less(bcl->fsm_msg_cnt, acked) ||
                    less_eq(acked, n_ptr->bclink.acked))
                        goto exit;
        }
 
        /* Skip over packets that node has previously acknowledged */
-       while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
-               crs = crs->next;
+       skb_queue_walk(&bcl->outqueue, skb) {
+               if (more(buf_seqno(skb), n_ptr->bclink.acked))
+                       break;
+       }
 
        /* Update packets that node is now acknowledging */
+       skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
+               if (more(buf_seqno(skb), acked))
+                       break;
 
-       while (crs && less_eq(buf_seqno(crs), acked)) {
-               next = crs->next;
-
-               if (crs != bcl->next_out)
-                       bcbuf_decr_acks(crs);
-               else {
-                       bcbuf_set_acks(crs, 0);
+               next = tipc_skb_queue_next(&bcl->outqueue, skb);
+               if (skb != bcl->next_out) {
+                       bcbuf_decr_acks(skb);
+               } else {
+                       bcbuf_set_acks(skb, 0);
                        bcl->next_out = next;
                        bclink_set_last_sent();
                }
 
-               if (bcbuf_acks(crs) == 0) {
-                       bcl->first_out = next;
-                       bcl->out_queue_size--;
-                       kfree_skb(crs);
+               if (bcbuf_acks(skb) == 0) {
+                       __skb_unlink(skb, &bcl->outqueue);
+                       kfree_skb(skb);
                        released = 1;
                }
-               crs = next;
        }
        n_ptr->bclink.acked = acked;
 
        /* Try resolving broadcast link congestion, if necessary */
-
        if (unlikely(bcl->next_out)) {
-               tipc_link_push_queue(bcl);
+               tipc_link_push_packets(bcl);
                bclink_set_last_sent();
        }
        if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
@@ -327,19 +328,16 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
        struct sk_buff *buf;
 
        /* Ignore "stale" link state info */
-
        if (less_eq(last_sent, n_ptr->bclink.last_in))
                return;
 
        /* Update link synchronization state; quit if in sync */
-
        bclink_update_last_sent(n_ptr, last_sent);
 
        if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
                return;
 
        /* Update out-of-sync state; quit if loss is still unconfirmed */
-
        if ((++n_ptr->bclink.oos_state) == 1) {
                if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
                        return;
@@ -347,15 +345,15 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
        }
 
        /* Don't NACK if one has been recently sent (or seen) */
-
        if (n_ptr->bclink.oos_state & 0x1)
                return;
 
        /* Send NACK */
-
        buf = tipc_buf_acquire(INT_H_SIZE);
        if (buf) {
                struct tipc_msg *msg = buf_msg(buf);
+               struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
+               u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
 
                tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                              INT_H_SIZE, n_ptr->addr);
@@ -363,9 +361,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
-               msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
-                                ? buf_seqno(n_ptr->bclink.deferred_head) - 1
-                                : n_ptr->bclink.last_sent);
+               msg_set_bcgap_to(msg, to);
 
                tipc_bclink_lock();
                tipc_bearer_send(MAX_BEARERS, buf, NULL);
@@ -402,20 +398,20 @@ static void bclink_peek_nack(struct tipc_msg *msg)
 
 /* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
  *                    and to identified node local sockets
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_bclink_xmit(struct sk_buff *buf)
+int tipc_bclink_xmit(struct sk_buff_head *list)
 {
        int rc = 0;
        int bc = 0;
-       struct sk_buff *clbuf;
+       struct sk_buff *skb;
 
        /* Prepare clone of message for local node */
-       clbuf = tipc_msg_reassemble(buf);
-       if (unlikely(!clbuf)) {
-               kfree_skb_list(buf);
+       skb = tipc_msg_reassemble(list);
+       if (unlikely(!skb)) {
+               __skb_queue_purge(list);
                return -EHOSTUNREACH;
        }
 
@@ -423,11 +419,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
        if (likely(bclink)) {
                tipc_bclink_lock();
                if (likely(bclink->bcast_nodes.count)) {
-                       rc = __tipc_link_xmit(bcl, buf);
+                       rc = __tipc_link_xmit(bcl, list);
                        if (likely(!rc)) {
+                               u32 len = skb_queue_len(&bcl->outqueue);
+
                                bclink_set_last_sent();
                                bcl->stats.queue_sz_counts++;
-                               bcl->stats.accu_queue_sz += bcl->out_queue_size;
+                               bcl->stats.accu_queue_sz += len;
                        }
                        bc = 1;
                }
@@ -435,13 +433,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
        }
 
        if (unlikely(!bc))
-               kfree_skb_list(buf);
+               __skb_queue_purge(list);
 
        /* Deliver message clone */
        if (likely(!rc))
-               tipc_sk_mcast_rcv(clbuf);
+               tipc_sk_mcast_rcv(skb);
        else
-               kfree_skb(clbuf);
+               kfree_skb(skb);
 
        return rc;
 }
@@ -462,7 +460,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
         * Unicast an ACK periodically, ensuring that
         * all nodes in the cluster don't ACK at the same time
         */
-
        if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
                tipc_link_proto_xmit(node->active_links[node->addr & 1],
                                     STATE_MSG, 0, 0, 0, 0, 0);
@@ -484,7 +481,6 @@ void tipc_bclink_rcv(struct sk_buff *buf)
        int deferred = 0;
 
        /* Screen out unwanted broadcast messages */
-
        if (msg_mc_netid(msg) != tipc_net_id)
                goto exit;
 
@@ -497,7 +493,6 @@ void tipc_bclink_rcv(struct sk_buff *buf)
                goto unlock;
 
        /* Handle broadcast protocol message */
-
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                if (msg_type(msg) != STATE_MSG)
                        goto unlock;
@@ -518,14 +513,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
        }
 
        /* Handle in-sequence broadcast message */
-
        seqno = msg_seqno(msg);
        next_in = mod(node->bclink.last_in + 1);
 
        if (likely(seqno == next_in)) {
 receive:
                /* Deliver message to destination */
-
                if (likely(msg_isdata(msg))) {
                        tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
@@ -574,7 +567,6 @@ receive:
                buf = NULL;
 
                /* Determine new synchronization state */
-
                tipc_node_lock(node);
                if (unlikely(!tipc_node_is_up(node)))
                        goto unlock;
@@ -582,33 +574,26 @@ receive:
                if (node->bclink.last_in == node->bclink.last_sent)
                        goto unlock;
 
-               if (!node->bclink.deferred_head) {
+               if (skb_queue_empty(&node->bclink.deferred_queue)) {
                        node->bclink.oos_state = 1;
                        goto unlock;
                }
 
-               msg = buf_msg(node->bclink.deferred_head);
+               msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
                seqno = msg_seqno(msg);
                next_in = mod(next_in + 1);
                if (seqno != next_in)
                        goto unlock;
 
                /* Take in-sequence message from deferred queue & deliver it */
-
-               buf = node->bclink.deferred_head;
-               node->bclink.deferred_head = buf->next;
-               buf->next = NULL;
-               node->bclink.deferred_size--;
+               buf = __skb_dequeue(&node->bclink.deferred_queue);
                goto receive;
        }
 
        /* Handle out-of-sequence broadcast message */
-
        if (less(next_in, seqno)) {
-               deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
-                                              &node->bclink.deferred_tail,
+               deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
                                               buf);
-               node->bclink.deferred_size += deferred;
                bclink_update_last_sent(node, seqno);
                buf = NULL;
        }
@@ -963,6 +948,8 @@ int tipc_bclink_init(void)
        sprintf(bcbearer->media.name, "tipc-broadcast");
 
        spin_lock_init(&bclink->lock);
+       __skb_queue_head_init(&bcl->outqueue);
+       __skb_queue_head_init(&bcl->deferred_queue);
        __skb_queue_head_init(&bcl->waiting_sks);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
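
Most of the TIPC churn above is one mechanical conversion: open-coded
buf->next chains become sk_buff_head queues walked with skb_queue_walk(),
and the _safe variants are used wherever the loop unlinks the current
buffer, because they cache the next pointer before the unlink. The same
idiom in plain C:

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { int seqno; struct pkt *next; };

    int main(void)
    {
            struct pkt *head = NULL, *p, *next;
            int i;

            for (i = 5; i >= 1; i--) {      /* build queue 1..5 */
                    p = malloc(sizeof(*p));
                    p->seqno = i;
                    p->next = head;
                    head = p;
            }
            /* ack everything <= 3: cache next before freeing, the same
             * reason tipc_bclink_acknowledge() now needs
             * skb_queue_walk_from_safe() */
            for (p = head; p; p = next) {
                    next = p->next;
                    if (p->seqno > 3)
                            break;
                    head = next;            /* __skb_unlink() analogue */
                    free(p);
            }
            for (p = head; p; p = p->next)
                    printf("left: %d\n", p->seqno);
            return 0;
    }
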
index 443de084d3e885fc6dded0e594ac6f765095f9ed..644d79129fbaeb1ac3022fd02f9a6c4dafca9b7d 100644 (file)
@@ -100,7 +100,7 @@ int  tipc_bclink_reset_stats(void);
 int  tipc_bclink_set_queue_limits(u32 limit);
 void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
 uint  tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct sk_buff *buf);
+int tipc_bclink_xmit(struct sk_buff_head *list);
 void tipc_bclink_wakeup_users(void);
 int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
 
index b1d905209e83f9558c4d01ddebf6a6327cc4ad5c..2c1230ac5dfe8d66642b30e144d8015ef6a76ed5 100644 (file)
@@ -165,7 +165,7 @@ extern struct tipc_bearer __rcu *bearer_list[];
  * TIPC routines available to supported media types
  */
 
-void tipc_rcv(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
+void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr);
 int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
 int tipc_disable_bearer(const char *name);
 
index b578b10feefa8f0d1a3374cc94aaed0e3629b58c..84602137ce20f1fe051d7c3ebd2a8fafa10c2b43 100644 (file)
@@ -192,6 +192,7 @@ struct tipc_skb_cb {
        struct sk_buff *tail;
        bool deferred;
        bool wakeup_pending;
+       bool bundling;
        u16 chain_sz;
        u16 chain_imp;
 };
index 4738cb1bf7c0cd130104e1eb3021b2d35483bc9f..34bf15c90c78e38f562d2288883174002258cfa7 100644 (file)
@@ -149,18 +149,6 @@ static void link_init_max_pkt(struct tipc_link *l_ptr)
        l_ptr->max_pkt_probes = 0;
 }
 
-static u32 link_next_sent(struct tipc_link *l_ptr)
-{
-       if (l_ptr->next_out)
-               return buf_seqno(l_ptr->next_out);
-       return mod(l_ptr->next_out_no);
-}
-
-static u32 link_last_sent(struct tipc_link *l_ptr)
-{
-       return mod(link_next_sent(l_ptr) - 1);
-}
-
 /*
  *  Simple non-static link routines (i.e. referenced outside this file)
  */
@@ -183,14 +171,17 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
  */
 static void link_timeout(struct tipc_link *l_ptr)
 {
+       struct sk_buff *skb;
+
        tipc_node_lock(l_ptr->owner);
 
        /* update counters used in statistical profiling of send traffic */
-       l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
+       l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
        l_ptr->stats.queue_sz_counts++;
 
-       if (l_ptr->first_out) {
-               struct tipc_msg *msg = buf_msg(l_ptr->first_out);
+       skb = skb_peek(&l_ptr->outqueue);
+       if (skb) {
+               struct tipc_msg *msg = buf_msg(skb);
                u32 length = msg_size(msg);
 
                if ((msg_user(msg) == MSG_FRAGMENTER) &&
@@ -218,11 +209,10 @@ static void link_timeout(struct tipc_link *l_ptr)
        }
 
        /* do all other link processing performed on a periodic basis */
-
        link_state_event(l_ptr, TIMEOUT_EVT);
 
        if (l_ptr->next_out)
-               tipc_link_push_queue(l_ptr);
+               tipc_link_push_packets(l_ptr);
 
        tipc_node_unlock(l_ptr->owner);
 }
@@ -301,6 +291,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
        link_init_max_pkt(l_ptr);
 
        l_ptr->next_out_no = 1;
+       __skb_queue_head_init(&l_ptr->outqueue);
+       __skb_queue_head_init(&l_ptr->deferred_queue);
        __skb_queue_head_init(&l_ptr->waiting_sks);
 
        link_reset_statistics(l_ptr);
@@ -379,29 +371,18 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
  */
 static void link_prepare_wakeup(struct tipc_link *link)
 {
-       struct sk_buff_head *wq = &link->waiting_sks;
-       struct sk_buff *buf;
-       uint pend_qsz = link->out_queue_size;
+       uint pend_qsz = skb_queue_len(&link->outqueue);
+       struct sk_buff *skb, *tmp;
 
-       for (buf = skb_peek(wq); buf; buf = skb_peek(wq)) {
-               if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(buf)->chain_imp])
+       skb_queue_walk_safe(&link->waiting_sks, skb, tmp) {
+               if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
                        break;
-               pend_qsz += TIPC_SKB_CB(buf)->chain_sz;
-               __skb_queue_tail(&link->owner->waiting_sks, __skb_dequeue(wq));
+               pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
+               __skb_unlink(skb, &link->waiting_sks);
+               __skb_queue_tail(&link->owner->waiting_sks, skb);
        }
 }
 
-/**
- * link_release_outqueue - purge link's outbound message queue
- * @l_ptr: pointer to link
- */
-static void link_release_outqueue(struct tipc_link *l_ptr)
-{
-       kfree_skb_list(l_ptr->first_out);
-       l_ptr->first_out = NULL;
-       l_ptr->out_queue_size = 0;
-}
-
 /**
  * tipc_link_reset_fragments - purge link's inbound message fragments queue
  * @l_ptr: pointer to link
@@ -418,11 +399,9 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
  */
 void tipc_link_purge_queues(struct tipc_link *l_ptr)
 {
-       kfree_skb_list(l_ptr->oldest_deferred_in);
-       kfree_skb_list(l_ptr->first_out);
+       __skb_queue_purge(&l_ptr->deferred_queue);
+       __skb_queue_purge(&l_ptr->outqueue);
        tipc_link_reset_fragments(l_ptr);
-       kfree_skb(l_ptr->proto_msg_queue);
-       l_ptr->proto_msg_queue = NULL;
 }
 
 void tipc_link_reset(struct tipc_link *l_ptr)
@@ -454,25 +433,16 @@ void tipc_link_reset(struct tipc_link *l_ptr)
        }
 
        /* Clean up all queues: */
-       link_release_outqueue(l_ptr);
-       kfree_skb(l_ptr->proto_msg_queue);
-       l_ptr->proto_msg_queue = NULL;
-       kfree_skb_list(l_ptr->oldest_deferred_in);
+       __skb_queue_purge(&l_ptr->outqueue);
+       __skb_queue_purge(&l_ptr->deferred_queue);
        if (!skb_queue_empty(&l_ptr->waiting_sks)) {
                skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
                owner->action_flags |= TIPC_WAKEUP_USERS;
        }
-       l_ptr->retransm_queue_head = 0;
-       l_ptr->retransm_queue_size = 0;
-       l_ptr->last_out = NULL;
-       l_ptr->first_out = NULL;
        l_ptr->next_out = NULL;
        l_ptr->unacked_window = 0;
        l_ptr->checkpoint = 1;
        l_ptr->next_out_no = 1;
-       l_ptr->deferred_inqueue_sz = 0;
-       l_ptr->oldest_deferred_in = NULL;
-       l_ptr->newest_deferred_in = NULL;
        l_ptr->fsm_msg_cnt = 0;
        l_ptr->stale_count = 0;
        link_reset_statistics(l_ptr);
@@ -694,9 +664,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
  * - For all other messages we discard the buffer and return -EHOSTUNREACH
  * - For TIPC internal messages we also reset the link
  */
-static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
+static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
 {
-       struct tipc_msg *msg = buf_msg(buf);
+       struct sk_buff *skb = skb_peek(list);
+       struct tipc_msg *msg = buf_msg(skb);
        uint imp = tipc_msg_tot_importance(msg);
        u32 oport = msg_tot_origport(msg);
 
@@ -709,30 +680,30 @@ static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
                goto drop;
        if (unlikely(msg_reroute_cnt(msg)))
                goto drop;
-       if (TIPC_SKB_CB(buf)->wakeup_pending)
+       if (TIPC_SKB_CB(skb)->wakeup_pending)
                return -ELINKCONG;
-       if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp))
+       if (link_schedule_user(link, oport, skb_queue_len(list), imp))
                return -ELINKCONG;
 drop:
-       kfree_skb_list(buf);
+       __skb_queue_purge(list);
        return -EHOSTUNREACH;
 }
 
 /**
  * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
  * @link: link to use
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
+ *
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
  * user data messages) or -EHOSTUNREACH (all other messages/senders)
  * Only the socket functions tipc_send_stream() and tipc_send_packet() need
  * to act on the return value, since they may need to do more send attempts.
  */
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
 {
-       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_msg *msg = buf_msg(skb_peek(list));
        uint psz = msg_size(msg);
-       uint qsz = link->out_queue_size;
        uint sndlim = link->queue_limit[0];
        uint imp = tipc_msg_tot_importance(msg);
        uint mtu = link->max_pkt;
@@ -740,71 +711,83 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
        uint seqno = link->next_out_no;
        uint bc_last_in = link->owner->bclink.last_in;
        struct tipc_media_addr *addr = &link->media_addr;
-       struct sk_buff *next = buf->next;
+       struct sk_buff_head *outqueue = &link->outqueue;
+       struct sk_buff *skb, *tmp;
 
        /* Match queue limits against msg importance: */
-       if (unlikely(qsz >= link->queue_limit[imp]))
-               return tipc_link_cong(link, buf);
+       if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
+               return tipc_link_cong(link, list);
 
        /* Has valid packet limit been used ? */
        if (unlikely(psz > mtu)) {
-               kfree_skb_list(buf);
+               __skb_queue_purge(list);
                return -EMSGSIZE;
        }
 
        /* Prepare each packet for sending, and add to outqueue: */
-       while (buf) {
-               next = buf->next;
-               msg = buf_msg(buf);
+       skb_queue_walk_safe(list, skb, tmp) {
+               __skb_unlink(skb, list);
+               msg = buf_msg(skb);
                msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
                msg_set_bcast_ack(msg, bc_last_in);
 
-               if (!link->first_out) {
-                       link->first_out = buf;
-               } else if (qsz < sndlim) {
-                       link->last_out->next = buf;
-               } else if (tipc_msg_bundle(link->last_out, buf, mtu)) {
+               if (skb_queue_len(outqueue) < sndlim) {
+                       __skb_queue_tail(outqueue, skb);
+                       tipc_bearer_send(link->bearer_id, skb, addr);
+                       link->next_out = NULL;
+                       link->unacked_window = 0;
+               } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
                        link->stats.sent_bundled++;
-                       buf = next;
-                       next = buf->next;
                        continue;
-               } else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) {
+               } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
+                                               link->addr)) {
                        link->stats.sent_bundled++;
                        link->stats.sent_bundles++;
-                       link->last_out->next = buf;
                        if (!link->next_out)
-                               link->next_out = buf;
+                               link->next_out = skb_peek_tail(outqueue);
                } else {
-                       link->last_out->next = buf;
+                       __skb_queue_tail(outqueue, skb);
                        if (!link->next_out)
-                               link->next_out = buf;
-               }
-
-               /* Send packet if possible: */
-               if (likely(++qsz <= sndlim)) {
-                       tipc_bearer_send(link->bearer_id, buf, addr);
-                       link->next_out = next;
-                       link->unacked_window = 0;
+                               link->next_out = skb;
                }
                seqno++;
-               link->last_out = buf;
-               buf = next;
        }
        link->next_out_no = seqno;
-       link->out_queue_size = qsz;
        return 0;
 }
 
+static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
+{
+       __skb_queue_head_init(list);
+       __skb_queue_tail(list, skb);
+}
+
+static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
+{
+       struct sk_buff_head head;
+
+       skb2list(skb, &head);
+       return __tipc_link_xmit(link, &head);
+}
+
+int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
+{
+       struct sk_buff_head head;
+
+       skb2list(skb, &head);
+       return tipc_link_xmit(&head, dnode, selector);
+}
+
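A minimal standalone sketch (plain userspace C, not kernel code; the queue type below is a simplified stand-in for struct sk_buff_head) of the wrapper pattern introduced above: wrap one buffer in a freshly initialized queue so the queue-consuming transmit path can serve single-buffer callers unchanged.

#include <stdio.h>

struct node {                      /* stand-in for struct sk_buff      */
        struct node *next, *prev;
        int seqno;
};

struct queue_head {                /* stand-in for struct sk_buff_head */
        struct node *first, *last;
        unsigned int len;
};

static void queue_init(struct queue_head *q)
{
        q->first = q->last = NULL;
        q->len = 0;
}

static void queue_tail(struct queue_head *q, struct node *n)
{
        n->next = NULL;
        n->prev = q->last;
        if (q->last)
                q->last->next = n;
        else
                q->first = n;
        q->last = n;
        q->len++;
}

/* The skb2list() idea: one buffer becomes a one-element queue */
static void one2queue(struct node *n, struct queue_head *q)
{
        queue_init(q);
        queue_tail(q, n);
}

int main(void)
{
        struct node msg = { .seqno = 42 };
        struct queue_head head;

        one2queue(&msg, &head);
        printf("len=%u, head seqno=%d\n", head.len, head.first->seqno);
        return 0;
}
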
 /**
  * tipc_link_xmit() is the general link level function for message sending
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
  * @dsz: amount of user data to be sent
  * @dnode: address of destination node
  * @selector: a number used for deterministic link selection
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
+int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
 {
        struct tipc_link *link = NULL;
        struct tipc_node *node;
@@ -815,17 +798,22 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
                tipc_node_lock(node);
                link = node->active_links[selector & 1];
                if (link)
-                       rc = __tipc_link_xmit(link, buf);
+                       rc = __tipc_link_xmit(link, list);
                tipc_node_unlock(node);
        }
 
        if (link)
                return rc;
 
-       if (likely(in_own_node(dnode)))
-               return tipc_sk_rcv(buf);
+       if (likely(in_own_node(dnode))) {
+               /* A node-local message chain never contains more than
+                * one buffer, so it is enough to dequeue the single skb
+                * at the head of the list.
+                */

+               return tipc_sk_rcv(__skb_dequeue(list));
+       }
+       __skb_queue_purge(list);
 
-       kfree_skb_list(buf);
        return rc;
 }
 
@@ -839,17 +827,17 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
  */
 static void tipc_link_sync_xmit(struct tipc_link *link)
 {
-       struct sk_buff *buf;
+       struct sk_buff *skb;
        struct tipc_msg *msg;
 
-       buf = tipc_buf_acquire(INT_H_SIZE);
-       if (!buf)
+       skb = tipc_buf_acquire(INT_H_SIZE);
+       if (!skb)
                return;
 
-       msg = buf_msg(buf);
+       msg = buf_msg(skb);
        tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
        msg_set_last_bcast(msg, link->owner->bclink.acked);
-       __tipc_link_xmit(link, buf);
+       __tipc_link_xmit_skb(link, skb);
 }
 
 /*
@@ -869,85 +857,46 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
        kfree_skb(buf);
 }
 
+struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
+                                   const struct sk_buff *skb)
+{
+       if (skb_queue_is_last(list, skb))
+               return NULL;
+       return skb->next;
+}
+
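tipc_skb_queue_next() exists because sk_buff_head queues are circular: the last buffer's ->next points back into the head structure, not to NULL. A hedged standalone model (plain C, with a simplified sentinel standing in for the queue head):

#include <stdio.h>

struct node { struct node *next; int val; };

static struct node sentinel;       /* models the sk_buff_head itself */

/* Like tipc_skb_queue_next(): translate "next is the sentinel" into
 * NULL so callers can use an ordinary NULL-terminated walk.
 */
static struct node *queue_next(const struct node *n)
{
        return n->next == &sentinel ? NULL : n->next;
}

int main(void)
{
        struct node b = { .next = &sentinel, .val = 2 };
        struct node a = { .next = &b, .val = 1 };

        sentinel.next = &a;
        printf("%d\n", queue_next(&a)->val);              /* 2    */
        printf("%s\n", queue_next(&b) ? "node" : "NULL"); /* NULL */
        return 0;
}
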
 /*
- * tipc_link_push_packet: Push one unsent packet to the media
+ * tipc_link_push_packets - push unsent packets to bearer
+ *
+ * Push out the unsent messages of a link where congestion
+ * has abated. Called with node locked.
  */
-static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
-{
-       struct sk_buff *buf = l_ptr->first_out;
-       u32 r_q_size = l_ptr->retransm_queue_size;
-       u32 r_q_head = l_ptr->retransm_queue_head;
-
-       /* Step to position where retransmission failed, if any,    */
-       /* consider that buffers may have been released in meantime */
-       if (r_q_size && buf) {
-               u32 last = lesser(mod(r_q_head + r_q_size),
-                                 link_last_sent(l_ptr));
-               u32 first = buf_seqno(buf);
-
-               while (buf && less(first, r_q_head)) {
-                       first = mod(first + 1);
-                       buf = buf->next;
-               }
-               l_ptr->retransm_queue_head = r_q_head = first;
-               l_ptr->retransm_queue_size = r_q_size = mod(last - first);
-       }
-
-       /* Continue retransmission now, if there is anything: */
-       if (r_q_size && buf) {
-               msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
-               msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
-               tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
-               l_ptr->retransm_queue_head = mod(++r_q_head);
-               l_ptr->retransm_queue_size = --r_q_size;
-               l_ptr->stats.retransmitted++;
-               return 0;
-       }
-
-       /* Send deferred protocol message, if any: */
-       buf = l_ptr->proto_msg_queue;
-       if (buf) {
-               msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
-               msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
-               tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
-               l_ptr->unacked_window = 0;
-               kfree_skb(buf);
-               l_ptr->proto_msg_queue = NULL;
-               return 0;
-       }
+void tipc_link_push_packets(struct tipc_link *l_ptr)
+{
+       struct sk_buff_head *outqueue = &l_ptr->outqueue;
+       struct sk_buff *skb = l_ptr->next_out;
+       struct tipc_msg *msg;
+       u32 next, first;
 
-       /* Send one deferred data message, if send window not full: */
-       buf = l_ptr->next_out;
-       if (buf) {
-               struct tipc_msg *msg = buf_msg(buf);
-               u32 next = msg_seqno(msg);
-               u32 first = buf_seqno(l_ptr->first_out);
+       skb_queue_walk_from(outqueue, skb) {
+               msg = buf_msg(skb);
+               next = msg_seqno(msg);
+               first = buf_seqno(skb_peek(outqueue));
 
                if (mod(next - first) < l_ptr->queue_limit[0]) {
                        msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                        msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-                       tipc_bearer_send(l_ptr->bearer_id, buf,
-                                        &l_ptr->media_addr);
                        if (msg_user(msg) == MSG_BUNDLER)
-                               msg_set_type(msg, BUNDLE_CLOSED);
-                       l_ptr->next_out = buf->next;
-                       return 0;
+                               TIPC_SKB_CB(skb)->bundling = false;
+                       tipc_bearer_send(l_ptr->bearer_id, skb,
+                                        &l_ptr->media_addr);
+                       l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
+               } else {
+                       break;
                }
        }
-       return 1;
-}
-
-/*
- * push_queue(): push out the unsent messages of a link where
- *               congestion has abated. Node is locked
- */
-void tipc_link_push_queue(struct tipc_link *l_ptr)
-{
-       u32 res;
-
-       do {
-               res = tipc_link_push_packet(l_ptr);
-       } while (!res);
 }
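
The walk above stops at the first packet outside the send window. An illustrative standalone check (plain C, not kernel code; queue_limit[0] plays the role of the window size, and sequence arithmetic is mod 2^16):

#include <stdio.h>

static unsigned int mod(unsigned int x) { return x & 0xffffu; }

/* A queued packet may be pushed only while its distance from the
 * oldest unacknowledged packet stays inside the window.
 */
static int in_send_window(unsigned int next, unsigned int first,
                          unsigned int window)
{
        return mod(next - first) < window;
}

int main(void)
{
        printf("%d\n", in_send_window(69, 30, 50));   /* 39 < 50: send  */
        printf("%d\n", in_send_window(81, 30, 50));   /* 51 >= 50: stop */
        return 0;
}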
 
 void tipc_link_reset_all(struct tipc_node *node)
@@ -1011,20 +960,20 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
        }
 }
 
-void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
+void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
                          u32 retransmits)
 {
        struct tipc_msg *msg;
 
-       if (!buf)
+       if (!skb)
                return;
 
-       msg = buf_msg(buf);
+       msg = buf_msg(skb);
 
        /* Detect repeated retransmit failures */
        if (l_ptr->last_retransmitted == msg_seqno(msg)) {
                if (++l_ptr->stale_count > 100) {
-                       link_retransmit_failure(l_ptr, buf);
+                       link_retransmit_failure(l_ptr, skb);
                        return;
                }
        } else {
@@ -1032,38 +981,29 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
                l_ptr->stale_count = 1;
        }
 
-       while (retransmits && (buf != l_ptr->next_out) && buf) {
-               msg = buf_msg(buf);
+       skb_queue_walk_from(&l_ptr->outqueue, skb) {
+               if (!retransmits || skb == l_ptr->next_out)
+                       break;
+               msg = buf_msg(skb);
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-               tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
-               buf = buf->next;
+               tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr);
                retransmits--;
                l_ptr->stats.retransmitted++;
        }
-
-       l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
 }
 
-/**
- * link_insert_deferred_queue - insert deferred messages back into receive chain
- */
-static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
-                                                 struct sk_buff *buf)
+static void link_retrieve_defq(struct tipc_link *link,
+                              struct sk_buff_head *list)
 {
        u32 seq_no;
 
-       if (l_ptr->oldest_deferred_in == NULL)
-               return buf;
+       if (skb_queue_empty(&link->deferred_queue))
+               return;
 
-       seq_no = buf_seqno(l_ptr->oldest_deferred_in);
-       if (seq_no == mod(l_ptr->next_in_no)) {
-               l_ptr->newest_deferred_in->next = buf;
-               buf = l_ptr->oldest_deferred_in;
-               l_ptr->oldest_deferred_in = NULL;
-               l_ptr->deferred_inqueue_sz = 0;
-       }
-       return buf;
+       seq_no = buf_seqno(skb_peek(&link->deferred_queue));
+       if (seq_no == mod(link->next_in_no))
+               skb_queue_splice_tail_init(&link->deferred_queue, list);
 }
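
For illustration (standalone C sketch, not kernel code): the deferred queue is spliced back into the receive path only when its head carries exactly the next expected sequence number; otherwise the gap is still open and the queue keeps waiting.

#include <stdio.h>

static unsigned int mod(unsigned int x) { return x & 0xffffu; }

static int should_retrieve(unsigned int defq_head_seqno,
                           unsigned int next_in_no)
{
        return defq_head_seqno == mod(next_in_no);
}

int main(void)
{
        printf("%d\n", should_retrieve(8, 8));  /* in sequence: splice  */
        printf("%d\n", should_retrieve(9, 8));  /* gap remains: keep it */
        return 0;
}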
 
 /**
@@ -1123,43 +1063,42 @@ static int link_recv_buf_validate(struct sk_buff *buf)
 
 /**
  * tipc_rcv - process TIPC packets/messages arriving from off-node
- * @head: pointer to message buffer chain
+ * @skb: TIPC packet
  * @b_ptr: pointer to bearer message arrived on
  *
  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
  * structure (i.e. cannot be NULL), but bearer can be inactive.
  */
-void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
+void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
 {
-       while (head) {
-               struct tipc_node *n_ptr;
-               struct tipc_link *l_ptr;
-               struct sk_buff *crs;
-               struct sk_buff *buf = head;
-               struct tipc_msg *msg;
-               u32 seq_no;
-               u32 ackd;
-               u32 released = 0;
+       struct sk_buff_head head;
+       struct tipc_node *n_ptr;
+       struct tipc_link *l_ptr;
+       struct sk_buff *skb1, *tmp;
+       struct tipc_msg *msg;
+       u32 seq_no;
+       u32 ackd;
+       u32 released;
 
-               head = head->next;
-               buf->next = NULL;
+       skb2list(skb, &head);
 
+       while ((skb = __skb_dequeue(&head))) {
                /* Ensure message is well-formed */
-               if (unlikely(!link_recv_buf_validate(buf)))
+               if (unlikely(!link_recv_buf_validate(skb)))
                        goto discard;
 
                /* Ensure message data is a single contiguous unit */
-               if (unlikely(skb_linearize(buf)))
+               if (unlikely(skb_linearize(skb)))
                        goto discard;
 
                /* Handle arrival of a non-unicast link message */
-               msg = buf_msg(buf);
+               msg = buf_msg(skb);
 
                if (unlikely(msg_non_seq(msg))) {
                        if (msg_user(msg) ==  LINK_CONFIG)
-                               tipc_disc_rcv(buf, b_ptr);
+                               tipc_disc_rcv(skb, b_ptr);
                        else
-                               tipc_bclink_rcv(buf);
+                               tipc_bclink_rcv(skb);
                        continue;
                }
 
@@ -1198,22 +1137,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                if (n_ptr->bclink.recv_permitted)
                        tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
 
-               crs = l_ptr->first_out;
-               while ((crs != l_ptr->next_out) &&
-                      less_eq(buf_seqno(crs), ackd)) {
-                       struct sk_buff *next = crs->next;
-                       kfree_skb(crs);
-                       crs = next;
-                       released++;
-               }
-               if (released) {
-                       l_ptr->first_out = crs;
-                       l_ptr->out_queue_size -= released;
+               released = 0;
+               skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
+                       if (skb1 == l_ptr->next_out ||
+                           more(buf_seqno(skb1), ackd))
+                               break;
+                       __skb_unlink(skb1, &l_ptr->outqueue);
+                       kfree_skb(skb1);
+                       released = 1;
                }
 
                /* Try sending any messages link endpoint has pending */
                if (unlikely(l_ptr->next_out))
-                       tipc_link_push_queue(l_ptr);
+                       tipc_link_push_packets(l_ptr);
 
                if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
                        link_prepare_wakeup(l_ptr);
@@ -1223,8 +1159,8 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                /* Process the incoming packet */
                if (unlikely(!link_working_working(l_ptr))) {
                        if (msg_user(msg) == LINK_PROTOCOL) {
-                               tipc_link_proto_rcv(l_ptr, buf);
-                               head = link_insert_deferred_queue(l_ptr, head);
+                               tipc_link_proto_rcv(l_ptr, skb);
+                               link_retrieve_defq(l_ptr, &head);
                                tipc_node_unlock(n_ptr);
                                continue;
                        }
@@ -1234,8 +1170,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
 
                        if (link_working_working(l_ptr)) {
                                /* Re-insert buffer in front of queue */
-                               buf->next = head;
-                               head = buf;
+                               __skb_queue_head(&head, skb);
                                tipc_node_unlock(n_ptr);
                                continue;
                        }
@@ -1244,33 +1179,33 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
 
                /* Link is now in state WORKING_WORKING */
                if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
-                       link_handle_out_of_seq_msg(l_ptr, buf);
-                       head = link_insert_deferred_queue(l_ptr, head);
+                       link_handle_out_of_seq_msg(l_ptr, skb);
+                       link_retrieve_defq(l_ptr, &head);
                        tipc_node_unlock(n_ptr);
                        continue;
                }
                l_ptr->next_in_no++;
-               if (unlikely(l_ptr->oldest_deferred_in))
-                       head = link_insert_deferred_queue(l_ptr, head);
+               if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
+                       link_retrieve_defq(l_ptr, &head);
 
                if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
                        l_ptr->stats.sent_acks++;
                        tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
                }
 
-               if (tipc_link_prepare_input(l_ptr, &buf)) {
+               if (tipc_link_prepare_input(l_ptr, &skb)) {
                        tipc_node_unlock(n_ptr);
                        continue;
                }
                tipc_node_unlock(n_ptr);
-               msg = buf_msg(buf);
-               if (tipc_link_input(l_ptr, buf) != 0)
+
+               if (tipc_link_input(l_ptr, skb) != 0)
                        goto discard;
                continue;
 unlock_discard:
                tipc_node_unlock(n_ptr);
 discard:
-               kfree_skb(buf);
+               kfree_skb(skb);
        }
 }
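
The ack-release walk above frees acknowledged buffers from the head of the out-queue. A hedged standalone model (plain C over a sorted array; it omits the next_out boundary that the kernel walk also respects, and relies on the queue being ordered so a linear filter gives the same result as the early break):

#include <stdio.h>

#define MOD(x)          ((x) & 0xffffu)
#define LESS_EQ(a, b)   (MOD((b) - (a)) < 32768u)

int main(void)
{
        unsigned int outq[] = { 5, 6, 7, 8, 9 };  /* sent, unacked seqnos */
        unsigned int ackd = 7;                    /* peer acked up to 7   */
        unsigned int kept[5];
        int i, n = 0;

        /* Free everything at the head of the queue that the peer has
         * acknowledged; keep the first unacked buffer and its successors.
         */
        for (i = 0; i < 5; i++) {
                if (LESS_EQ(outq[i], ackd))
                        continue;                 /* "kfree_skb(skb1)"    */
                kept[n++] = outq[i];
        }
        for (i = 0; i < n; i++)
                printf("%u ", kept[i]);
        printf("\n");                             /* prints: 8 9 */
        return 0;
}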
 
@@ -1353,48 +1288,37 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
  *
  * Returns increase in queue length (i.e. 0 or 1)
  */
-u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
-                       struct sk_buff *buf)
+u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
 {
-       struct sk_buff *queue_buf;
-       struct sk_buff **prev;
-       u32 seq_no = buf_seqno(buf);
-
-       buf->next = NULL;
+       struct sk_buff *skb1;
+       u32 seq_no = buf_seqno(skb);
 
        /* Empty queue ? */
-       if (*head == NULL) {
-               *head = *tail = buf;
+       if (skb_queue_empty(list)) {
+               __skb_queue_tail(list, skb);
                return 1;
        }
 
        /* Last ? */
-       if (less(buf_seqno(*tail), seq_no)) {
-               (*tail)->next = buf;
-               *tail = buf;
+       if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
+               __skb_queue_tail(list, skb);
                return 1;
        }
 
        /* Locate insertion point in queue, then insert; discard if duplicate */
-       prev = head;
-       queue_buf = *head;
-       for (;;) {
-               u32 curr_seqno = buf_seqno(queue_buf);
+       skb_queue_walk(list, skb1) {
+               u32 curr_seqno = buf_seqno(skb1);
 
                if (seq_no == curr_seqno) {
-                       kfree_skb(buf);
+                       kfree_skb(skb);
                        return 0;
                }
 
                if (less(seq_no, curr_seqno))
                        break;
-
-               prev = &queue_buf->next;
-               queue_buf = queue_buf->next;
        }
 
-       buf->next = queue_buf;
-       *prev = buf;
+       __skb_queue_before(list, skb1, skb);
        return 1;
 }
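
A standalone sketch (plain C, array-based, not kernel code) of the same insertion policy: keep the queue sorted by mod-2^16 sequence number, take the common append-at-tail path first, and drop duplicates.

#include <stdio.h>

#define MOD(x)          ((x) & 0xffffu)
#define LESS(a, b)      (MOD((b) - (a)) < 32768u && (a) != (b))

static unsigned int q[64];
static int qlen;

/* Returns 1 if inserted (queue grew), 0 if the seqno was a duplicate */
static int defer_pkt(unsigned int seq_no)
{
        int i, j;

        /* Common cases first: empty queue, or new packet is the newest */
        if (!qlen || LESS(q[qlen - 1], seq_no)) {
                q[qlen++] = seq_no;
                return 1;
        }
        for (i = 0; i < qlen; i++) {
                if (q[i] == seq_no)
                        return 0;              /* duplicate: drop       */
                if (LESS(seq_no, q[i]))
                        break;                 /* insertion point found */
        }
        for (j = qlen; j > i; j--)
                q[j] = q[j - 1];
        q[i] = seq_no;
        qlen++;
        return 1;
}

int main(void)
{
        unsigned int in[] = { 7, 5, 9, 5, 65535, 6 };
        int i;

        for (i = 0; i < 6; i++)
                defer_pkt(MOD(in[i]));
        for (i = 0; i < qlen; i++)
                printf("%u ", q[i]);
        printf("\n");   /* prints: 65535 5 6 7 9 (65535 sorts before 5) */
        return 0;
}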
 
@@ -1424,15 +1348,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                return;
        }
 
-       if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
-                               &l_ptr->newest_deferred_in, buf)) {
-               l_ptr->deferred_inqueue_sz++;
+       if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
                l_ptr->stats.deferred_recv++;
                TIPC_SKB_CB(buf)->deferred = true;
-               if ((l_ptr->deferred_inqueue_sz % 16) == 1)
+               if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
                        tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
-       } else
+       } else {
                l_ptr->stats.duplicates++;
+       }
 }
 
 /*
@@ -1446,12 +1369,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
        u32 msg_size = sizeof(l_ptr->proto_msg);
        int r_flag;
 
-       /* Discard any previous message that was deferred due to congestion */
-       if (l_ptr->proto_msg_queue) {
-               kfree_skb(l_ptr->proto_msg_queue);
-               l_ptr->proto_msg_queue = NULL;
-       }
-
        /* Don't send protocol message during link changeover */
        if (l_ptr->exp_msg_count)
                return;
@@ -1474,8 +1391,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
                if (l_ptr->next_out)
                        next_sent = buf_seqno(l_ptr->next_out);
                msg_set_next_sent(msg, next_sent);
-               if (l_ptr->oldest_deferred_in) {
-                       u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
+               if (!skb_queue_empty(&l_ptr->deferred_queue)) {
+                       u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
                        gap = mod(rec - mod(l_ptr->next_in_no));
                }
                msg_set_seq_gap(msg, gap);
@@ -1663,7 +1580,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
                }
                if (msg_seq_gap(msg)) {
                        l_ptr->stats.recv_nacks++;
-                       tipc_link_retransmit(l_ptr, l_ptr->first_out,
+                       tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
                                             msg_seq_gap(msg));
                }
                break;
@@ -1682,7 +1599,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
                                  u32 selector)
 {
        struct tipc_link *tunnel;
-       struct sk_buff *buf;
+       struct sk_buff *skb;
        u32 length = msg_size(msg);
 
        tunnel = l_ptr->owner->active_links[selector & 1];
@@ -1691,14 +1608,14 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
                return;
        }
        msg_set_size(tunnel_hdr, length + INT_H_SIZE);
-       buf = tipc_buf_acquire(length + INT_H_SIZE);
-       if (!buf) {
+       skb = tipc_buf_acquire(length + INT_H_SIZE);
+       if (!skb) {
                pr_warn("%sunable to send tunnel msg\n", link_co_err);
                return;
        }
-       skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
-       skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
-       __tipc_link_xmit(tunnel, buf);
+       skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
+       skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
+       __tipc_link_xmit_skb(tunnel, skb);
 }
 
 
@@ -1710,10 +1627,10 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
  */
 void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
 {
-       u32 msgcount = l_ptr->out_queue_size;
-       struct sk_buff *crs = l_ptr->first_out;
+       u32 msgcount = skb_queue_len(&l_ptr->outqueue);
        struct tipc_link *tunnel = l_ptr->owner->active_links[0];
        struct tipc_msg tunnel_hdr;
+       struct sk_buff *skb;
        int split_bundles;
 
        if (!tunnel)
@@ -1724,14 +1641,12 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
        msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
        msg_set_msgcnt(&tunnel_hdr, msgcount);
 
-       if (!l_ptr->first_out) {
-               struct sk_buff *buf;
-
-               buf = tipc_buf_acquire(INT_H_SIZE);
-               if (buf) {
-                       skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
+       if (skb_queue_empty(&l_ptr->outqueue)) {
+               skb = tipc_buf_acquire(INT_H_SIZE);
+               if (skb) {
+                       skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
                        msg_set_size(&tunnel_hdr, INT_H_SIZE);
-                       __tipc_link_xmit(tunnel, buf);
+                       __tipc_link_xmit_skb(tunnel, skb);
                } else {
                        pr_warn("%sunable to send changeover msg\n",
                                link_co_err);
@@ -1742,8 +1657,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
        split_bundles = (l_ptr->owner->active_links[0] !=
                         l_ptr->owner->active_links[1]);
 
-       while (crs) {
-               struct tipc_msg *msg = buf_msg(crs);
+       skb_queue_walk(&l_ptr->outqueue, skb) {
+               struct tipc_msg *msg = buf_msg(skb);
 
                if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
                        struct tipc_msg *m = msg_get_wrapped(msg);
@@ -1761,7 +1676,6 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
                        tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
                                              msg_link_selector(msg));
                }
-               crs = crs->next;
        }
 }
 
@@ -1777,17 +1691,16 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
 void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
                              struct tipc_link *tunnel)
 {
-       struct sk_buff *iter;
+       struct sk_buff *skb;
        struct tipc_msg tunnel_hdr;
 
        tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
                 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
-       msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
+       msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
        msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
-       iter = l_ptr->first_out;
-       while (iter) {
-               struct sk_buff *outbuf;
-               struct tipc_msg *msg = buf_msg(iter);
+       skb_queue_walk(&l_ptr->outqueue, skb) {
+               struct sk_buff *outskb;
+               struct tipc_msg *msg = buf_msg(skb);
                u32 length = msg_size(msg);
 
                if (msg_user(msg) == MSG_BUNDLER)
@@ -1795,19 +1708,18 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));   /* Update */
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
                msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
-               outbuf = tipc_buf_acquire(length + INT_H_SIZE);
-               if (outbuf == NULL) {
+               outskb = tipc_buf_acquire(length + INT_H_SIZE);
+               if (outskb == NULL) {
                        pr_warn("%sunable to send duplicate msg\n",
                                link_co_err);
                        return;
                }
-               skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
-               skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
+               skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
+               skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
                                               length);
-               __tipc_link_xmit(tunnel, outbuf);
+               __tipc_link_xmit_skb(tunnel, outskb);
                if (!tipc_link_is_up(l_ptr))
                        return;
-               iter = iter->next;
        }
 }
 
index f463e7be801c68e0a0d0f51bad517ceaaf43c186..55812e87ca1e2a4b6cb460e067599edc9bbf158d 100644 (file)
@@ -119,20 +119,13 @@ struct tipc_stats {
  * @max_pkt: current maximum packet size for this link
  * @max_pkt_target: desired maximum packet size for this link
  * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
- * @out_queue_size: # of messages in outbound message queue
- * @first_out: ptr to first outbound message in queue
- * @last_out: ptr to last outbound message in queue
+ * @outqueue: outbound message queue
  * @next_out_no: next sequence number to use for outbound messages
  * @last_retransmitted: sequence number of most recently retransmitted message
  * @stale_count: # of identical retransmit requests made by peer
  * @next_in_no: next sequence number to expect for inbound messages
- * @deferred_inqueue_sz: # of messages in inbound message queue
- * @oldest_deferred_in: ptr to first inbound message in queue
- * @newest_deferred_in: ptr to last inbound message in queue
+ * @deferred_queue: deferred queue of out-of-sequence inbound messages
  * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
- * @proto_msg_queue: ptr to (single) outbound control message
- * @retransm_queue_size: number of messages to retransmit
- * @retransm_queue_head: sequence number of first message to retransmit
  * @next_out: ptr to first unsent outbound message in queue
  * @waiting_sks: linked list of sockets waiting for link congestion to abate
  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
@@ -176,24 +169,17 @@ struct tipc_link {
        u32 max_pkt_probes;
 
        /* Sending */
-       u32 out_queue_size;
-       struct sk_buff *first_out;
-       struct sk_buff *last_out;
+       struct sk_buff_head outqueue;
        u32 next_out_no;
        u32 last_retransmitted;
        u32 stale_count;
 
        /* Reception */
        u32 next_in_no;
-       u32 deferred_inqueue_sz;
-       struct sk_buff *oldest_deferred_in;
-       struct sk_buff *newest_deferred_in;
+       struct sk_buff_head deferred_queue;
        u32 unacked_window;
 
        /* Congestion handling */
-       struct sk_buff *proto_msg_queue;
-       u32 retransm_queue_size;
-       u32 retransm_queue_head;
        struct sk_buff *next_out;
        struct sk_buff_head waiting_sks;
 
@@ -227,18 +213,20 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
 void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
 void tipc_link_reset_list(unsigned int bearer_id);
-int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf);
+int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector);
+int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector);
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
 void tipc_link_bundle_rcv(struct sk_buff *buf);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
                          u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
-void tipc_link_push_queue(struct tipc_link *l_ptr);
-u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
-                       struct sk_buff *buf);
+void tipc_link_push_packets(struct tipc_link *l_ptr);
+u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
 void tipc_link_retransmit(struct tipc_link *l_ptr,
                          struct sk_buff *start, u32 retransmits);
+struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
+                                   const struct sk_buff *skb);
 
 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
@@ -259,18 +247,14 @@ static inline u32 mod(u32 x)
        return x & 0xffffu;
 }
 
-static inline int between(u32 lower, u32 upper, u32 n)
+static inline int less_eq(u32 left, u32 right)
 {
-       if ((lower < n) && (n < upper))
-               return 1;
-       if ((upper < lower) && ((n > lower) || (n < upper)))
-               return 1;
-       return 0;
+       return mod(right - left) < 32768u;
 }
 
-static inline int less_eq(u32 left, u32 right)
+static inline int more(u32 left, u32 right)
 {
-       return mod(right - left) < 32768u;
+       return !less_eq(left, right);
 }
 
 static inline int less(u32 left, u32 right)
@@ -309,7 +293,7 @@ static inline int link_reset_reset(struct tipc_link *l_ptr)
 
 static inline int link_congested(struct tipc_link *l_ptr)
 {
-       return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
+       return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0];
 }
 
 #endif
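
The rewritten helpers treat sequence numbers as points on a mod-2^16 circle: l is "less than or equal to" r when r is at most half the circle (32768) ahead of l, and more() is simply the negation. A small standalone check (plain C):

#include <assert.h>
#include <stdio.h>

static unsigned int mod(unsigned int x) { return x & 0xffffu; }
static int less_eq(unsigned int l, unsigned int r) { return mod(r - l) < 32768u; }
static int more(unsigned int l, unsigned int r)    { return !less_eq(l, r); }
static int less(unsigned int l, unsigned int r)    { return less_eq(l, r) && l != r; }

int main(void)
{
        assert(less(65535, 0));      /* 0 follows 65535 across the wrap */
        assert(more(0, 65535));      /* and the converse view agrees    */
        assert(less_eq(7, 7));       /* reflexive                       */
        printf("less(40000, 5000) = %d\n", less(40000, 5000)); /* 1 */
        return 0;
}
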
index ec18076e81ecaa4f1a36c07f750221c815a8c0ba..5b0659791c0769f0b6bf4519634d117ef6fa5903 100644 (file)
@@ -162,15 +162,16 @@ err:
 /**
  * tipc_msg_build - create buffer chain containing specified header and data
  * @mhdr: Message header, to be prepended to data
- * @iov: User data
+ * @m: User message
 * @offset: Position in iov to start copying from
  * @dsz: Total length of user data
  * @pktmax: Max packet size that can be used
- * @chain: Buffer or chain of buffers to be returned to caller
+ * @list: Buffer or chain of buffers to be returned to caller
+ *
  * Returns message data size or errno: -ENOMEM, -EFAULT
  */
-int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
-                  int offset, int dsz, int pktmax , struct sk_buff **chain)
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+                  int dsz, int pktmax, struct sk_buff_head *list)
 {
        int mhsz = msg_hdr_sz(mhdr);
        int msz = mhsz + dsz;
@@ -179,22 +180,22 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
        int pktrem = pktmax;
        int drem = dsz;
        struct tipc_msg pkthdr;
-       struct sk_buff *buf, *prev;
+       struct sk_buff *skb;
        char *pktpos;
        int rc;
-       uint chain_sz = 0;
+
        msg_set_size(mhdr, msz);
 
        /* No fragmentation needed? */
        if (likely(msz <= pktmax)) {
-               buf = tipc_buf_acquire(msz);
-               *chain = buf;
-               if (unlikely(!buf))
+               skb = tipc_buf_acquire(msz);
+               if (unlikely(!skb))
                        return -ENOMEM;
-               skb_copy_to_linear_data(buf, mhdr, mhsz);
-               pktpos = buf->data + mhsz;
-               TIPC_SKB_CB(buf)->chain_sz = 1;
-               if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz))
+               __skb_queue_tail(list, skb);
+               skb_copy_to_linear_data(skb, mhdr, mhsz);
+               pktpos = skb->data + mhsz;
+               if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iov, offset,
+                                                dsz))
                        return dsz;
                rc = -EFAULT;
                goto error;
@@ -207,15 +208,15 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
        msg_set_fragm_no(&pkthdr, pktno);
 
        /* Prepare first fragment */
-       *chain = buf = tipc_buf_acquire(pktmax);
-       if (!buf)
+       skb = tipc_buf_acquire(pktmax);
+       if (!skb)
                return -ENOMEM;
-       chain_sz = 1;
-       pktpos = buf->data;
-       skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
+       __skb_queue_tail(list, skb);
+       pktpos = skb->data;
+       skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
        pktpos += INT_H_SIZE;
        pktrem -= INT_H_SIZE;
-       skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz);
+       skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
        pktpos += mhsz;
        pktrem -= mhsz;
 
@@ -223,7 +224,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
                if (drem < pktrem)
                        pktrem = drem;
 
-               if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) {
+               if (memcpy_fromiovecend(pktpos, m->msg_iov, offset, pktrem)) {
                        rc = -EFAULT;
                        goto error;
                }
@@ -238,43 +239,41 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
                        pktsz = drem + INT_H_SIZE;
                else
                        pktsz = pktmax;
-               prev = buf;
-               buf = tipc_buf_acquire(pktsz);
-               if (!buf) {
+               skb = tipc_buf_acquire(pktsz);
+               if (!skb) {
                        rc = -ENOMEM;
                        goto error;
                }
-               chain_sz++;
-               prev->next = buf;
+               __skb_queue_tail(list, skb);
                msg_set_type(&pkthdr, FRAGMENT);
                msg_set_size(&pkthdr, pktsz);
                msg_set_fragm_no(&pkthdr, ++pktno);
-               skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
-               pktpos = buf->data + INT_H_SIZE;
+               skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
+               pktpos = skb->data + INT_H_SIZE;
                pktrem = pktsz - INT_H_SIZE;
 
        } while (1);
-       TIPC_SKB_CB(*chain)->chain_sz = chain_sz;
-       msg_set_type(buf_msg(buf), LAST_FRAGMENT);
+       msg_set_type(buf_msg(skb), LAST_FRAGMENT);
        return dsz;
 error:
-       kfree_skb_list(*chain);
-       *chain = NULL;
+       __skb_queue_purge(list);
+       __skb_queue_head_init(list);
        return rc;
 }
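
Illustration of the fragmentation arithmetic (standalone C, not kernel code; INT_H_SIZE is assumed to be the 40-byte TIPC internal header of this era, and the example sizes are made up): every fragment spends INT_H_SIZE bytes on the fragment header, and the user header plus user data fill the remaining room.

#include <stdio.h>

#define INT_H_SIZE 40   /* assumed TIPC internal header size */

static int frag_count(int mhsz, int dsz, int pktmax)
{
        int room = pktmax - INT_H_SIZE;     /* payload room per fragment */
        int total = mhsz + dsz;             /* user header + user data   */
        return (total + room - 1) / room;   /* ceiling division          */
}

int main(void)
{
        /* e.g. 24-byte user header, 60000 bytes data, 1500-byte packets */
        printf("%d fragments\n", frag_count(24, 60000, 1500));
        return 0;
}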
 
 /**
  * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @bbuf: the existing buffer ("bundle")
- * @buf:  buffer to be appended
+ * @list: the buffer chain of the existing buffer ("bundle")
+ * @skb:  buffer to be appended
  * @mtu:  max allowable size for the bundle buffer
  * Consumes buffer if successful
  * Returns true if bundling could be performed, otherwise false
  */
-bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
+bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
 {
-       struct tipc_msg *bmsg = buf_msg(bbuf);
-       struct tipc_msg *msg = buf_msg(buf);
+       struct sk_buff *bskb = skb_peek_tail(list);
+       struct tipc_msg *bmsg = buf_msg(bskb);
+       struct tipc_msg *msg = buf_msg(skb);
        unsigned int bsz = msg_size(bmsg);
        unsigned int msz = msg_size(msg);
        u32 start = align(bsz);
@@ -289,35 +288,36 @@ bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
                return false;
        if (likely(msg_user(bmsg) != MSG_BUNDLER))
                return false;
-       if (likely(msg_type(bmsg) != BUNDLE_OPEN))
+       if (likely(!TIPC_SKB_CB(bskb)->bundling))
                return false;
-       if (unlikely(skb_tailroom(bbuf) < (pad + msz)))
+       if (unlikely(skb_tailroom(bskb) < (pad + msz)))
                return false;
        if (unlikely(max < (start + msz)))
                return false;
 
-       skb_put(bbuf, pad + msz);
-       skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz);
+       skb_put(bskb, pad + msz);
+       skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
        msg_set_size(bmsg, start + msz);
        msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
-       bbuf->next = buf->next;
-       kfree_skb(buf);
+       kfree_skb(skb);
        return true;
 }
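
A standalone sketch (plain C) of the space test applied before appending: the new message starts at the bundle's current size rounded up to 4-byte alignment, and must still fit inside the bundle capacity.

#include <stdbool.h>
#include <stdio.h>

static unsigned int align4(unsigned int x) { return (x + 3u) & ~3u; }

/* bsz: current bundle size, msz: candidate message size,
 * max: bundle capacity (link MTU minus bundle header)
 */
static bool fits_in_bundle(unsigned int bsz, unsigned int msz, unsigned int max)
{
        unsigned int start = align4(bsz);   /* where the message would land */
        return start + msz <= max;
}

int main(void)
{
        printf("%d\n", fits_in_bundle(61, 100, 1460));   /* 64 + 100 <= 1460 */
        printf("%d\n", fits_in_bundle(1400, 100, 1460)); /* 1400 + 100 > 1460 */
        return 0;
}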
 
 /**
  * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
- * @buf:  buffer to be appended and replaced
- * @mtu:  max allowable size for the bundle buffer, inclusive header
+ * @list: the buffer chain
+ * @skb: buffer to be appended and replaced
+ * @mtu: max allowable size for the bundle buffer, inclusive header
  * @dnode: destination node for message. (Not always present in header)
  * Replaces buffer if successful
  * Returns true if success, otherwise false
  */
-bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
+                         u32 mtu, u32 dnode)
 {
-       struct sk_buff *bbuf;
+       struct sk_buff *bskb;
        struct tipc_msg *bmsg;
-       struct tipc_msg *msg = buf_msg(*buf);
+       struct tipc_msg *msg = buf_msg(skb);
        u32 msz = msg_size(msg);
        u32 max = mtu - INT_H_SIZE;
 
@@ -330,20 +330,19 @@ bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
        if (msz > (max / 2))
                return false;
 
-       bbuf = tipc_buf_acquire(max);
-       if (!bbuf)
+       bskb = tipc_buf_acquire(max);
+       if (!bskb)
                return false;
 
-       skb_trim(bbuf, INT_H_SIZE);
-       bmsg = buf_msg(bbuf);
-       tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode);
+       skb_trim(bskb, INT_H_SIZE);
+       bmsg = buf_msg(bskb);
+       tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
-       bbuf->next = (*buf)->next;
-       tipc_msg_bundle(bbuf, *buf, mtu);
-       *buf = bbuf;
-       return true;
+       TIPC_SKB_CB(bskb)->bundling = true;
+       __skb_queue_tail(list, bskb);
+       return tipc_msg_bundle(list, skb, mtu);
 }
 
 /**
@@ -429,22 +428,23 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
 /* tipc_msg_reassemble() - clone a buffer chain of fragments and
  *                         reassemble the clones into one message
  */
-struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
+struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
 {
-       struct sk_buff *buf = chain;
-       struct sk_buff *frag = buf;
+       struct sk_buff *skb;
+       struct sk_buff *frag = NULL;
        struct sk_buff *head = NULL;
        int hdr_sz;
 
        /* Copy header if single buffer */
-       if (!buf->next) {
-               hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf));
-               return __pskb_copy(buf, hdr_sz, GFP_ATOMIC);
+       if (skb_queue_len(list) == 1) {
+               skb = skb_peek(list);
+               hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
+               return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
        }
 
        /* Clone all fragments and reassemble */
-       while (buf) {
-               frag = skb_clone(buf, GFP_ATOMIC);
+       skb_queue_walk(list, skb) {
+               frag = skb_clone(skb, GFP_ATOMIC);
                if (!frag)
                        goto error;
                frag->next = NULL;
@@ -452,7 +452,6 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
                        break;
                if (!head)
                        goto error;
-               buf = buf->next;
        }
        return frag;
 error:
index 0ea7b695ac4d891a7556ae2f08110d11306369f9..d5c83d7ecb479f30de9fe9b8f8aadbac5aa0c64b 100644 (file)
@@ -464,11 +464,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
 #define FRAGMENT               1
 #define LAST_FRAGMENT          2
 
-/* Bundling protocol message types
- */
-#define BUNDLE_OPEN             0
-#define BUNDLE_CLOSED           1
-
 /*
  * Link management protocol message types
  */
@@ -739,13 +734,14 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
 
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
 
-bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu);
+bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
 
-bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode);
+bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
+                         u32 mtu, u32 dnode);
 
-int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
-                  int offset, int dsz, int mtu , struct sk_buff **chain);
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+                  int dsz, int mtu, struct sk_buff_head *list);
 
-struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain);
+struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
 #endif
index 376d2bb51d8da19bd00ee48f4b374ca098f36413..56248db75274913fa4e14c7d8e57fda36b730505 100644 (file)
@@ -114,9 +114,9 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
        return buf;
 }
 
-void named_cluster_distribute(struct sk_buff *buf)
+void named_cluster_distribute(struct sk_buff *skb)
 {
-       struct sk_buff *obuf;
+       struct sk_buff *oskb;
        struct tipc_node *node;
        u32 dnode;
 
@@ -127,15 +127,15 @@ void named_cluster_distribute(struct sk_buff *buf)
                        continue;
                if (!tipc_node_active_links(node))
                        continue;
-               obuf = skb_copy(buf, GFP_ATOMIC);
-               if (!obuf)
+               oskb = skb_copy(skb, GFP_ATOMIC);
+               if (!oskb)
                        break;
-               msg_set_destnode(buf_msg(obuf), dnode);
-               tipc_link_xmit(obuf, dnode, dnode);
+               msg_set_destnode(buf_msg(oskb), dnode);
+               tipc_link_xmit_skb(oskb, dnode, dnode);
        }
        rcu_read_unlock();
 
-       kfree_skb(buf);
+       kfree_skb(skb);
 }
 
 /**
@@ -190,15 +190,15 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
 
 /**
  * named_distribute - prepare name info for bulk distribution to another node
- * @msg_list: list of messages (buffers) to be returned from this function
+ * @list: list of messages (buffers) to be returned from this function
  * @dnode: node to be updated
  * @pls: linked list of publication items to be packed into buffer chain
  */
-static void named_distribute(struct list_head *msg_list, u32 dnode,
+static void named_distribute(struct sk_buff_head *list, u32 dnode,
                             struct publ_list *pls)
 {
        struct publication *publ;
-       struct sk_buff *buf = NULL;
+       struct sk_buff *skb = NULL;
        struct distr_item *item = NULL;
        uint dsz = pls->size * ITEM_SIZE;
        uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
@@ -207,15 +207,15 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
 
        list_for_each_entry(publ, &pls->list, local_list) {
                /* Prepare next buffer: */
-               if (!buf) {
+               if (!skb) {
                        msg_rem = min_t(uint, rem, msg_dsz);
                        rem -= msg_rem;
-                       buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
-                       if (!buf) {
+                       skb = named_prepare_buf(PUBLICATION, msg_rem, dnode);
+                       if (!skb) {
                                pr_warn("Bulk publication failure\n");
                                return;
                        }
-                       item = (struct distr_item *)msg_data(buf_msg(buf));
+                       item = (struct distr_item *)msg_data(buf_msg(skb));
                }
 
                /* Pack publication into message: */
@@ -225,8 +225,8 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
 
                /* Append full buffer to list: */
                if (!msg_rem) {
-                       list_add_tail((struct list_head *)buf, msg_list);
-                       buf = NULL;
+                       __skb_queue_tail(list, skb);
+                       skb = NULL;
                }
        }
 }
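
For illustration (standalone C; the 20-byte ITEM_SIZE is an assumption for this sketch, matching a five-u32 distr_item, and the MTU is made up): each bulk message carries as many whole publication items as the MTU allows, mirroring the msg_dsz computation above.

#include <stdio.h>

#define ITEM_SIZE 20u   /* assumed: sizeof(struct distr_item) */

int main(void)
{
        unsigned int mtu = 1500, items = 1000;
        /* Round the per-message payload down to whole items, as in
         * msg_dsz = (mtu / ITEM_SIZE) * ITEM_SIZE
         */
        unsigned int per_msg = mtu / ITEM_SIZE;
        unsigned int msgs = (items + per_msg - 1) / per_msg;

        printf("%u items per message, %u messages\n", per_msg, msgs);
        return 0;
}
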
@@ -236,27 +236,57 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
  */
 void tipc_named_node_up(u32 dnode)
 {
-       LIST_HEAD(msg_list);
-       struct sk_buff *buf_chain;
+       struct sk_buff_head head;
+
+       __skb_queue_head_init(&head);
 
        read_lock_bh(&tipc_nametbl_lock);
-       named_distribute(&msg_list, dnode, &publ_cluster);
-       named_distribute(&msg_list, dnode, &publ_zone);
+       named_distribute(&head, dnode, &publ_cluster);
+       named_distribute(&head, dnode, &publ_zone);
        read_unlock_bh(&tipc_nametbl_lock);
 
-       /* Convert circular list to linear list and send: */
-       buf_chain = (struct sk_buff *)msg_list.next;
-       ((struct sk_buff *)msg_list.prev)->next = NULL;
-       tipc_link_xmit(buf_chain, dnode, dnode);
+       tipc_link_xmit(&head, dnode, dnode);
+}
+
+static void tipc_publ_subscribe(struct publication *publ, u32 addr)
+{
+       struct tipc_node *node;
+
+       if (in_own_node(addr))
+               return;
+
+       node = tipc_node_find(addr);
+       if (!node) {
+               pr_warn("Node subscription rejected, unknown node 0x%x\n",
+                       addr);
+               return;
+       }
+
+       tipc_node_lock(node);
+       list_add_tail(&publ->nodesub_list, &node->publ_list);
+       tipc_node_unlock(node);
+}
+
+static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
+{
+       struct tipc_node *node;
+
+       node = tipc_node_find(addr);
+       if (!node)
+               return;
+
+       tipc_node_lock(node);
+       list_del_init(&publ->nodesub_list);
+       tipc_node_unlock(node);
 }
 
 /**
- * named_purge_publ - remove publication associated with a failed node
+ * tipc_publ_purge - remove publication associated with a failed node
  *
  * Invoked for each publication issued by a newly failed node.
  * Removes publication structure from name table & deletes it.
  */
-static void named_purge_publ(struct publication *publ)
+static void tipc_publ_purge(struct publication *publ, u32 addr)
 {
        struct publication *p;
 
@@ -264,7 +294,7 @@ static void named_purge_publ(struct publication *publ)
        p = tipc_nametbl_remove_publ(publ->type, publ->lower,
                                     publ->node, publ->ref, publ->key);
        if (p)
-               tipc_nodesub_unsubscribe(&p->subscr);
+               tipc_publ_unsubscribe(p, addr);
        write_unlock_bh(&tipc_nametbl_lock);
 
        if (p != publ) {
@@ -277,6 +307,14 @@ static void named_purge_publ(struct publication *publ)
        kfree(p);
 }
 
+void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
+{
+       struct publication *publ, *tmp;
+
+       list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
+               tipc_publ_purge(publ, addr);
+}
+
 /**
  * tipc_update_nametbl - try to process a nametable update and notify
  *                      subscribers
@@ -294,9 +332,7 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
                                                TIPC_CLUSTER_SCOPE, node,
                                                ntohl(i->ref), ntohl(i->key));
                if (publ) {
-                       tipc_nodesub_subscribe(&publ->subscr, node, publ,
-                                              (net_ev_handler)
-                                              named_purge_publ);
+                       tipc_publ_subscribe(publ, node);
                        return true;
                }
        } else if (dtype == WITHDRAWAL) {
@@ -304,7 +340,7 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
                                                node, ntohl(i->ref),
                                                ntohl(i->key));
                if (publ) {
-                       tipc_nodesub_unsubscribe(&publ->subscr);
+                       tipc_publ_unsubscribe(publ, node);
                        kfree(publ);
                        return true;
                }
index b9e75feb3434e76fc96a5e71a07e94fbcc709888..cef55cedcfb29c76b8367486d1e28cb376118339 100644 (file)
@@ -74,5 +74,6 @@ void tipc_named_node_up(u32 dnode);
 void tipc_named_rcv(struct sk_buff *buf);
 void tipc_named_reinit(void);
 void tipc_named_process_backlog(void);
+void tipc_publ_notify(struct list_head *nsub_list, u32 addr);
 
 #endif
index 7cfb7a4aa58fc0efca8527df3225aee79d445c0d..772be1cd8bf6ff3b7216c931528ef3058cfff29d 100644 (file)
@@ -144,7 +144,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
        publ->key = key;
        INIT_LIST_HEAD(&publ->local_list);
        INIT_LIST_HEAD(&publ->pport_list);
-       INIT_LIST_HEAD(&publ->subscr.nodesub_list);
+       INIT_LIST_HEAD(&publ->nodesub_list);
        return publ;
 }
 
index b38ebecac7665dc926d5120b4af4053db96d8d59..c62877826655279446c2dc1437f5e7b16ec8ae55 100644 (file)
@@ -37,8 +37,6 @@
 #ifndef _TIPC_NAME_TABLE_H
 #define _TIPC_NAME_TABLE_H
 
-#include "node_subscr.h"
-
 struct tipc_subscription;
 struct tipc_port_list;
 
@@ -56,7 +54,7 @@ struct tipc_port_list;
  * @node: network address of publishing port's node
  * @ref: publishing port
  * @key: publication key
- * @subscr: subscription to "node down" event (for off-node publications only)
+ * @nodesub_list: subscription to "node down" event (off-node publication only)
  * @local_list: adjacent entries in list of publications made by this node
  * @pport_list: adjacent entries in list of publications made by this port
  * @node_list: adjacent matching name seq publications with >= node scope
@@ -73,7 +71,7 @@ struct publication {
        u32 node;
        u32 ref;
        u32 key;
-       struct tipc_node_subscr subscr;
+       struct list_head nodesub_list;
        struct list_head local_list;
        struct list_head pport_list;
        struct list_head node_list;
index 82e5edddc376083cb204dd616833bfaaa5dee191..69b96be09a86f057a3761d89266252741a2107a6 100644 (file)
@@ -113,9 +113,10 @@ struct tipc_node *tipc_node_create(u32 addr)
        spin_lock_init(&n_ptr->lock);
        INIT_HLIST_NODE(&n_ptr->hash);
        INIT_LIST_HEAD(&n_ptr->list);
-       INIT_LIST_HEAD(&n_ptr->nsub);
+       INIT_LIST_HEAD(&n_ptr->publ_list);
        INIT_LIST_HEAD(&n_ptr->conn_sks);
        __skb_queue_head_init(&n_ptr->waiting_sks);
+       __skb_queue_head_init(&n_ptr->bclink.deferred_queue);
 
        hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
 
@@ -381,8 +382,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 
        /* Flush broadcast link info associated with lost node */
        if (n_ptr->bclink.recv_permitted) {
-               kfree_skb_list(n_ptr->bclink.deferred_head);
-               n_ptr->bclink.deferred_size = 0;
+               __skb_queue_purge(&n_ptr->bclink.deferred_queue);
 
                if (n_ptr->bclink.reasm_buf) {
                        kfree_skb(n_ptr->bclink.reasm_buf);
@@ -574,7 +574,7 @@ void tipc_node_unlock(struct tipc_node *node)
                skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
 
        if (flags & TIPC_NOTIFY_NODE_DOWN) {
-               list_replace_init(&node->nsub, &nsub_list);
+               list_replace_init(&node->publ_list, &nsub_list);
                list_replace_init(&node->conn_sks, &conn_sks);
        }
        node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
@@ -591,7 +591,7 @@ void tipc_node_unlock(struct tipc_node *node)
                tipc_node_abort_sock_conns(&conn_sks);
 
        if (!list_empty(&nsub_list))
-               tipc_nodesub_notify(&nsub_list);
+               tipc_publ_notify(&nsub_list, addr);
 
        if (flags & TIPC_WAKEUP_BCAST_USERS)
                tipc_bclink_wakeup_users();
index 005fbcef32123b37f063ceaf29e6843889f7b69a..cbe0e950f1ccb81a63a3e0a19719cdef6fb2c6c5 100644
@@ -37,7 +37,6 @@
 #ifndef _TIPC_NODE_H
 #define _TIPC_NODE_H
 
-#include "node_subscr.h"
 #include "addr.h"
 #include "net.h"
 #include "bearer.h"
@@ -72,9 +71,7 @@ enum {
  * @last_in: sequence # of last in-sequence b'cast message received from node
  * @last_sent: sequence # of last b'cast message sent by node
  * @oos_state: state tracker for handling OOS b'cast messages
- * @deferred_size: number of OOS b'cast messages in deferred queue
- * @deferred_head: oldest OOS b'cast message received from node
- * @deferred_tail: newest OOS b'cast message received from node
+ * @deferred_queue: deferred queue of OOS b'cast messages received from node
  * @reasm_buf: broadcast reassembly queue head from node
  * @recv_permitted: true if node is allowed to receive b'cast messages
  */
@@ -84,8 +81,7 @@ struct tipc_node_bclink {
        u32 last_sent;
        u32 oos_state;
        u32 deferred_size;
-       struct sk_buff *deferred_head;
-       struct sk_buff *deferred_tail;
+       struct sk_buff_head deferred_queue;
        struct sk_buff *reasm_buf;
        bool recv_permitted;
 };
@@ -104,7 +100,7 @@ struct tipc_node_bclink {
  * @link_cnt: number of links to node
  * @signature: node instance identifier
  * @link_id: local and remote bearer ids of changing link, if any
- * @nsub: list of "node down" subscriptions monitoring node
+ * @publ_list: list of publications
  * @rcu: rcu struct for tipc_node
  */
 struct tipc_node {
@@ -121,7 +117,7 @@ struct tipc_node {
        int working_links;
        u32 signature;
        u32 link_id;
-       struct list_head nsub;
+       struct list_head publ_list;
        struct sk_buff_head waiting_sks;
        struct list_head conn_sks;
        struct rcu_head rcu;
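
The hand-rolled deferred_head/deferred_tail pointer pair becomes a standard sk_buff_head, so the deferred broadcast queue can reuse the generic skb queue helpers instead of open-coded list surgery. Roughly, the mapping is (the lockless "__" variants are used because the node lock already serializes access):

    __skb_queue_head_init(&q);   /* replaces zeroing deferred_head/deferred_tail */
    __skb_queue_tail(&q, skb);   /* replaces manual tail-pointer chaining */
    skb = __skb_dequeue(&q);     /* oldest out-of-sequence buffer, or NULL */
    __skb_queue_purge(&q);       /* replaces kfree_skb_list(deferred_head) */
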
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
deleted file mode 100644 (file)
index 2d13eea..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * net/tipc/node_subscr.c: TIPC "node down" subscription handling
- *
- * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, 2010-2011, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "node_subscr.h"
-#include "node.h"
-
-/**
- * tipc_nodesub_subscribe - create "node down" subscription for specified node
- */
-void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
-                           void *usr_handle, net_ev_handler handle_down)
-{
-       if (in_own_node(addr)) {
-               node_sub->node = NULL;
-               return;
-       }
-
-       node_sub->node = tipc_node_find(addr);
-       if (!node_sub->node) {
-               pr_warn("Node subscription rejected, unknown node 0x%x\n",
-                       addr);
-               return;
-       }
-       node_sub->handle_node_down = handle_down;
-       node_sub->usr_handle = usr_handle;
-
-       tipc_node_lock(node_sub->node);
-       list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
-       tipc_node_unlock(node_sub->node);
-}
-
-/**
- * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
- */
-void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
-{
-       if (!node_sub->node)
-               return;
-
-       tipc_node_lock(node_sub->node);
-       list_del_init(&node_sub->nodesub_list);
-       tipc_node_unlock(node_sub->node);
-}
-
-/**
- * tipc_nodesub_notify - notify subscribers that a node is unreachable
- *
- * Note: node is locked by caller
- */
-void tipc_nodesub_notify(struct list_head *nsub_list)
-{
-       struct tipc_node_subscr *ns, *safe;
-       net_ev_handler handle_node_down;
-
-       list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
-               handle_node_down = ns->handle_node_down;
-               if (handle_node_down) {
-                       ns->handle_node_down = NULL;
-                       handle_node_down(ns->usr_handle);
-               }
-       }
-}
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
deleted file mode 100644 (file)
index d91b8cc..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
- *
- * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, 2010-2011, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_NODE_SUBSCR_H
-#define _TIPC_NODE_SUBSCR_H
-
-#include "addr.h"
-
-typedef void (*net_ev_handler) (void *usr_handle);
-
-/**
- * struct tipc_node_subscr - "node down" subscription entry
- * @node: ptr to node structure of interest (or NULL, if none)
- * @handle_node_down: routine to invoke when node fails
- * @usr_handle: argument to pass to routine when node fails
- * @nodesub_list: adjacent entries in list of subscriptions for the node
- */
-struct tipc_node_subscr {
-       struct tipc_node *node;
-       net_ev_handler handle_node_down;
-       void *usr_handle;
-       struct list_head nodesub_list;
-};
-
-void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
-                           void *usr_handle, net_ev_handler handle_down);
-void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
-void tipc_nodesub_notify(struct list_head *nsub_list);
-
-#endif
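
With node_subscr.[ch] deleted, the callback indirection through struct tipc_node_subscr goes away: a publication now links itself straight onto the owning node's publ_list through its new nodesub_list member. The bodies of tipc_publ_subscribe()/tipc_publ_unsubscribe() are not among the hunks shown here; a plausible sketch, mirroring the removed tipc_nodesub_subscribe() above:

    static void tipc_publ_subscribe(struct publication *publ, u32 addr)
    {
            struct tipc_node *node;

            if (in_own_node(addr))
                    return;

            node = tipc_node_find(addr);
            if (!node) {
                    pr_warn("Node subscription rejected, unknown node 0x%x\n", addr);
                    return;
            }

            /* link the publication itself; no callback pointer needed */
            tipc_node_lock(node);
            list_add_tail(&publ->nodesub_list, &node->publ_list);
            tipc_node_unlock(node);
    }

tipc_publ_notify() can then walk the list spliced out by tipc_node_unlock() and purge each publication directly, with no per-entry function pointer to dispatch through.
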
index 6aa8c6a1ab10c105610c07a87b4e246cbc0790a8..9658d9b638764d08f3767757a46feddbbfca5265 100644
@@ -244,12 +244,12 @@ static void tsk_advance_rx_queue(struct sock *sk)
  */
 static void tsk_rej_rx_queue(struct sock *sk)
 {
-       struct sk_buff *buf;
+       struct sk_buff *skb;
        u32 dnode;
 
-       while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
-               if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
-                       tipc_link_xmit(buf, dnode, 0);
+       while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
+               if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
+                       tipc_link_xmit_skb(skb, dnode, 0);
        }
 }
 
@@ -462,7 +462,7 @@ static int tipc_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk;
-       struct sk_buff *buf;
+       struct sk_buff *skb;
        u32 dnode;
 
        /*
@@ -481,11 +481,11 @@ static int tipc_release(struct socket *sock)
         */
        dnode = tsk_peer_node(tsk);
        while (sock->state != SS_DISCONNECTING) {
-               buf = __skb_dequeue(&sk->sk_receive_queue);
-               if (buf == NULL)
+               skb = __skb_dequeue(&sk->sk_receive_queue);
+               if (skb == NULL)
                        break;
-               if (TIPC_SKB_CB(buf)->handle != NULL)
-                       kfree_skb(buf);
+               if (TIPC_SKB_CB(skb)->handle != NULL)
+                       kfree_skb(skb);
                else {
                        if ((sock->state == SS_CONNECTING) ||
                            (sock->state == SS_CONNECTED)) {
@@ -493,8 +493,8 @@ static int tipc_release(struct socket *sock)
                                tsk->connected = 0;
                                tipc_node_remove_conn(dnode, tsk->ref);
                        }
-                       if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
-                               tipc_link_xmit(buf, dnode, 0);
+                       if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
+                               tipc_link_xmit_skb(skb, dnode, 0);
                }
        }
 
@@ -502,12 +502,12 @@ static int tipc_release(struct socket *sock)
        tipc_sk_ref_discard(tsk->ref);
        k_cancel_timer(&tsk->timer);
        if (tsk->connected) {
-               buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
                                      SHORT_H_SIZE, 0, dnode, tipc_own_addr,
                                      tsk_peer_port(tsk),
                                      tsk->ref, TIPC_ERR_NO_PORT);
-               if (buf)
-                       tipc_link_xmit(buf, dnode, tsk->ref);
+               if (skb)
+                       tipc_link_xmit_skb(skb, dnode, tsk->ref);
                tipc_node_remove_conn(dnode, tsk->ref);
        }
        k_term_timer(&tsk->timer);
@@ -700,7 +700,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
  * tipc_sendmcast - send multicast message
  * @sock: socket structure
  * @seq: destination address
- * @iov: message data to send
+ * @msg: message to send
  * @dsz: total length of message data
  * @timeo: timeout to wait for wakeup
  *
@@ -708,11 +708,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
  * Returns the number of bytes sent on success, or errno
  */
 static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
-                         struct iovec *iov, size_t dsz, long timeo)
+                         struct msghdr *msg, size_t dsz, long timeo)
 {
        struct sock *sk = sock->sk;
        struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
-       struct sk_buff *buf;
+       struct sk_buff_head head;
        uint mtu;
        int rc;
 
@@ -727,12 +727,13 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
 
 new_mtu:
        mtu = tipc_bclink_get_mtu();
-       rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
+       __skb_queue_head_init(&head);
+       rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
        if (unlikely(rc < 0))
                return rc;
 
        do {
-               rc = tipc_bclink_xmit(buf);
+               rc = tipc_bclink_xmit(&head);
                if (likely(rc >= 0)) {
                        rc = dsz;
                        break;
@@ -744,7 +745,7 @@ new_mtu:
                tipc_sk(sk)->link_cong = 1;
                rc = tipc_wait_for_sndmsg(sock, &timeo);
                if (rc)
-                       kfree_skb_list(buf);
+                       __skb_queue_purge(&head);
        } while (!rc);
        return rc;
 }
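
tipc_msg_build() now fragments a message into a caller-provided sk_buff_head rather than returning a single chained skb, and the xmit functions consume the whole queue; cleanup on failure becomes __skb_queue_purge() instead of kfree_skb_list(). A condensed sketch of the loop above (MTU-change retry omitted), not a drop-in replacement:

    struct sk_buff_head head;
    int rc;

    __skb_queue_head_init(&head);
    rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);  /* fragment into the queue */
    if (rc < 0)
            return rc;
    rc = tipc_bclink_xmit(&head);            /* transmits and consumes the queue */
    if (rc < 0 && tipc_wait_for_sndmsg(sock, &timeo))
            __skb_queue_purge(&head);        /* waiting failed: free queued fragments */
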
@@ -905,9 +906,9 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
-       struct iovec *iov = m->msg_iov;
        u32 dnode, dport;
-       struct sk_buff *buf;
+       struct sk_buff_head head;
+       struct sk_buff *skb;
        struct tipc_name_seq *seq = &dest->addr.nameseq;
        u32 mtu;
        long timeo;
@@ -951,7 +952,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
 
        if (dest->addrtype == TIPC_ADDR_MCAST) {
-               rc = tipc_sendmcast(sock, seq, iov, dsz, timeo);
+               rc = tipc_sendmcast(sock, seq, m, dsz, timeo);
                goto exit;
        } else if (dest->addrtype == TIPC_ADDR_NAME) {
                u32 type = dest->addr.name.name.type;
@@ -982,13 +983,15 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 new_mtu:
        mtu = tipc_node_get_mtu(dnode, tsk->ref);
-       rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
+       __skb_queue_head_init(&head);
+       rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
        if (rc < 0)
                goto exit;
 
        do {
-               TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong;
-               rc = tipc_link_xmit(buf, dnode, tsk->ref);
+               skb = skb_peek(&head);
+               TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
+               rc = tipc_link_xmit(&head, dnode, tsk->ref);
                if (likely(rc >= 0)) {
                        if (sock->state != SS_READY)
                                sock->state = SS_CONNECTING;
@@ -1002,7 +1005,7 @@ new_mtu:
                tsk->link_cong = 1;
                rc = tipc_wait_for_sndmsg(sock, &timeo);
                if (rc)
-                       kfree_skb_list(buf);
+                       __skb_queue_purge(&head);
        } while (!rc);
 exit:
        if (iocb)
@@ -1059,7 +1062,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
-       struct sk_buff *buf;
+       struct sk_buff_head head;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        u32 ref = tsk->ref;
        int rc = -EINVAL;
@@ -1094,12 +1097,13 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
 next:
        mtu = tsk->max_pkt;
        send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
-       rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf);
+       __skb_queue_head_init(&head);
+       rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
        if (unlikely(rc < 0))
                goto exit;
        do {
                if (likely(!tsk_conn_cong(tsk))) {
-                       rc = tipc_link_xmit(buf, dnode, ref);
+                       rc = tipc_link_xmit(&head, dnode, ref);
                        if (likely(!rc)) {
                                tsk->sent_unacked++;
                                sent += send;
@@ -1117,7 +1121,7 @@ next:
                }
                rc = tipc_wait_for_sndpkt(sock, &timeo);
                if (rc)
-                       kfree_skb_list(buf);
+                       __skb_queue_purge(&head);
        } while (!rc);
 exit:
        if (iocb)
@@ -1262,20 +1266,20 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
 
 static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
 {
-       struct sk_buff *buf = NULL;
+       struct sk_buff *skb = NULL;
        struct tipc_msg *msg;
        u32 peer_port = tsk_peer_port(tsk);
        u32 dnode = tsk_peer_node(tsk);
 
        if (!tsk->connected)
                return;
-       buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
+       skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
                              tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
-       if (!buf)
+       if (!skb)
                return;
-       msg = buf_msg(buf);
+       msg = buf_msg(skb);
        msg_set_msgcnt(msg, ack);
-       tipc_link_xmit(buf, dnode, msg_link_selector(msg));
+       tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
 }
 
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1730,20 +1734,20 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
 /**
  * tipc_backlog_rcv - handle incoming message from backlog queue
  * @sk: socket
- * @buf: message
+ * @skb: message
  *
  * Caller must hold socket lock, but not port lock.
  *
  * Returns 0
  */
-static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
+static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
        int rc;
        u32 onode;
        struct tipc_sock *tsk = tipc_sk(sk);
-       uint truesize = buf->truesize;
+       uint truesize = skb->truesize;
 
-       rc = filter_rcv(sk, buf);
+       rc = filter_rcv(sk, skb);
 
        if (likely(!rc)) {
                if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
@@ -1751,25 +1755,25 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
                return 0;
        }
 
-       if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc))
+       if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
                return 0;
 
-       tipc_link_xmit(buf, onode, 0);
+       tipc_link_xmit_skb(skb, onode, 0);
 
        return 0;
 }
 
 /**
  * tipc_sk_rcv - handle incoming message
- * @buf: buffer containing arriving message
+ * @skb: buffer containing arriving message
  * Consumes buffer
  * Returns 0 if success, or errno: -EHOSTUNREACH
  */
-int tipc_sk_rcv(struct sk_buff *buf)
+int tipc_sk_rcv(struct sk_buff *skb)
 {
        struct tipc_sock *tsk;
        struct sock *sk;
-       u32 dport = msg_destport(buf_msg(buf));
+       u32 dport = msg_destport(buf_msg(skb));
        int rc = TIPC_OK;
        uint limit;
        u32 dnode;
@@ -1777,7 +1781,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
        /* Validate destination and message */
        tsk = tipc_sk_get(dport);
        if (unlikely(!tsk)) {
-               rc = tipc_msg_eval(buf, &dnode);
+               rc = tipc_msg_eval(skb, &dnode);
                goto exit;
        }
        sk = &tsk->sk;
@@ -1786,12 +1790,12 @@ int tipc_sk_rcv(struct sk_buff *buf)
        spin_lock_bh(&sk->sk_lock.slock);
 
        if (!sock_owned_by_user(sk)) {
-               rc = filter_rcv(sk, buf);
+               rc = filter_rcv(sk, skb);
        } else {
                if (sk->sk_backlog.len == 0)
                        atomic_set(&tsk->dupl_rcvcnt, 0);
-               limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
-               if (sk_add_backlog(sk, buf, limit))
+               limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
+               if (sk_add_backlog(sk, skb, limit))
                        rc = -TIPC_ERR_OVERLOAD;
        }
        spin_unlock_bh(&sk->sk_lock.slock);
@@ -1799,10 +1803,10 @@ int tipc_sk_rcv(struct sk_buff *buf)
        if (likely(!rc))
                return 0;
 exit:
-       if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc))
+       if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
                return -EHOSTUNREACH;
 
-       tipc_link_xmit(buf, dnode, 0);
+       tipc_link_xmit_skb(skb, dnode, 0);
        return (rc < 0) ? -EHOSTUNREACH : 0;
 }
 
@@ -2060,7 +2064,7 @@ static int tipc_shutdown(struct socket *sock, int how)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct sk_buff *buf;
+       struct sk_buff *skb;
        u32 dnode;
        int res;
 
@@ -2075,23 +2079,23 @@ static int tipc_shutdown(struct socket *sock, int how)
 
 restart:
                /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
-               buf = __skb_dequeue(&sk->sk_receive_queue);
-               if (buf) {
-                       if (TIPC_SKB_CB(buf)->handle != NULL) {
-                               kfree_skb(buf);
+               skb = __skb_dequeue(&sk->sk_receive_queue);
+               if (skb) {
+                       if (TIPC_SKB_CB(skb)->handle != NULL) {
+                               kfree_skb(skb);
                                goto restart;
                        }
-                       if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN))
-                               tipc_link_xmit(buf, dnode, tsk->ref);
+                       if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
+                               tipc_link_xmit_skb(skb, dnode, tsk->ref);
                        tipc_node_remove_conn(dnode, tsk->ref);
                } else {
                        dnode = tsk_peer_node(tsk);
-                       buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+                       skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                              TIPC_CONN_MSG, SHORT_H_SIZE,
                                              0, dnode, tipc_own_addr,
                                              tsk_peer_port(tsk),
                                              tsk->ref, TIPC_CONN_SHUTDOWN);
-                       tipc_link_xmit(buf, dnode, tsk->ref);
+                       tipc_link_xmit_skb(skb, dnode, tsk->ref);
                }
                tsk->connected = 0;
                sock->state = SS_DISCONNECTING;
@@ -2120,7 +2124,7 @@ static void tipc_sk_timeout(unsigned long ref)
 {
        struct tipc_sock *tsk;
        struct sock *sk;
-       struct sk_buff *buf = NULL;
+       struct sk_buff *skb = NULL;
        u32 peer_port, peer_node;
 
        tsk = tipc_sk_get(ref);
@@ -2138,20 +2142,20 @@ static void tipc_sk_timeout(unsigned long ref)
 
        if (tsk->probing_state == TIPC_CONN_PROBING) {
                /* Previous probe not answered -> self abort */
-               buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
                                      SHORT_H_SIZE, 0, tipc_own_addr,
                                      peer_node, ref, peer_port,
                                      TIPC_ERR_NO_PORT);
        } else {
-               buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
+               skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
                                      0, peer_node, tipc_own_addr,
                                      peer_port, ref, TIPC_OK);
                tsk->probing_state = TIPC_CONN_PROBING;
                k_start_timer(&tsk->timer, tsk->probing_interval);
        }
        bh_unlock_sock(sk);
-       if (buf)
-               tipc_link_xmit(buf, peer_node, ref);
+       if (skb)
+               tipc_link_xmit_skb(skb, peer_node, ref);
 exit:
        tipc_sk_put(tsk);
 }
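
Every single-buffer call site in this file switches from tipc_link_xmit() to tipc_link_xmit_skb(). Its body is not in these hunks, but given the queue-based tipc_link_xmit() used elsewhere it is presumably a thin wrapper along these lines:

    int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
    {
            struct sk_buff_head head;

            /* wrap the lone skb in a queue and reuse the list-based path */
            __skb_queue_head_init(&head);
            __skb_queue_tail(&head, skb);
            return tipc_link_xmit(&head, dnode, selector);
    }
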
index 5eee625d113f795150c211ee32e346eb8dfe66d0..4450d62266023de0697edfe82cc55db9693ee230 100644
@@ -1459,6 +1459,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        struct scm_cookie tmp_scm;
        int max_level;
        int data_len = 0;
+       struct iov_iter from;
+
+       iov_iter_init(&from, WRITE, msg->msg_iov, msg->msg_iovlen, len);
 
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
@@ -1516,7 +1519,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        skb_put(skb, len - data_len);
        skb->data_len = data_len;
        skb->len = len;
-       err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
+       err = skb_copy_datagram_from_iter(skb, 0, &from, len);
        if (err)
                goto out_free;
 
@@ -1638,6 +1641,9 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
        bool fds_sent = false;
        int max_level;
        int data_len;
+       struct iov_iter from;
+
+       iov_iter_init(&from, WRITE, msg->msg_iov, msg->msg_iovlen, len);
 
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
@@ -1694,8 +1700,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                skb_put(skb, size - data_len);
                skb->data_len = data_len;
                skb->len = size;
-               err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
-                                                  sent, size);
+               err = skb_copy_datagram_from_iter(skb, 0, &from, size);
                if (err) {
                        kfree_skb(skb);
                        goto out_err;
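
Both af_unix send paths now feed user data through an iov_iter instead of walking msg->msg_iov directly. The iterator tracks its own position, so skb_copy_datagram_from_iter() needs no resend offset, which is why the explicit `sent` argument disappears from the stream path. The pattern, as used in the two hunks above:

    struct iov_iter from;

    iov_iter_init(&from, WRITE, msg->msg_iov, msg->msg_iovlen, len);
    /* each successful copy advances the iterator past the bytes consumed */
    err = skb_copy_datagram_from_iter(skb, 0, &from, size);
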
index 85d232bed87d21f3c23cd695b83defef5a6f22c1..1d0e39c9a3e2a5deb62130bc7fbdc31a4b5288a9 100644
@@ -1013,7 +1013,7 @@ static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
                goto out;
        }
 
-       err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len);
+       err = transport->dgram_enqueue(vsk, remote_addr, msg, len);
 
 out:
        release_sock(sk);
@@ -1617,7 +1617,7 @@ static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                 */
 
                written = transport->stream_enqueue(
-                               vsk, msg->msg_iov,
+                               vsk, msg,
                                len - total_written);
                if (written < 0) {
                        err = -ENOMEM;
@@ -1739,7 +1739,7 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
                                break;
 
                        read = transport->stream_dequeue(
-                                       vsk, msg->msg_iov,
+                                       vsk, msg,
                                        len - copied, flags);
                        if (read < 0) {
                                err = -ENOMEM;
index a57ddef7d5afcf295b2e4579bb59351b22d6b0aa..c1c03895297369089ca1eb765803518cfb55aceb 100644
@@ -1697,7 +1697,7 @@ static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
 static int vmci_transport_dgram_enqueue(
        struct vsock_sock *vsk,
        struct sockaddr_vm *remote_addr,
-       struct iovec *iov,
+       struct msghdr *msg,
        size_t len)
 {
        int err;
@@ -1714,7 +1714,7 @@ static int vmci_transport_dgram_enqueue(
        if (!dg)
                return -ENOMEM;
 
-       memcpy_fromiovec(VMCI_DG_PAYLOAD(dg), iov, len);
+       memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
 
        dg->dst = vmci_make_handle(remote_addr->svm_cid,
                                   remote_addr->svm_port);
@@ -1835,22 +1835,22 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
 
 static ssize_t vmci_transport_stream_dequeue(
        struct vsock_sock *vsk,
-       struct iovec *iov,
+       struct msghdr *msg,
        size_t len,
        int flags)
 {
        if (flags & MSG_PEEK)
-               return vmci_qpair_peekv(vmci_trans(vsk)->qpair, iov, len, 0);
+               return vmci_qpair_peekv(vmci_trans(vsk)->qpair, msg->msg_iov, len, 0);
        else
-               return vmci_qpair_dequev(vmci_trans(vsk)->qpair, iov, len, 0);
+               return vmci_qpair_dequev(vmci_trans(vsk)->qpair, msg->msg_iov, len, 0);
 }
 
 static ssize_t vmci_transport_stream_enqueue(
        struct vsock_sock *vsk,
-       struct iovec *iov,
+       struct msghdr *msg,
        size_t len)
 {
-       return vmci_qpair_enquev(vmci_trans(vsk)->qpair, iov, len, 0);
+       return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg->msg_iov, len, 0);
 }
 
 static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
index 59e785bfde6518124939f03795f17e214574268b..d9149b68b9bc5c1d100d654d256048b30e268cdb 100644
@@ -1170,7 +1170,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
        skb_reset_transport_header(skb);
        skb_put(skb, len);
 
-       rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+       rc = memcpy_from_msg(skb_transport_header(skb), msg, len);
        if (rc)
                goto out_kfree_skb;
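
memcpy_from_msg() is the msghdr-based replacement for memcpy_fromiovec() in the x25 and vmci hunks. At this point in the series it is presumably just a thin inline helper, something like:

    static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
    {
            return memcpy_fromiovec(data, msg->msg_iov, len);
    }

which keeps call sites source-compatible while the underlying iovec handling migrates toward iov_iter.
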
 
index 42ded997b223b7ece3d8535000d4defeea2ba8f5..c6ff94ab1ad65a883e5b969437d05afb837e1a02 100644
@@ -216,6 +216,8 @@ static char *snd_pcm_format_names[] = {
        FORMAT(DSD_U8),
        FORMAT(DSD_U16_LE),
        FORMAT(DSD_U32_LE),
+       FORMAT(DSD_U16_BE),
+       FORMAT(DSD_U32_BE),
 };
 
 const char *snd_pcm_format_name(snd_pcm_format_t format)
index ae7a0feb3b76001f54555187c19343bce352f0c8..ebe8444de6c6ea8f44a5cacfb39b963939d9880d 100644
@@ -152,6 +152,14 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
                .width = 32, .phys = 32, .le = 1, .signd = 0,
                .silence = { 0x69, 0x69, 0x69, 0x69 },
        },
+       [SNDRV_PCM_FORMAT_DSD_U16_BE] = {
+               .width = 16, .phys = 16, .le = 0, .signd = 0,
+               .silence = { 0x69, 0x69 },
+       },
+       [SNDRV_PCM_FORMAT_DSD_U32_BE] = {
+               .width = 32, .phys = 32, .le = 0, .signd = 0,
+               .silence = { 0x69, 0x69, 0x69, 0x69 },
+       },
        /* FIXME: the following three formats are not defined properly yet */
        [SNDRV_PCM_FORMAT_MPEG] = {
                .le = -1, .signd = -1,
index 16660f312043a71fac284dd7948baab3491d05f3..48b6c5a3884f3b1ed729d542fd286ba2840a938b 100644
@@ -298,7 +298,8 @@ enum {
 
 /* quirks for ATI/AMD HDMI */
 #define AZX_DCAPS_PRESET_ATI_HDMI \
-       (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+       (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
+        AZX_DCAPS_NO_MSI64)
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
@@ -1486,6 +1487,7 @@ static int azx_first_init(struct azx *chip)
        struct snd_card *card = chip->card;
        int err;
        unsigned short gcap;
+       unsigned int dma_bits = 64;
 
 #if BITS_PER_LONG != 64
        /* Fix up base address on ULI M5461 */
@@ -1509,9 +1511,14 @@ static int azx_first_init(struct azx *chip)
                return -ENXIO;
        }
 
-       if (chip->msi)
+       if (chip->msi) {
+               if (chip->driver_caps & AZX_DCAPS_NO_MSI64) {
+                       dev_dbg(card->dev, "Disabling 64bit MSI\n");
+                       pci->no_64bit_msi = true;
+               }
                if (pci_enable_msi(pci) < 0)
                        chip->msi = 0;
+       }
 
        if (azx_acquire_irq(chip, 0) < 0)
                return -EBUSY;
@@ -1522,9 +1529,14 @@ static int azx_first_init(struct azx *chip)
        gcap = azx_readw(chip, GCAP);
        dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap);
 
+       /* AMD devices support 40 or 48bit DMA, take the safe one */
+       if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
+               dma_bits = 40;
+
        /* disable SB600 64bit support for safety */
        if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
                struct pci_dev *p_smbus;
+               dma_bits = 40;
                p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
                                         PCI_DEVICE_ID_ATI_SBX00_SMBUS,
                                         NULL);
@@ -1554,9 +1566,11 @@ static int azx_first_init(struct azx *chip)
        }
 
        /* allow 64bit DMA address if supported by H/W */
-       if ((gcap & AZX_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
-               pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
-       else {
+       if (!(gcap & AZX_GCAP_64OK))
+               dma_bits = 32;
+       if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) {
+               pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits));
+       } else {
                pci_set_dma_mask(pci, DMA_BIT_MASK(32));
                pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
        }
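
The DMA setup now funnels through a single dma_bits value instead of a hard 64-or-32 branch: start at 64, narrow to 40 for the AMD/ATI parts, to 32 when GCAP lacks the 64OK bit, then apply the mask with a 32-bit fallback. Condensed from the hunk above:

    unsigned int dma_bits = 64;

    if (chip->pci->vendor == PCI_VENDOR_ID_AMD ||
        chip->pci->vendor == PCI_VENDOR_ID_ATI)
            dma_bits = 40;          /* these parts handle at most 40/48-bit DMA */
    if (!(gcap & AZX_GCAP_64OK))
            dma_bits = 32;          /* controller has no 64-bit support */
    if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) {
            pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits));
    } else {
            pci_set_dma_mask(pci, DMA_BIT_MASK(32));
            pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
    }
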
index 949cd437eeb264798aec5d9b2f5c5e61a87fc294..5016014e57f2f1dc65f68d5b71db8d12f065493f 100644
@@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
+#define AZX_DCAPS_NO_MSI64      (1 << 29)      /* Stick to 32-bit MSIs */
 
 /* HD Audio class code */
 #define PCI_CLASS_MULTIMEDIA_HD_AUDIO  0x0403
index 8fea1b86df25ebe5e66ea56bfb9babdae031a117..14f16be3f3747a3c72f1ba2938d2a1ca3f8007b2 100644
@@ -4818,7 +4818,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
index 7c83bab69deef832690c98c7babb00ded92d5784..8c9bf4b7aaf0e003db413347efe1ca2ae09053fe 100644
@@ -593,10 +593,10 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
        if (mixer->chip->shutdown)
                ret = -ENODEV;
        else
-               ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+               ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
                                  USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                                  0, wIndex,
-                                 &tmp, sizeof(tmp), 1000);
+                                 &tmp, sizeof(tmp));
        up_read(&mixer->chip->shutdown_rwsem);
 
        if (ret < 0) {
index a5941f80fc5bc6321d26ce523953312acae2e09f..60dfe0d28771bbc244ae8b4e41b737d8281c6f04 100644
@@ -1193,12 +1193,12 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        /* iFi Audio micro/nano iDSD */
        case USB_ID(0x20b1, 0x3008):
                if (fp->altsetting == 2)
-                       return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+                       return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
        /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
        case USB_ID(0x20b1, 0x2009):
                if (fp->altsetting == 3)
-                       return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+                       return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
        default:
                break;
index 3aaca49de3257eed0bd905ac26112cacd49588dd..aacdb59f30dedcd780ee29e2903d928194a42a5c 100644
@@ -1933,7 +1933,7 @@ out:
 
 int kvm_vgic_create(struct kvm *kvm)
 {
-       int i, vcpu_lock_idx = -1, ret = 0;
+       int i, vcpu_lock_idx = -1, ret;
        struct kvm_vcpu *vcpu;
 
        mutex_lock(&kvm->lock);
@@ -1948,6 +1948,7 @@ int kvm_vgic_create(struct kvm *kvm)
         * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run while we create the vgic.
         */
+       ret = -EBUSY;
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!mutex_trylock(&vcpu->mutex))
                        goto out_unlock;
@@ -1955,11 +1956,10 @@ int kvm_vgic_create(struct kvm *kvm)
        }
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (vcpu->arch.has_run_once) {
-                       ret = -EBUSY;
+               if (vcpu->arch.has_run_once)
                        goto out_unlock;
-               }
        }
+       ret = 0;
 
        spin_lock_init(&kvm->arch.vgic.lock);
        kvm->arch.vgic.in_kernel = true;
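
Worth noting: this refactor also closes a small hole. Previously ret was initialized to 0, so a failed mutex_trylock() jumped to out_unlock and reported success even though the vgic had not been created. With ret preset to -EBUSY, both early exits (lock contention and a vCPU that has already run) report -EBUSY, and ret becomes 0 only after every check has passed.
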
index 25ffac9e947d9d3e2d554e6c351dfa51811c0354..3cee7b167052b58e07c147abb65985865e39e0f9 100644
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
        if (pfn_valid(pfn))
-               return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+               return PageReserved(pfn_to_page(pfn));
 
        return true;
 }
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
        else if ((vma->vm_flags & VM_PFNMAP)) {
                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
-               BUG_ON(!kvm_is_mmio_pfn(pfn));
+               BUG_ON(!kvm_is_reserved_pfn(pfn));
        } else {
                if (async && vma_is_valid(vma, write_fault))
                        *async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
        if (is_error_noslot_pfn(pfn))
                return KVM_ERR_PTR_BAD_PAGE;
 
-       if (kvm_is_mmio_pfn(pfn)) {
+       if (kvm_is_reserved_pfn(pfn)) {
                WARN_ON(1);
                return KVM_ERR_PTR_BAD_PAGE;
        }
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-       if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+       if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
                put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-       if (!kvm_is_mmio_pfn(pfn)) {
+       if (!kvm_is_reserved_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-       if (!kvm_is_mmio_pfn(pfn))
+       if (!kvm_is_reserved_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-       if (!kvm_is_mmio_pfn(pfn))
+       if (!kvm_is_reserved_pfn(pfn))
                get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
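
The rename from kvm_is_mmio_pfn() to kvm_is_reserved_pfn() is not purely cosmetic: the zero page is itself marked PageReserved, so dropping the is_zero_pfn() exclusion means the helpers above now skip get_page()/put_page() for it like any other reserved page, which is presumably the point of the broader "reserved" naming.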