]> git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville...
authorDavid S. Miller <davem@davemloft.net>
Fri, 10 Dec 2010 17:50:47 +0000 (09:50 -0800)
committerDavid S. Miller <davem@davemloft.net>
Fri, 10 Dec 2010 17:50:47 +0000 (09:50 -0800)
Conflicts:
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c

126 files changed:
Documentation/networking/dccp.txt
Documentation/networking/ip-sysctl.txt
MAINTAINERS
drivers/atm/lanai.c
drivers/isdn/hisax/config.c
drivers/isdn/icn/icn.c
drivers/net/Kconfig
drivers/net/arm/am79c961a.c
drivers/net/atl1c/atl1c_hw.c
drivers/net/au1000_eth.c
drivers/net/b44.c
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_main.c
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_init_ops.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_shm_u5500.c
drivers/net/caif/caif_shmcore.c
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/slcan.c [new file with mode: 0644]
drivers/net/cris/eth_v10.c
drivers/net/cxgb4/t4_hw.c
drivers/net/cxgb4vf/cxgb4vf_main.c
drivers/net/cxgb4vf/t4vf_hw.c
drivers/net/e1000/e1000_main.c
drivers/net/ehea/ehea_ethtool.c
drivers/net/ehea/ehea_main.c
drivers/net/ifb.c
drivers/net/irda/sh_sir.c
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_type.h
drivers/net/jme.c
drivers/net/pch_gbe/pch_gbe_ethtool.c
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/pch_gbe/pch_gbe_param.c
drivers/net/phy/marvell.c
drivers/net/ppp_generic.c
drivers/net/qlge/qlge_main.c
drivers/net/sc92031.c
drivers/net/sfc/efx.c
drivers/net/sfc/net_driver.h
drivers/net/sfc/nic.c
drivers/net/stmmac/stmmac_ethtool.c
drivers/net/stmmac/stmmac_main.c
drivers/net/tulip/dmfe.c
drivers/net/ucc_geth.h
drivers/net/usb/Kconfig
drivers/net/usb/Makefile
drivers/net/usb/cdc_ncm.c [new file with mode: 0644]
drivers/net/usb/hso.c
drivers/net/usb/usbnet.c
drivers/net/via-rhine.c
drivers/net/vxge/vxge-ethtool.c
drivers/net/vxge/vxge-main.c
drivers/net/wan/x25_asy.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/xilinx_emaclite.c
drivers/vhost/net.c
include/linux/dccp.h
include/linux/filter.h
include/linux/inetdevice.h
include/linux/jhash.h
include/linux/marvell_phy.h
include/linux/netdevice.h
include/linux/snmp.h
include/linux/usb/usbnet.h
include/net/af_unix.h
include/net/dst.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/sock.h
net/9p/protocol.c
net/bridge/br_device.c
net/bridge/br_netfilter.c
net/ceph/Makefile
net/ceph/buffer.c
net/core/datagram.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/request_sock.c
net/core/sock.c
net/core/timestamping.c
net/dccp/Makefile
net/dccp/dccp.h
net/dccp/input.c
net/dccp/output.c
net/dccp/proto.c
net/dccp/qpolicy.c [new file with mode: 0644]
net/decnet/af_decnet.c
net/decnet/dn_route.c
net/econet/af_econet.c
net/ieee802154/af_ieee802154.c
net/ipv4/arp.c
net/ipv4/devinet.c
net/ipv4/fib_trie.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ndisc.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/udp.c
net/l2tp/l2tp_ip.c
net/llc/af_llc.c
net/packet/af_packet.c
net/sctp/socket.c
net/unix/af_unix.c
net/unix/garbage.c
net/x25/x25_link.c
net/xfrm/xfrm_hash.c
net/xfrm/xfrm_policy.c

index 271d524a4c8d53dc4fab4aa91fee28ea89fe4f62..b395ca6a49f296e7b1ddda4e6a580f8691b22532 100644 (file)
@@ -47,6 +47,26 @@ http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
 
 Socket options
 ==============
+DCCP_SOCKOPT_QPOLICY_ID sets the dequeuing policy for outgoing packets. It takes
+a policy ID as argument and can only be set before the connection (i.e. changes
+during an established connection are not supported). Currently, two policies are
+defined: the "simple" policy (DCCPQ_POLICY_SIMPLE), which does nothing special,
+and a priority-based variant (DCCPQ_POLICY_PRIO). The latter allows to pass an
+u32 priority value as ancillary data to sendmsg(), where higher numbers indicate
+a higher packet priority (similar to SO_PRIORITY). This ancillary data needs to
+be formatted using a cmsg(3) message header filled in as follows:
+       cmsg->cmsg_level = SOL_DCCP;
+       cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
+       cmsg->cmsg_len   = CMSG_LEN(sizeof(uint32_t));  /* or CMSG_LEN(4) */
+
+DCCP_SOCKOPT_QPOLICY_TXQLEN sets the maximum length of the output queue. A zero
+value is always interpreted as unbounded queue length. If different from zero,
+the interpretation of this parameter depends on the current dequeuing policy
+(see above): the "simple" policy will enforce a fixed queue size by returning
+EAGAIN, whereas the "prio" policy enforces a fixed queue length by dropping the
+lowest-priority packet first. The default value for this parameter is
+initialised from /proc/sys/net/dccp/default/tx_qlen.
+
 DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
 service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
 the socket will fall back to 0 (which means that no meaningful service code
index ae5522703d16e8b579e3ed3c0b2e03f1d707bea5..2193a5d124c5982a85398bc42094a91225236df0 100644 (file)
@@ -144,6 +144,7 @@ tcp_adv_win_scale - INTEGER
        Count buffering overhead as bytes/2^tcp_adv_win_scale
        (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
        if it is <= 0.
+       Possible values are [-31, 31], inclusive.
        Default: 2
 
 tcp_allowed_congestion_control - STRING
index 8b6ca96435eeec015930a4f130c76da0210dd753..9206cb4629135aeb7e8b1f5cfedb4aeb256a3391 100644 (file)
@@ -1359,7 +1359,7 @@ F:        include/net/bluetooth/
 
 BONDING DRIVER
 M:     Jay Vosburgh <fubar@us.ibm.com>
-L:     bonding-devel@lists.sourceforge.net
+L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
 F:     drivers/net/bonding/
index cbe15a86c6698b2044b45469fa57f8d199aca15e..930051d941a7817a65b75ca7cc7fe72f9e045d00 100644 (file)
@@ -2241,11 +2241,8 @@ static int __devinit lanai_dev_open(struct atm_dev *atmdev)
        memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
        lanai_timed_poll_start(lanai);
        printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
-           "(%02X-%02X-%02X-%02X-%02X-%02X)\n", lanai->number,
-           (int) lanai->pci->revision, (unsigned long) lanai->base,
-           lanai->pci->irq,
-           atmdev->esi[0], atmdev->esi[1], atmdev->esi[2],
-           atmdev->esi[3], atmdev->esi[4], atmdev->esi[5]);
+               "(%pMF)\n", lanai->number, (int) lanai->pci->revision,
+               (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
        printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
            "board_rev=%d\n", lanai->number,
            lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
index b133378d4dc9b1707c749102c345c3c2f7b2d7d4..c110f8679babd4407c44172456272722a879cf8b 100644 (file)
@@ -1917,7 +1917,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
 
-static struct pci_device_id hisax_pci_tbl[] __devinitdata = {
+static struct pci_device_id hisax_pci_tbl[] __devinitdata __used = {
 #ifdef CONFIG_HISAX_FRITZPCI
        {PCI_VDEVICE(AVM,      PCI_DEVICE_ID_AVM_A1)                    },
 #endif
index 2e847a90bad0d601f8bcc5f9a7bd4d7440f929c0..f2b5bab5e6a191761d2f56331c792ca8f7f78182 100644 (file)
@@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup);
 static int __init icn_init(void)
 {
        char *p;
-       char rev[10];
+       char rev[20];
 
        memset(&dev, 0, sizeof(icn_dev));
        dev.memaddr = (membase & 0x0ffc000);
@@ -1637,9 +1637,10 @@ static int __init icn_init(void)
        spin_lock_init(&dev.devlock);
 
        if ((p = strchr(revision, ':'))) {
-               strcpy(rev, p + 1);
+               strncpy(rev, p + 1, 20);
                p = strchr(rev, '$');
-               *p = 0;
+               if (p)
+                       *p = 0;
        } else
                strcpy(rev, " ??? ");
        printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev,
index a11dc735752c2278478d2a040fe651223d3c70e6..a20693fcb3210518e9437448cf79ea8d116483f3 100644 (file)
@@ -2543,10 +2543,10 @@ config PCH_GBE
        depends on PCI
        select MII
        ---help---
-         This is a gigabit ethernet driver for Topcliff PCH.
-         Topcliff PCH is the platform controller hub that is used in Intel's
+         This is a gigabit ethernet driver for EG20T PCH.
+         EG20T PCH is the platform controller hub that is used in Intel's
          general embedded platform.
-         Topcliff PCH has Gigabit Ethernet interface.
+         EG20T PCH has Gigabit Ethernet interface.
          Using this interface, it is able to access system devices connected
          to Gigabit Ethernet.
          This driver enables Gigabit Ethernet function.
index 62f21106efec224d3447f408439a55bb7d4743e3..0c9217f48b72060d77c6ac20eaa592011cb4a009 100644 (file)
@@ -340,14 +340,6 @@ am79c961_close(struct net_device *dev)
        return 0;
 }
 
-/*
- * Get the current statistics.
- */
-static struct net_device_stats *am79c961_getstats (struct net_device *dev)
-{
-       return &dev->stats;
-}
-
 static void am79c961_mc_hash(char *addr, unsigned short *hash)
 {
        if (addr[0] & 0x01) {
@@ -665,7 +657,6 @@ static const struct net_device_ops am79c961_netdev_ops = {
        .ndo_open               = am79c961_open,
        .ndo_stop               = am79c961_close,
        .ndo_start_xmit         = am79c961_sendpacket,
-       .ndo_get_stats          = am79c961_getstats,
        .ndo_set_multicast_list = am79c961_setmulticastlist,
        .ndo_tx_timeout         = am79c961_timeout,
        .ndo_validate_addr      = eth_validate_addr,
index 919080b2c3a50eb48bea7311ac0db4e8d3e8103e..1bf67200994827fd76250e718b3bdf4a67a609e8 100644 (file)
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
        addr[0] = addr[1] = 0;
        AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
        if (atl1c_check_eeprom_exist(hw)) {
-               if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+               if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
                        /* Enable OTP CLK */
                        if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
                                otp_ctrl_data |= OTP_CTRL_CLK_EN;
index 43489f89c142f7d10e25f0454287ef28939fd228..53eff9ba6e9500a3ea2327a55a42fcb9d9aeefea 100644 (file)
@@ -155,10 +155,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
        spin_lock_irqsave(&aup->lock, flags);
 
        if (force_reset || (!aup->mac_enabled)) {
-               writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+               writel(MAC_EN_CLOCK_ENABLE, aup->enable);
                au_sync_delay(2);
                writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
-                               | MAC_EN_CLOCK_ENABLE), &aup->enable);
+                               | MAC_EN_CLOCK_ENABLE), aup->enable);
                au_sync_delay(2);
 
                aup->mac_enabled = 1;
@@ -503,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
 
        au1000_hard_stop(dev);
 
-       writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+       writel(MAC_EN_CLOCK_ENABLE, aup->enable);
        au_sync_delay(2);
-       writel(0, &aup->enable);
+       writel(0, aup->enable);
        au_sync_delay(2);
 
        aup->tx_full = 0;
@@ -1119,7 +1119,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
        /* set a random MAC now in case platform_data doesn't provide one */
        random_ether_addr(dev->dev_addr);
 
-       writel(0, &aup->enable);
+       writel(0, aup->enable);
        aup->mac_enabled = 0;
 
        pd = pdev->dev.platform_data;
index c6e86315b3f8df37aa2ac1a767ac46fc1b5eba60..2e2b76258ab42cbc6b0dc73f70bd9547a5846857 100644 (file)
@@ -381,11 +381,11 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
        __b44_set_flow_ctrl(bp, pause_enab);
 }
 
-#ifdef SSB_DRIVER_MIPS
-extern char *nvram_get(char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
 static void b44_wap54g10_workaround(struct b44 *bp)
 {
-       const char *str;
+       char buf[20];
        u32 val;
        int err;
 
@@ -394,10 +394,9 @@ static void b44_wap54g10_workaround(struct b44 *bp)
         * see https://dev.openwrt.org/ticket/146
         * check and reset bit "isolate"
         */
-       str = nvram_get("boardnum");
-       if (!str)
+       if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
                return;
-       if (simple_strtoul(str, NULL, 0) == 2) {
+       if (simple_strtoul(buf, NULL, 0) == 2) {
                err = __b44_readphy(bp, 0, MII_BMCR, &val);
                if (err)
                        goto error;
index b61a1dfebcafb0e83b903a87b00c90b7dfcb09e7..9cab32328bbacc8103c3a52a5b2668e605481fda 100644 (file)
@@ -220,7 +220,9 @@ struct be_rx_obj {
        struct be_rx_stats stats;
        u8 rss_id;
        bool rx_post_starved;   /* Zero rx frags have been posted to BE */
-       u32 cache_line_barrier[16];
+       u16 last_frag_index;
+       u16 rsvd;
+       u32 cache_line_barrier[15];
 };
 
 struct be_vf_cfg {
index 3865b2bc65e6abab69ab071d13dae8930c2372e2..171a08caf2be0c156205ce09417d269423c9c365 100644 (file)
@@ -470,25 +470,14 @@ int be_cmd_fw_init(struct be_adapter *adapter)
        spin_lock(&adapter->mbox_lock);
 
        wrb = (u8 *)wrb_from_mbox(adapter);
-       if (lancer_chip(adapter)) {
-               *wrb++ = 0xFF;
-               *wrb++ = 0x34;
-               *wrb++ = 0x12;
-               *wrb++ = 0xFF;
-               *wrb++ = 0xFF;
-               *wrb++ = 0x78;
-               *wrb++ = 0x56;
-               *wrb = 0xFF;
-       } else {
-               *wrb++ = 0xFF;
-               *wrb++ = 0x12;
-               *wrb++ = 0x34;
-               *wrb++ = 0xFF;
-               *wrb++ = 0xFF;
-               *wrb++ = 0x56;
-               *wrb++ = 0x78;
-               *wrb = 0xFF;
-       }
+       *wrb++ = 0xFF;
+       *wrb++ = 0x12;
+       *wrb++ = 0x34;
+       *wrb++ = 0xFF;
+       *wrb++ = 0xFF;
+       *wrb++ = 0x56;
+       *wrb++ = 0x78;
+       *wrb = 0xFF;
 
        status = be_mbox_notify_wait(adapter);
 
@@ -1285,7 +1274,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
 
                i = 0;
                netdev_for_each_mc_addr(ha, netdev)
-                       memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
+                       memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }
index 102567ee68c2e22d8efb3cb0c4deff7891056515..0b35e4a8bf193fd90fd586278340bcc85281dbf7 100644 (file)
@@ -911,11 +911,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
 
-       for (i = 0; i < num_rcvd; i++) {
-               page_info = get_rx_page_info(adapter, rxo, rxq_idx);
-               put_page(page_info->page);
-               memset(page_info, 0, sizeof(*page_info));
-               index_inc(&rxq_idx, rxq->len);
+        /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
+       if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
+
+               rxo->last_frag_index = rxq_idx;
+
+               for (i = 0; i < num_rcvd; i++) {
+                       page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+                       put_page(page_info->page);
+                       memset(page_info, 0, sizeof(*page_info));
+                       index_inc(&rxq_idx, rxq->len);
+               }
        }
 }
 
@@ -1016,9 +1022,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
        u8 vtm;
 
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
-       /* Is it a flush compl that has no data */
-       if (unlikely(num_rcvd == 0))
-               return;
 
        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
@@ -1075,10 +1078,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        u8 pkt_type;
 
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
-       /* Is it a flush compl that has no data */
-       if (unlikely(num_rcvd == 0))
-               return;
-
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -1349,7 +1348,7 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
-               be_cq_notify(adapter, rx_cq->id, true, 1);
+               be_cq_notify(adapter, rx_cq->id, false, 1);
        }
 
        /* Then free posted rx buffer that were not used */
@@ -1576,6 +1575,9 @@ static int be_rx_queues_create(struct be_adapter *adapter)
        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
+               /* Init last_frag_index so that the frag index in the first
+                * completion will never match */
+               rxo->last_frag_index = 0xffff;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;
 
@@ -1697,10 +1699,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
        return IRQ_HANDLED;
 }
 
-static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
-                       struct be_eth_rx_compl *rxcp)
+static inline bool do_gro(struct be_rx_obj *rxo,
+                       struct be_eth_rx_compl *rxcp, u8 err)
 {
-       int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
        int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
 
        if (err)
@@ -1717,6 +1718,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;
+       u16 frag_index, num_rcvd;
+       u8 err;
 
        rxo->stats.rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
@@ -1724,10 +1727,22 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
                if (!rxcp)
                        break;
 
-               if (do_gro(adapter, rxo, rxcp))
-                       be_rx_compl_process_gro(adapter, rxo, rxcp);
-               else
-                       be_rx_compl_process(adapter, rxo, rxcp);
+               err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
+               frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
+                                                               rxcp);
+               num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
+                                                               rxcp);
+
+               /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
+               if (likely(frag_index != rxo->last_frag_index &&
+                               num_rcvd != 0)) {
+                       rxo->last_frag_index = frag_index;
+
+                       if (do_gro(rxo, rxcp, err))
+                               be_rx_compl_process_gro(adapter, rxo, rxcp);
+                       else
+                               be_rx_compl_process(adapter, rxo, rxcp);
+               }
 
                be_rx_compl_reset(rxcp);
        }
@@ -2583,10 +2598,12 @@ static void be_netdev_init(struct net_device *netdev)
        int i;
 
        netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
-               NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
+               NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_GRO | NETIF_F_TSO6;
 
-       netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
+       netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
+               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
        if (lancer_chip(adapter))
                netdev->vlan_features |= NETIF_F_TSO6;
@@ -2899,7 +2916,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
        pci_set_drvdata(pdev, adapter);
 
        status = be_dev_family_check(adapter);
-       if (!status)
+       if (status)
                goto free_netdev;
 
        adapter->netdev = netdev;
index cfc25cf064d3ad84ec28e3bfd8d0cf15d7669e84..7e4d682f0df11e9ad33c61b016b1205b33bc30e6 100644 (file)
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.60.00-6"
-#define DRV_MODULE_RELDATE      "2010/11/29"
+#define DRV_MODULE_VERSION      "1.60.00-7"
+#define DRV_MODULE_RELDATE      "2010/12/08"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
@@ -1336,7 +1336,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 #define BNX2X_ILT_ZALLOC(x, y, size) \
        do { \
-               x = pci_alloc_consistent(bp->pdev, size, y); \
+               x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
                if (x) \
                        memset(x, 0, size); \
        } while (0)
@@ -1344,7 +1344,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_ILT_FREE(x, y, size) \
        do { \
                if (x) { \
-                       pci_free_consistent(bp->pdev, size, x, y); \
+                       dma_free_coherent(&bp->pdev->dev, size, x, y); \
                        x = NULL; \
                        y = 0; \
                } \
index a4555edbe9ce3ec03cd5e2eb68704dc5abcbd43d..236c00c3f568a599e56fd8c00ce2f377cbefdc59 100644 (file)
@@ -1795,15 +1795,15 @@ exit_lbl:
 }
 #endif
 
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
-                                    struct eth_tx_parse_bd_e2 *pbd,
-                                    u32 xmit_type)
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+                                       u32 xmit_type)
 {
-       pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
-               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
+       *parsing_data |= (skb_shinfo(skb)->gso_size <<
+                             ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+                             ETH_TX_PARSE_BD_E2_LSO_MSS;
        if ((xmit_type & XMIT_GSO_V6) &&
            (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
-               pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+               *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
 }
 
 /**
@@ -1848,15 +1848,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
  * @return header len
  */
 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
-       struct eth_tx_parse_bd_e2 *pbd,
-       u32 xmit_type)
+       u32 *parsing_data, u32 xmit_type)
 {
-       pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
-               ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
+       *parsing_data |= ((tcp_hdrlen(skb)/4) <<
+               ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+               ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 
-       pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
-                                         skb->data) / 2) <<
-               ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
+       *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
+               ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+               ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
 
        return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
 }
@@ -1925,6 +1925,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
        struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
        struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+       u32 pbd_e2_parsing_data = 0;
        u16 pkt_prod, bd_prod;
        int nbd, fp_index;
        dma_addr_t mapping;
@@ -2046,8 +2047,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
                /* Set PBD in checksum offload case */
                if (xmit_type & XMIT_CSUM)
-                       hlen = bnx2x_set_pbd_csum_e2(bp,
-                                                    skb, pbd_e2, xmit_type);
+                       hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+                                                    &pbd_e2_parsing_data,
+                                                    xmit_type);
        } else {
                pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
                memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2089,10 +2091,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
                                                 hlen, bd_prod, ++nbd);
                if (CHIP_IS_E2(bp))
-                       bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
+                       bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+                                            xmit_type);
                else
                        bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
        }
+
+       /* Set the PBD's parsing_data field if not zero
+        * (for the chips newer than 57711).
+        */
+       if (pbd_e2_parsing_data)
+               pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
        tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
 
        /* Handle fragmented skb */
index a306b0e46b613417c630e79398c5f8250204195d..66df29fcf7516e45b7794fee314d0122fca6d42b 100644 (file)
@@ -838,7 +838,7 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
 /****************************************************************************
 * SRC initializations
 ****************************************************************************/
-
+#ifdef BCM_CNIC
 /* called during init func stage */
 static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
                              dma_addr_t t2_mapping, int src_cid_count)
@@ -862,5 +862,5 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
                    U64_HI((u64)t2_mapping +
                           (src_cid_count-1) * sizeof(struct src_ent)));
 }
-
+#endif
 #endif /* BNX2X_INIT_OPS_H */
index 1552fc3c1351d68741f40263990700b60eea9f06..0068a1dbc064e52537cc894d588b3a161d654e92 100644 (file)
@@ -8957,7 +8957,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        dev->netdev_ops = &bnx2x_netdev_ops;
        bnx2x_set_ethtool_ops(dev);
        dev->features |= NETIF_F_SG;
-       dev->features |= NETIF_F_HW_CSUM;
+       dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        if (bp->flags & USING_DAC_FLAG)
                dev->features |= NETIF_F_HIGHDMA;
        dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -8965,7 +8965,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
 
        dev->vlan_features |= NETIF_F_SG;
-       dev->vlan_features |= NETIF_F_HW_CSUM;
+       dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        if (bp->flags & USING_DAC_FLAG)
                dev->vlan_features |= NETIF_F_HIGHDMA;
        dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
index 0273ad0b57bb00167591414ca2937ce84e6794b1..bb33b3b347fabf10263748ca02c626751aee3eca 100644 (file)
@@ -1570,7 +1570,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        /* If this is the first slave, then we need to set the master's hardware
         * address to be the same as the slave's. */
-       if (bond->slave_cnt == 0)
+       if (is_zero_ether_addr(bond->dev->dev_addr))
                memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
                       slave_dev->addr_len);
 
index 1cd90da86f130ea588070634db35959a47ca7b87..32b1c6fb2de1a74e5b97254cdf075ca933464734 100644 (file)
@@ -5,7 +5,7 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
 
 #include <linux/version.h>
 #include <linux/init.h>
index 19f9c065666745633752c3e59b312d2a4e9812c0..80511167f35bd4bcfb53cf424eb58ff4174a558b 100644 (file)
@@ -6,7 +6,7 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
 
 #include <linux/spinlock.h>
 #include <linux/sched.h>
index 080574b0fff0c222510df389641c669d5fad1429..d5a9db60ade9cf783273f57a7abac5679aea0003 100644 (file)
@@ -12,6 +12,27 @@ config CAN_VCAN
          This driver can also be built as a module.  If so, the module
          will be called vcan.
 
+config CAN_SLCAN
+       tristate "Serial / USB serial CAN Adaptors (slcan)"
+       depends on CAN
+       default N
+       ---help---
+         CAN driver for several 'low cost' CAN interfaces that are attached
+         via serial lines or via USB-to-serial adapters using the LAWICEL
+         ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
+
+         As only the sending and receiving of CAN frames is implemented, this
+         driver should work with the (serial/USB) CAN hardware from:
+         www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
+
+         Userspace tools to attach the SLCAN line discipline (slcan_attach,
+         slcand) can be found in the can-utils at the SocketCAN SVN, see
+         http://developer.berlios.de/projects/socketcan for details.
+
+         The slcan driver supports up to 10 CAN netdevices by default which
+         can be changed by the 'maxdev=xx' module option. This driver can
+         also be built as a module. If so, the module will be called slcan.
+
 config CAN_DEV
        tristate "Platform CAN drivers with Netlink support"
        depends on CAN
index 90af15a4f106a7031a8eafce16b6f2b9d3cc0eaf..07ca159ba3f91ab643a167cda786d46a488c2e05 100644 (file)
@@ -3,6 +3,7 @@
 #
 
 obj-$(CONFIG_CAN_VCAN)         += vcan.o
+obj-$(CONFIG_CAN_SLCAN)                += slcan.o
 
 obj-$(CONFIG_CAN_DEV)          += can-dev.o
 can-dev-y                      := dev.o
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
new file mode 100644 (file)
index 0000000..b423965
--- /dev/null
@@ -0,0 +1,756 @@
+/*
+ * slcan.c - serial line CAN interface driver (using tty line discipline)
+ *
+ * This file is derived from linux/drivers/net/slip.c
+ *
+ * slip.c Authors  : Laurence Culhane <loz@holmes.demon.co.uk>
+ *                   Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * slcan.c Author  : Oliver Hartkopp <socketcan@hartkopp.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
+ * at http://www.gnu.org/licenses/gpl.html
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/can.h>
+
+/* Module banner, printed once from slcan_init().  It lives in the
+ * init-only section; __initconst (not __initdata) must be used for
+ * const data to avoid a section type conflict. */
+static const char banner[] __initconst =
+       KERN_INFO "slcan: serial line CAN interface driver\n";
+
+MODULE_ALIAS_LDISC(N_SLCAN);
+MODULE_DESCRIPTION("serial line CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+
+#define SLCAN_MAGIC 0x53CA
+
+static int maxdev = 10;                /* MAX number of SLCAN channels;
+                                  This can be overridden with
+                                  insmod slcan.ko maxdev=nnn   */
+module_param(maxdev, int, 0);
+MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
+
+/* maximum rx buffer len: extended CAN frame with timestamp */
+#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
+
+/* Per-channel state; allocated as the netdev_priv() area of each
+ * slcan net_device and linked to a tty via tty->disc_data. */
+struct slcan {
+       int                     magic;
+
+       /* Various fields. */
+       struct tty_struct       *tty;           /* ptr to TTY structure      */
+       struct net_device       *dev;           /* easy for intr handling    */
+       spinlock_t              lock;
+
+       /* Inline frame buffers, SLC_MTU bytes each. */
+       unsigned char           rbuff[SLC_MTU]; /* receiver buffer           */
+       int                     rcount;         /* received chars counter    */
+       unsigned char           xbuff[SLC_MTU]; /* transmitter buffer        */
+       unsigned char           *xhead;         /* pointer to next XMIT byte */
+       int                     xleft;          /* bytes left in XMIT queue  */
+
+       unsigned long           flags;          /* Flag values/ mode etc     */
+#define SLF_INUSE              0               /* Channel in use            */
+#define SLF_ERROR              1               /* Parity, etc. error        */
+
+       /* NOTE(review): inherited from slip.c; 'leased' is only ever
+        * tested, never set, in this driver -- confirm it can be dropped. */
+       unsigned char           leased;
+       dev_t                   line;
+       pid_t                   pid;
+};
+
+/* Array of maxdev channel slots, allocated in slcan_init(). */
+static struct net_device **slcan_devs;
+
+ /************************************************************************
+  *                    SLCAN ENCAPSULATION FORMAT                       *
+  ************************************************************************/
+
+/*
+ * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
+ * frame format) a data length code (can_dlc) which can be from 0 to 8
+ * and up to <can_dlc> data bytes as payload.
+ * Additionally a CAN frame may become a remote transmission frame if the
+ * RTR-bit is set. This causes another ECU to send a CAN frame with the
+ * given can_id.
+ *
+ * The SLCAN ASCII representation of these different frame types is:
+ * <type> <id> <dlc> <data>*
+ *
+ * Extended frames (29 bit) are defined by capital characters in the type.
+ * RTR frames are defined as 'r' types - normal frames have 't' type:
+ * t => 11 bit data frame
+ * r => 11 bit RTR frame
+ * T => 29 bit data frame
+ * R => 29 bit RTR frame
+ *
+ * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
+ * The <dlc> is a one byte ASCII number ('0' - '8')
+ * The <data> section has as many ASCII Hex byte pairs as defined by the <dlc>
+ *
+ * Examples:
+ *
+ * t1230 : can_id 0x123, can_dlc 0, no data
+ * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33
+ * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55
+ * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request
+ *
+ */
+
+ /************************************************************************
+  *                    STANDARD SLCAN DECAPSULATION                     *
+  ************************************************************************/
+
+/*
+ * Convert one ASCII hex digit ('0'-'9', 'A'-'F', 'a'-'f') to its value.
+ * Returns 16 for any non-hex character, so callers test 'tmp > 0x0F'.
+ */
+static int asc2nibble(char c)
+{
+
+       if ((c >= '0') && (c <= '9'))
+               return c - '0';
+
+       if ((c >= 'A') && (c <= 'F'))
+               return c - 'A' + 10;
+
+       if ((c >= 'a') && (c <= 'f'))
+               return c - 'a' + 10;
+
+       return 16; /* error */
+}
+
+/* Send one completely decapsulated can_frame to the network layer */
+static void slc_bump(struct slcan *sl)
+{
+       struct sk_buff *skb;
+       struct can_frame cf;
+       int i, dlc_pos, tmp;
+       unsigned long ultmp;
+       char cmd = sl->rbuff[0];
+
+       if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R'))
+               return;
+
+       if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */
+               dlc_pos = 4; /* dlc position tiiid */
+       else
+               dlc_pos = 9; /* dlc position Tiiiiiiiid */
+
+       /*
+        * The dlc is a single ASCII digit '0'..'8' (the '< 9' comparison
+        * deliberately rejects '9', as a CAN frame carries at most 8 bytes).
+        * NOTE(review): the caller only guarantees rcount > 4, so for a
+        * short 'T'/'R' line the byte at dlc_pos may be stale data from a
+        * previous frame -- confirm whether per-type length checks are needed.
+        */
+       if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
+               return;
+
+       cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */
+
+       sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
+
+       if (strict_strtoul(sl->rbuff+1, 16, &ultmp))
+               return;
+
+       cf.can_id = ultmp;
+
+       if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
+               cf.can_id |= CAN_EFF_FLAG;
+
+       if ((cmd | 0x20) == 'r') /* RTR frame */
+               cf.can_id |= CAN_RTR_FLAG;
+
+       *(u64 *) (&cf.data) = 0; /* clear payload */
+
+       /* Two ASCII hex digits per payload byte; abort on any non-hex char. */
+       for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
+
+               tmp = asc2nibble(sl->rbuff[dlc_pos++]);
+               if (tmp > 0x0F)
+                       return;
+               cf.data[i] = (tmp << 4);
+               tmp = asc2nibble(sl->rbuff[dlc_pos++]);
+               if (tmp > 0x0F)
+                       return;
+               cf.data[i] |= tmp;
+       }
+
+
+       /* Hand the frame to the network stack. */
+       skb = dev_alloc_skb(sizeof(struct can_frame));
+       if (!skb)
+               return;
+
+       skb->dev = sl->dev;
+       skb->protocol = htons(ETH_P_CAN);
+       skb->pkt_type = PACKET_BROADCAST;
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+       memcpy(skb_put(skb, sizeof(struct can_frame)),
+              &cf, sizeof(struct can_frame));
+       netif_rx(skb);
+
+       sl->dev->stats.rx_packets++;
+       sl->dev->stats.rx_bytes += cf.can_dlc;
+}
+
+/*
+ * Parse the tty input stream one character at a time.  A CR or BEL ends
+ * a pdu and, unless an error was flagged meanwhile, hands the collected
+ * line to slc_bump() for decapsulation.
+ */
+static void slcan_unesc(struct slcan *sl, unsigned char s)
+{
+
+       if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
+               if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+                   (sl->rcount > 4))  {
+                       slc_bump(sl);
+               }
+               sl->rcount = 0;
+       } else {
+               if (!test_bit(SLF_ERROR, &sl->flags))  {
+                       if (sl->rcount < SLC_MTU)  {
+                               sl->rbuff[sl->rcount++] = s;
+                               return;
+                       } else {
+                               /* buffer full: drop until end of this pdu */
+                               sl->dev->stats.rx_over_errors++;
+                               set_bit(SLF_ERROR, &sl->flags);
+                       }
+               }
+       }
+}
+
+ /************************************************************************
+  *                    STANDARD SLCAN ENCAPSULATION                     *
+  ************************************************************************/
+
+/*
+ * Encapsulate one can_frame and stuff into a TTY queue.
+ * Worst case output is 1 (type) + 8 (ext. id) + 1 (dlc) + 16 (data)
+ * + "\r" + NUL = 28 bytes, which fits the SLC_MTU sized xbuff.
+ */
+static void slc_encaps(struct slcan *sl, struct can_frame *cf)
+{
+       int actual, idx, i;
+       char cmd;
+
+       if (cf->can_id & CAN_RTR_FLAG)
+               cmd = 'R'; /* becomes 'r' in standard frame format */
+       else
+               cmd = 'T'; /* becomes 't' in standard frame format */
+
+       if (cf->can_id & CAN_EFF_FLAG)
+               sprintf(sl->xbuff, "%c%08X%d", cmd,
+                       cf->can_id & CAN_EFF_MASK, cf->can_dlc);
+       else
+               sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20,
+                       cf->can_id & CAN_SFF_MASK, cf->can_dlc);
+
+       idx = strlen(sl->xbuff);
+
+       for (i = 0; i < cf->can_dlc; i++)
+               sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]);
+
+       strcat(sl->xbuff, "\r"); /* add terminating character */
+
+       /* Order of next two lines is *very* important.
+        * When we are sending a little amount of data,
+        * the transfer may be completed inside the ops->write()
+        * routine, because it's running with interrupts enabled.
+        * In this case we *never* got WRITE_WAKEUP event,
+        * if we did not request it before write operation.
+        *       14 Oct 1994  Dmitry Gorodchanin.
+        */
+       set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+       actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff));
+       sl->xleft = strlen(sl->xbuff) - actual;
+       sl->xhead = sl->xbuff + actual;
+       sl->dev->stats.tx_bytes += cf->can_dlc;
+}
+
+/*
+ * Called by the driver when there's room for more data.  If we have
+ * more packets to send, we send them here.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+       int actual;
+       struct slcan *sl = (struct slcan *) tty->disc_data;
+
+       /* First make sure we're connected. */
+       if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+               return;
+
+       if (sl->xleft <= 0)  {
+               /* Now serial buffer is almost free & we can start
+                * transmission of another packet */
+               sl->dev->stats.tx_packets++;
+               clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+               netif_wake_queue(sl->dev);
+               return;
+       }
+
+       /* Push the remainder of the previously encapsulated frame. */
+       actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+       sl->xleft -= actual;
+       sl->xhead += actual;
+}
+
+/* Send a can_frame to a TTY queue. */
+static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct slcan *sl = netdev_priv(dev);
+
+       /* Only complete struct can_frame skbs are accepted. */
+       if (skb->len != sizeof(struct can_frame))
+               goto out;
+
+       spin_lock(&sl->lock);
+       if (!netif_running(dev))  {
+               spin_unlock(&sl->lock);
+               printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
+               goto out;
+       }
+       if (sl->tty == NULL) {
+               /* the tty discipline was detached in the meantime */
+               spin_unlock(&sl->lock);
+               goto out;
+       }
+
+       /* Queue stays stopped until slcan_write_wakeup() drains the tty. */
+       netif_stop_queue(sl->dev);
+       slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
+       spin_unlock(&sl->lock);
+
+out:
+       kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+
+/******************************************
+ *   Routines looking at netdevice side.
+ ******************************************/
+
+/* Netdevice UP -> DOWN routine */
+static int slc_close(struct net_device *dev)
+{
+       struct slcan *sl = netdev_priv(dev);
+
+       spin_lock_bh(&sl->lock);
+       if (sl->tty) {
+               /* TTY discipline is running. */
+               clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+       }
+       netif_stop_queue(dev);
+       /* Drop any partially received or not yet sent frame. */
+       sl->rcount   = 0;
+       sl->xleft    = 0;
+       spin_unlock_bh(&sl->lock);
+
+       return 0;
+}
+
+/* Netdevice DOWN -> UP routine */
+static int slc_open(struct net_device *dev)
+{
+       struct slcan *sl = netdev_priv(dev);
+
+       if (sl->tty == NULL)
+               return -ENODEV;
+
+       /* Keep only the in-use bit; this clears e.g. a stale SLF_ERROR. */
+       sl->flags &= (1 << SLF_INUSE);
+       netif_start_queue(dev);
+       return 0;
+}
+
+/* Hook the destructor so we can free slcan devs at the right point in time */
+static void slc_free_netdev(struct net_device *dev)
+{
+       /* base_addr doubles as the index into the slcan_devs[] array */
+       int i = dev->base_addr;
+       free_netdev(dev);
+       slcan_devs[i] = NULL;
+}
+
+static const struct net_device_ops slc_netdev_ops = {
+       .ndo_open               = slc_open,
+       .ndo_stop               = slc_close,
+       .ndo_start_xmit         = slc_xmit,
+};
+
+/* Netdevice setup callback, invoked by alloc_netdev() in slc_alloc(). */
+static void slc_setup(struct net_device *dev)
+{
+       dev->netdev_ops         = &slc_netdev_ops;
+       dev->destructor         = slc_free_netdev;
+
+       /* No link-layer header and no addressing on CAN. */
+       dev->hard_header_len    = 0;
+       dev->addr_len           = 0;
+       dev->tx_queue_len       = 10;
+
+       /* One full struct can_frame per packet. */
+       dev->mtu                = sizeof(struct can_frame);
+       dev->type               = ARPHRD_CAN;
+
+       /* New-style flags. */
+       dev->flags              = IFF_NOARP;
+       dev->features           = NETIF_F_NO_CSUM;
+}
+
+/******************************************
+  Routines looking at TTY side.
+ ******************************************/
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of SLCAN data has been received, which can now be decapsulated
+ * and sent on to some IP layer for further processing. This will not
+ * be re-entered while running but other ldisc functions may be called
+ * in parallel
+ */
+
+static void slcan_receive_buf(struct tty_struct *tty,
+                             const unsigned char *cp, char *fp, int count)
+{
+       struct slcan *sl = (struct slcan *) tty->disc_data;
+
+       if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+               return;
+
+       /* Read the characters out of the buffer */
+       while (count--) {
+               if (fp && *fp++) {
+                       /* tty reported a framing/parity error for this char */
+                       if (!test_and_set_bit(SLF_ERROR, &sl->flags))
+                               sl->dev->stats.rx_errors++;
+                       cp++;
+                       continue;
+               }
+               slcan_unesc(sl, *cp++);
+       }
+}
+
+/************************************
+ *  slcan_open helper routines.
+ ************************************/
+
+/* Collect hanged up channels: close netdevices whose tty went away. */
+static void slc_sync(void)
+{
+       int i;
+       struct net_device *dev;
+       struct slcan      *sl;
+
+       /* NOTE(review): the scan stops at the first empty slot, but
+        * slc_free_netdev() can leave holes in slcan_devs[] -- confirm
+        * that devices after a hole cannot be left dangling here. */
+       for (i = 0; i < maxdev; i++) {
+               dev = slcan_devs[i];
+               if (dev == NULL)
+                       break;
+
+               sl = netdev_priv(dev);
+               if (sl->tty || sl->leased)
+                       continue;
+               if (dev->flags & IFF_UP)
+                       dev_close(dev);
+       }
+}
+
+/* Find a free SLCAN channel, and link in this `tty' line. */
+static struct slcan *slc_alloc(dev_t line)
+{
+       int i;
+       struct net_device *dev = NULL;
+       struct slcan       *sl;
+
+       if (slcan_devs == NULL)
+               return NULL;    /* Master array missing ! */
+
+       /* Find the first empty slot. */
+       for (i = 0; i < maxdev; i++) {
+               dev = slcan_devs[i];
+               if (dev == NULL)
+                       break;
+
+       }
+
+       /* Sorry, too many, all slots in use */
+       if (i >= maxdev)
+               return NULL;
+
+       /* NOTE(review): dev can only be NULL at this point -- the loop above
+        * breaks solely on an empty slot -- so this reuse path looks
+        * unreachable; confirm against the slip.c original. */
+       if (dev) {
+               sl = netdev_priv(dev);
+               if (test_bit(SLF_INUSE, &sl->flags)) {
+                       unregister_netdevice(dev);
+                       dev = NULL;
+                       slcan_devs[i] = NULL;
+               }
+       }
+
+       if (!dev) {
+               char name[IFNAMSIZ];
+               sprintf(name, "slcan%d", i);
+
+               dev = alloc_netdev(sizeof(*sl), name, slc_setup);
+               if (!dev)
+                       return NULL;
+               /* base_addr records the slcan_devs[] index for the dtor */
+               dev->base_addr  = i;
+       }
+
+       sl = netdev_priv(dev);
+
+       /* Initialize channel control data */
+       sl->magic = SLCAN_MAGIC;
+       sl->dev = dev;
+       spin_lock_init(&sl->lock);
+       slcan_devs[i] = dev;
+
+       return sl;
+}
+
+/*
+ * Open the high-level part of the SLCAN channel.
+ * This function is called by the TTY module when the
+ * SLCAN line discipline is called for.  Because we are
+ * sure the tty line exists, we only have to link it to
+ * a free SLCAN channel...
+ *
+ * Called in process context serialized from other ldisc calls.
+ */
+
+static int slcan_open(struct tty_struct *tty)
+{
+       struct slcan *sl;
+       int err;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (tty->ops->write == NULL)
+               return -EOPNOTSUPP;
+
+       /* RTnetlink lock is misused here to serialize concurrent
+          opens of slcan channels. There are better ways, but it is
+          the simplest one.
+        */
+       rtnl_lock();
+
+       /* Collect hanged up channels. */
+       slc_sync();
+
+       sl = tty->disc_data;
+
+       err = -EEXIST;
+       /* First make sure we're not already connected. */
+       if (sl && sl->magic == SLCAN_MAGIC)
+               goto err_exit;
+
+       /* OK.  Find a free SLCAN channel to use. */
+       err = -ENFILE;
+       sl = slc_alloc(tty_devnum(tty));
+       if (sl == NULL)
+               goto err_exit;
+
+       sl->tty = tty;
+       tty->disc_data = sl;
+       sl->line = tty_devnum(tty);
+       sl->pid = current->pid;
+
+       if (!test_bit(SLF_INUSE, &sl->flags)) {
+               /* Perform the low-level SLCAN initialization. */
+               sl->rcount   = 0;
+               sl->xleft    = 0;
+
+               set_bit(SLF_INUSE, &sl->flags);
+
+               err = register_netdevice(sl->dev);
+               if (err)
+                       goto err_free_chan;
+       }
+
+       /* Done.  We have linked the TTY line to a channel. */
+       rtnl_unlock();
+       tty->receive_room = 65536;      /* We don't flow control */
+
+       /* TTY layer expects 0 on success; returning dev->base_addr (the
+        * channel index) would make every channel but slcan0 look like a
+        * failed ldisc open to tty_set_ldisc(). */
+       return 0;
+
+err_free_chan:
+       sl->tty = NULL;
+       tty->disc_data = NULL;
+       clear_bit(SLF_INUSE, &sl->flags);
+
+err_exit:
+       rtnl_unlock();
+
+       /* Count references from TTY module */
+       return err;
+}
+
+/*
+ * Close down a SLCAN channel.
+ * This means flushing out any pending queues, and then returning. This
+ * call is serialized against other ldisc functions.
+ *
+ * We also use this method for a hangup event.
+ */
+
+static void slcan_close(struct tty_struct *tty)
+{
+       struct slcan *sl = (struct slcan *) tty->disc_data;
+
+       /* First make sure we're connected. */
+       if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
+               return;
+
+       tty->disc_data = NULL;
+       sl->tty = NULL;
+       if (!sl->leased)
+               sl->line = 0;
+
+       /* Flush network side */
+       unregister_netdev(sl->dev);
+       /* This will complete via slc_free_netdev (the dev destructor) */
+}
+
+/* A hangup is handled exactly like a close. */
+static int slcan_hangup(struct tty_struct *tty)
+{
+       slcan_close(tty);
+       return 0;
+}
+
+/* Perform I/O control on an active SLCAN channel. */
+static int slcan_ioctl(struct tty_struct *tty, struct file *file,
+                      unsigned int cmd, unsigned long arg)
+{
+       struct slcan *sl = (struct slcan *) tty->disc_data;
+       unsigned int tmp;
+
+       /* First make sure we're connected. */
+       if (!sl || sl->magic != SLCAN_MAGIC)
+               return -EINVAL;
+
+       switch (cmd) {
+       case SIOCGIFNAME:
+               /* copy the interface name incl. terminating NUL to user */
+               tmp = strlen(sl->dev->name) + 1;
+               if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
+                       return -EFAULT;
+               return 0;
+
+       case SIOCSIFHWADDR:
+               /* CAN interfaces have no hardware address */
+               return -EINVAL;
+
+       default:
+               /* fall through to the generic tty mode ioctls */
+               return tty_mode_ioctl(tty, file, cmd, arg);
+       }
+}
+
+/* Line discipline operations registered for N_SLCAN. */
+static struct tty_ldisc_ops slc_ldisc = {
+       .owner          = THIS_MODULE,
+       .magic          = TTY_LDISC_MAGIC,
+       .name           = "slcan",
+       .open           = slcan_open,
+       .close          = slcan_close,
+       .hangup         = slcan_hangup,
+       .ioctl          = slcan_ioctl,
+       .receive_buf    = slcan_receive_buf,
+       .write_wakeup   = slcan_write_wakeup,
+};
+
+/* Module init: allocate the channel array and register the ldisc. */
+static int __init slcan_init(void)
+{
+       int status;
+
+       if (maxdev < 4)
+               maxdev = 4; /* Sanity */
+
+       printk(banner);
+       printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
+
+       slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
+       if (!slcan_devs) {
+               printk(KERN_ERR "slcan: can't allocate slcan device array!\n");
+               return -ENOMEM;
+       }
+
+       /* Fill in our line protocol discipline, and register it */
+       status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
+       if (status)  {
+               printk(KERN_ERR "slcan: can't register line discipline\n");
+               kfree(slcan_devs);
+       }
+       return status;
+}
+
+/* Module exit: hang up any active ttys, then unregister all netdevices. */
+static void __exit slcan_exit(void)
+{
+       int i;
+       struct net_device *dev;
+       struct slcan *sl;
+       unsigned long timeout = jiffies + HZ;
+       int busy = 0;
+
+       if (slcan_devs == NULL)
+               return;
+
+       /* First of all: check for active disciplines and hangup them.
+        */
+       do {
+               if (busy)
+                       msleep_interruptible(100);
+
+               busy = 0;
+               for (i = 0; i < maxdev; i++) {
+                       dev = slcan_devs[i];
+                       if (!dev)
+                               continue;
+                       sl = netdev_priv(dev);
+                       spin_lock_bh(&sl->lock);
+                       if (sl->tty) {
+                               busy++;
+                               tty_hangup(sl->tty);
+                       }
+                       spin_unlock_bh(&sl->lock);
+               }
+       } while (busy && time_before(jiffies, timeout));
+
+       /* FIXME: hangup is async so we should wait when doing this second
+          phase */
+
+       for (i = 0; i < maxdev; i++) {
+               dev = slcan_devs[i];
+               if (!dev)
+                       continue;
+               /* clear the slot first so slc_free_netdev() sees NULL */
+               slcan_devs[i] = NULL;
+
+               sl = netdev_priv(dev);
+               if (sl->tty) {
+                       printk(KERN_ERR "%s: tty discipline still running\n",
+                              dev->name);
+                       /* Intentionally leak the control block. */
+                       dev->destructor = NULL;
+               }
+
+               unregister_netdev(dev);
+       }
+
+       kfree(slcan_devs);
+       slcan_devs = NULL;
+
+       i = tty_unregister_ldisc(N_SLCAN);
+       if (i)
+               printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i);
+}
+
+module_init(slcan_init);
+module_exit(slcan_exit);
index 81475cc80e1cfd77f364afecde37bf21d9261052..80c2feeefec5542266a4444c341a864c8a905cf9 100644 (file)
@@ -59,7 +59,6 @@ static struct sockaddr default_mac = {
 
 /* Information that need to be kept for each board. */
 struct net_local {
-       struct net_device_stats stats;
        struct mii_if_info mii_if;
 
        /* Tx control lock.  This protects the transmit buffer ring
@@ -1059,7 +1058,7 @@ e100_tx_timeout(struct net_device *dev)
 
        /* remember we got an error */
 
-       np->stats.tx_errors++;
+       dev->stats.tx_errors++;
 
        /* reset the TX DMA in case it has hung on something */
 
@@ -1157,7 +1156,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
                         * allocate a new buffer to put a packet in.
                         */
                        e100_rx(dev);
-                       np->stats.rx_packets++;
+                       dev->stats.rx_packets++;
                        /* restart/continue on the channel, for safety */
                        *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
                        /* clear dma channel 1 eop/descr irq bits */
@@ -1173,8 +1172,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
        /* Report any packets that have been sent */
        while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
               (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
-               np->stats.tx_bytes += myFirstTxDesc->skb->len;
-               np->stats.tx_packets++;
+               dev->stats.tx_bytes += myFirstTxDesc->skb->len;
+               dev->stats.tx_packets++;
 
                /* dma is ready with the transmission of the data in tx_skb, so now
                   we can release the skb memory */
@@ -1197,7 +1196,6 @@ static irqreturn_t
 e100nw_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = (struct net_device *)dev_id;
-       struct net_local *np = netdev_priv(dev);
        unsigned long irqbits = *R_IRQ_MASK0_RD;
 
        /* check for underrun irq */
@@ -1205,13 +1203,13 @@ e100nw_interrupt(int irq, void *dev_id)
                SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
                *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
                SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
-               np->stats.tx_errors++;
+               dev->stats.tx_errors++;
                D(printk("ethernet receiver underrun!\n"));
        }
 
        /* check for overrun irq */
        if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
-               update_rx_stats(&np->stats); /* this will ack the irq */
+               update_rx_stats(&dev->stats); /* this will ack the irq */
                D(printk("ethernet receiver overrun!\n"));
        }
        /* check for excessive collision irq */
@@ -1219,7 +1217,7 @@ e100nw_interrupt(int irq, void *dev_id)
                SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
                *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
                SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
-               np->stats.tx_errors++;
+               dev->stats.tx_errors++;
                D(printk("ethernet excessive collisions!\n"));
        }
        return IRQ_HANDLED;
@@ -1250,7 +1248,7 @@ e100_rx(struct net_device *dev)
        spin_unlock(&np->led_lock);
 
        length = myNextRxDesc->descr.hw_len - 4;
-       np->stats.rx_bytes += length;
+       dev->stats.rx_bytes += length;
 
 #ifdef ETHDEBUG
        printk("Got a packet of length %d:\n", length);
@@ -1268,7 +1266,7 @@ e100_rx(struct net_device *dev)
                /* Small packet, copy data */
                skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
                if (!skb) {
-                       np->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
                        goto update_nextrxdesc;
                }
@@ -1294,7 +1292,7 @@ e100_rx(struct net_device *dev)
                int align;
                struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
                if (!new_skb) {
-                       np->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
                        goto update_nextrxdesc;
                }
@@ -1333,8 +1331,6 @@ e100_rx(struct net_device *dev)
 static int
 e100_close(struct net_device *dev)
 {
-       struct net_local *np = netdev_priv(dev);
-
        printk(KERN_INFO "Closing %s.\n", dev->name);
 
        netif_stop_queue(dev);
@@ -1366,8 +1362,8 @@ e100_close(struct net_device *dev)
 
        /* Update the statistics here. */
 
-       update_rx_stats(&np->stats);
-       update_tx_stats(&np->stats);
+       update_rx_stats(&dev->stats);
+       update_tx_stats(&dev->stats);
 
        /* Stop speed/duplex timers */
        del_timer(&speed_timer);
@@ -1545,11 +1541,11 @@ e100_get_stats(struct net_device *dev)
 
        spin_lock_irqsave(&lp->lock, flags);
 
-       update_rx_stats(&lp->stats);
-       update_tx_stats(&lp->stats);
+       update_rx_stats(&dev->stats);
+       update_tx_stats(&dev->stats);
 
        spin_unlock_irqrestore(&lp->lock, flags);
-       return &lp->stats;
+       return &dev->stats;
 }
 
 /*
index bb813d94aea8ef5ae11db63ce6ac795fd0a62c39..e97521c801ea6e4a90714bd2038d4c0610b8806a 100644 (file)
@@ -2408,7 +2408,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
                if (index < NEXACT_MAC)
                        ret++;
                else if (hash)
-                       *hash |= (1 << hash_mac_addr(addr[i]));
+                       *hash |= (1ULL << hash_mac_addr(addr[i]));
        }
        return ret;
 }
index 9246d2fa6cf9bfa835c34ded95b6c8febddebe77..f54af48edb939641eb1b4654328fdc46bb7b4769 100644 (file)
@@ -814,40 +814,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
-                                              const u8 **addr,
-                                              unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+                                                       const u8 **addr,
+                                                       unsigned int offset,
+                                                       unsigned int maxaddrs)
 {
+       unsigned int index = 0;
        unsigned int naddr = 0;
        const struct netdev_hw_addr *ha;
 
-       for_each_dev_addr(dev, ha) {
-               addr[naddr++] = ha->addr;
-               if (naddr >= maxaddrs)
-                       break;
-       }
+       for_each_dev_addr(dev, ha)
+               if (index++ >= offset) {
+                       addr[naddr++] = ha->addr;
+                       if (naddr >= maxaddrs)
+                               break;
+               }
        return naddr;
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
-                                              const u8 **addr,
-                                              unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+                                                       const u8 **addr,
+                                                       unsigned int offset,
+                                                       unsigned int maxaddrs)
 {
+       unsigned int index = 0;
        unsigned int naddr = 0;
        const struct netdev_hw_addr *ha;
 
-       netdev_for_each_mc_addr(ha, dev) {
-               addr[naddr++] = ha->addr;
-               if (naddr >= maxaddrs)
-                       break;
-       }
+       netdev_for_each_mc_addr(ha, dev)
+               if (index++ >= offset) {
+                       addr[naddr++] = ha->addr;
+                       if (naddr >= maxaddrs)
+                               break;
+               }
        return naddr;
 }
 
@@ -860,16 +868,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
-       u16 filt_idx[7];
+       unsigned int offset, naddr;
        const u8 *addr[7];
-       int ret, naddr = 0;
+       int ret;
        const struct port_info *pi = netdev_priv(dev);
 
        /* first do the secondary unicast addresses */
-       naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-       if (naddr > 0) {
+       for (offset = 0; ; offset += naddr) {
+               naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+                                                    ARRAY_SIZE(addr));
+               if (naddr == 0)
+                       break;
+
                ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-                                         naddr, addr, filt_idx, &uhash, sleep);
+                                         naddr, addr, NULL, &uhash, sleep);
                if (ret < 0)
                        return ret;
 
@@ -877,12 +889,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
        }
 
        /* next set up the multicast addresses */
-       naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-       if (naddr > 0) {
+       for (offset = 0; ; offset += naddr) {
+               naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+                                                    ARRAY_SIZE(addr));
+               if (naddr == 0)
+                       break;
+
                ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-                                         naddr, addr, filt_idx, &mhash, sleep);
+                                         naddr, addr, NULL, &mhash, sleep);
                if (ret < 0)
                        return ret;
+               free = false;
        }
 
        return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
index f7d7f976064b6cf6c888a524e34726147fb03728..35fc803a6a04074518add1b74a49a22df3442f8f 100644 (file)
@@ -1014,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
                        unsigned int naddr, const u8 **addr, u16 *idx,
                        u64 *hash, bool sleep_ok)
 {
-       int i, ret;
+       int offset, ret = 0;
+       unsigned nfilters = 0;
+       unsigned int rem = naddr;
        struct fw_vi_mac_cmd cmd, rpl;
-       struct fw_vi_mac_exact *p;
-       size_t len16;
 
-       if (naddr > ARRAY_SIZE(cmd.u.exact))
+       if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
                return -EINVAL;
-       len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
-                                     u.exact[naddr]), 16);
 
-       memset(&cmd, 0, sizeof(cmd));
-       cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
-                                    FW_CMD_REQUEST |
-                                    FW_CMD_WRITE |
-                                    (free ? FW_CMD_EXEC : 0) |
-                                    FW_VI_MAC_CMD_VIID(viid));
-       cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
-                                           FW_CMD_LEN16(len16));
+       for (offset = 0; offset < naddr; /**/) {
+               unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+                                        ? rem
+                                        : ARRAY_SIZE(cmd.u.exact));
+               size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+                                                    u.exact[fw_naddr]), 16);
+               struct fw_vi_mac_exact *p;
+               int i;
 
-       for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
-               p->valid_to_idx =
-                       cpu_to_be16(FW_VI_MAC_CMD_VALID |
-                                   FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
-               memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
-       }
+               memset(&cmd, 0, sizeof(cmd));
+               cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+                                            FW_CMD_REQUEST |
+                                            FW_CMD_WRITE |
+                                            (free ? FW_CMD_EXEC : 0) |
+                                            FW_VI_MAC_CMD_VIID(viid));
+               cmd.freemacs_to_len16 =
+                       cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+                                   FW_CMD_LEN16(len16));
+
+               for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+                       p->valid_to_idx = cpu_to_be16(
+                               FW_VI_MAC_CMD_VALID |
+                               FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+                       memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+               }
+
+
+               ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+                                       sleep_ok);
+               if (ret && ret != -ENOMEM)
+                       break;
 
-       ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
-       if (ret)
-               return ret;
-
-       for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
-               u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-
-               if (idx)
-                       idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
-                                 ? 0xffff
-                                 : index);
-               if (index < FW_CLS_TCAM_NUM_ENTRIES)
-                       ret++;
-               else if (hash)
-                       *hash |= (1 << hash_mac_addr(addr[i]));
+               for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+                       u16 index = FW_VI_MAC_CMD_IDX_GET(
+                               be16_to_cpu(p->valid_to_idx));
+
+                       if (idx)
+                               idx[offset+i] =
+                                       (index >= FW_CLS_TCAM_NUM_ENTRIES
+                                        ? 0xffff
+                                        : index);
+                       if (index < FW_CLS_TCAM_NUM_ENTRIES)
+                               nfilters++;
+                       else if (hash)
+                               *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+               }
+
+               free = false;
+               offset += fw_naddr;
+               rem -= fw_naddr;
        }
+
+       /*
+        * If there were no errors or we merely ran out of room in our MAC
+        * address arena, return the number of filters actually written.
+        */
+       if (ret == 0 || ret == -ENOMEM)
+               ret = nfilters;
        return ret;
 }
 
index dcb7f82c27017af001c0a567de84d732efbb717b..06c7d1c675175f561e0a596f7f72ceabced2b40a 100644 (file)
@@ -31,7 +31,7 @@
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k6-NAPI"
+#define DRV_VERSION "7.3.21-k8-NAPI"
 const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        u32 rctl, tctl;
 
-       /* signal that we're down so the interrupt handler does not
-        * reschedule our watchdog timer */
-       set_bit(__E1000_DOWN, &adapter->flags);
 
        /* disable receives in the hardware */
        rctl = er32(RCTL);
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter)
 
        e1000_irq_disable(adapter);
 
+       /*
+        * Setting DOWN must be after irq_disable to prevent
+        * a screaming interrupt.  Setting DOWN also prevents
+        * timers and tasks from rescheduling.
+        */
+       set_bit(__E1000_DOWN, &adapter->flags);
+
        del_timer_sync(&adapter->tx_fifo_stall_timer);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
index 75b099ce49c9b47e361adc66da99d148b58640f3..1f37ee6b2a2626282fd5a772cc21f821321b9379 100644 (file)
@@ -261,6 +261,13 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
 
 }
 
+static int ehea_set_flags(struct net_device *dev, u32 data)
+{
+       return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
+                                       | ETH_FLAG_TXVLAN
+                                       | ETH_FLAG_RXVLAN);
+}
+
 const struct ethtool_ops ehea_ethtool_ops = {
        .get_settings = ehea_get_settings,
        .get_drvinfo = ehea_get_drvinfo,
@@ -273,6 +280,8 @@ const struct ethtool_ops ehea_ethtool_ops = {
        .get_ethtool_stats = ehea_get_ethtool_stats,
        .get_rx_csum = ehea_get_rx_csum,
        .set_settings = ehea_set_settings,
+       .get_flags = ethtool_op_get_flags,
+       .set_flags = ehea_set_flags,
        .nway_reset = ehea_nway_reset,          /* Restart autonegotiation */
 };
 
index a84c389d3db71a78d47d2a3b0c54a0f15736c53c..69f61523fcc8483eed3d1d5b27de515316b824ef 100644 (file)
@@ -400,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
+                               ehea_info("Unable to allocate enough skb in the array\n");
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                break;
                        }
@@ -422,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
        struct net_device *dev = pr->port->netdev;
        int i;
 
-       for (i = 0; i < pr->rq1_skba.len; i++) {
+       if (nr_rq1a > pr->rq1_skba.len) {
+               ehea_error("NR_RQ1A bigger than skb array len\n");
+               return;
+       }
+
+       for (i = 0; i < nr_rq1a; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
-               if (!skb_arr_rq1[i])
+               if (!skb_arr_rq1[i]) {
+                       ehea_info("No enough memory to allocate skb array\n");
                        break;
+               }
        }
        /* Ring doorbell */
-       ehea_update_rq1a(pr->qp, nr_rq1a);
+       ehea_update_rq1a(pr->qp, i);
 }
 
 static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -675,7 +683,7 @@ static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
        int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
                              pr->port->vgrp);
 
-       if (use_lro) {
+       if (skb->dev->features & NETIF_F_LRO) {
                if (vlan_extracted)
                        lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
                                                     pr->port->vgrp,
@@ -735,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev,
 
                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
-                                       if (!skb)
+                                       if (!skb) {
+                                               ehea_info("Not enough memory to allocate skb\n");
                                                break;
+                                       }
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
@@ -777,7 +787,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }
-       if (use_lro)
+       if (dev->features & NETIF_F_LRO)
                lro_flush_all(&pr->lro_mgr);
 
        pr->rx_packets += processed;
@@ -3266,6 +3276,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
                      | NETIF_F_LLTX;
        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
+       if (use_lro)
+               dev->features |= NETIF_F_LRO;
+
        INIT_WORK(&port->reset_task, ehea_reset_port);
 
        ret = register_netdev(dev);
index ab9f675c5b8b8b86473d07b60b3ad933523b3ab5..fe337bd121aa74d3eb60cf866e31b8f76fa84573 100644 (file)
@@ -104,6 +104,8 @@ static void ri_tasklet(unsigned long dev)
                        rcu_read_unlock();
                        dev_kfree_skb(skb);
                        stats->tx_dropped++;
+                       if (skb_queue_len(&dp->tq) != 0)
+                               goto resched;
                        break;
                }
                rcu_read_unlock();
index 00b38bccd6d0fd51f19cfe383f71a1f5952d6a43..52a7c86af66347c388d24d29b043fe52e6a2ca5f 100644 (file)
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
 
        /* Baud Rate Error Correction x 10000 */
        u32 rate_err_array[] = {
-               0000, 0625, 1250, 1875,
+                  0,  625, 1250, 1875,
                2500, 3125, 3750, 4375,
                5000, 5625, 6250, 6875,
                7500, 8125, 8750, 9375,
index 385ccebb826cbe65e5216a5ecc83674844140369..6827dddc383e0f99334f9e76b131695ba6a58773 100644 (file)
@@ -96,6 +96,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
        s32 ret_val = 0;
+       u32 reg_anlp1 = 0;
+       u32 i = 0;
        u16 list_offset, data_offset, data_value;
 
        if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -122,14 +124,34 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
                        IXGBE_WRITE_FLUSH(hw);
                        hw->eeprom.ops.read(hw, ++data_offset, &data_value);
                }
-               /* Now restart DSP by setting Restart_AN */
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
-                   (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
 
                /* Release the semaphore */
                ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
                /* Delay obtaining semaphore again to allow FW access */
                msleep(hw->eeprom.semaphore_delay);
+
+               /* Now restart DSP by setting Restart_AN and clearing LMS */
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
+                               IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+                               IXGBE_AUTOC_AN_RESTART));
+
+               /* Wait for AN to leave state 0 */
+               for (i = 0; i < 10; i++) {
+                       msleep(4);
+                       reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+                       if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
+                               break;
+               }
+               if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+                       hw_dbg(hw, "sfp module setup not complete\n");
+                       ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+                       goto setup_sfp_out;
+               }
+
+               /* Restart DSP by setting Restart_AN and return to SFI mode */
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+                               IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+                               IXGBE_AUTOC_AN_RESTART));
        }
 
 setup_sfp_out:
index f97353cdb60727eae424b0382e77afa5171eaf40..fdb35d040d23249c077449c5ad603ef6ea63bbc1 100644 (file)
@@ -3800,7 +3800,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */
        if (hw->mac.ops.enable_tx_laser &&
            ((hw->phy.multispeed_fiber) ||
-            ((hw->phy.type == ixgbe_media_type_fiber) &&
+            ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
              (hw->mac.type == ixgbe_mac_82599EB))))
                hw->mac.ops.enable_tx_laser(hw);
 
@@ -4122,7 +4122,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
        if (hw->mac.ops.disable_tx_laser &&
            ((hw->phy.multispeed_fiber) ||
-            ((hw->phy.type == ixgbe_media_type_fiber) &&
+            ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
              (hw->mac.type == ixgbe_mac_82599EB))))
                hw->mac.ops.disable_tx_laser(hw);
 
@@ -4987,6 +4987,9 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
                adapter->rx_ring[i] = NULL;
        }
 
+       adapter->num_tx_queues = 0;
+       adapter->num_rx_queues = 0;
+
        ixgbe_free_q_vectors(adapter);
        ixgbe_reset_interrupt_capability(adapter);
 }
@@ -7212,7 +7215,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
        if (hw->mac.ops.disable_tx_laser &&
            ((hw->phy.multispeed_fiber) ||
-            ((hw->phy.type == ixgbe_media_type_fiber) &&
+            ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
              (hw->mac.type == ixgbe_mac_82599EB))))
                hw->mac.ops.disable_tx_laser(hw);
 
index ef816dd5a8f01ac9356d1f10d4aa3ddbc57cae28..0f80893edabf67326b3b574a2e3f517744d8bae7 100644 (file)
 #define IXGBE_ANLP1_PAUSE               0x0C00
 #define IXGBE_ANLP1_SYM_PAUSE           0x0400
 #define IXGBE_ANLP1_ASM_PAUSE           0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK       0x000f0000
+
 
 /* SW Semaphore Register bitmasks */
 #define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -2641,6 +2643,7 @@ struct ixgbe_info {
 #define IXGBE_ERR_NO_SPACE                      -25
 #define IXGBE_ERR_OVERTEMP                      -26
 #define IXGBE_ERR_RAR_INDEX                     -27
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -30
 #define IXGBE_ERR_PBA_SECTION                   -31
 #define IXGBE_ERR_INVALID_ARGUMENT              -32
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
index c57d9a43cecacbf8a668a0c4b1cca5e7b93c3521..2411e72ba572d00f6a76646e0f7a34da500f7e53 100644 (file)
@@ -2076,12 +2076,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
        }
 
        if (new_mtu > 1900) {
-               netdev->features &= ~(NETIF_F_HW_CSUM |
-                               NETIF_F_TSO |
-                               NETIF_F_TSO6);
+               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                               NETIF_F_TSO | NETIF_F_TSO6);
        } else {
                if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
-                       netdev->features |= NETIF_F_HW_CSUM;
+                       netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
                if (test_bit(JME_FLAG_TSO, &jme->flags))
                        netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
        }
@@ -2514,10 +2513,12 @@ jme_set_tx_csum(struct net_device *netdev, u32 on)
        if (on) {
                set_bit(JME_FLAG_TXCSUM, &jme->flags);
                if (netdev->mtu <= 1900)
-                       netdev->features |= NETIF_F_HW_CSUM;
+                       netdev->features |=
+                               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        } else {
                clear_bit(JME_FLAG_TXCSUM, &jme->flags);
-               netdev->features &= ~NETIF_F_HW_CSUM;
+               netdev->features &=
+                               ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
        }
 
        return 0;
@@ -2797,7 +2798,8 @@ jme_init_one(struct pci_dev *pdev,
        netdev->netdev_ops = &jme_netdev_ops;
        netdev->ethtool_ops             = &jme_ethtool_ops;
        netdev->watchdog_timeo          = TX_TIMEOUT;
-       netdev->features                =       NETIF_F_HW_CSUM |
+       netdev->features                =       NETIF_F_IP_CSUM |
+                                               NETIF_F_IPV6_CSUM |
                                                NETIF_F_SG |
                                                NETIF_F_TSO |
                                                NETIF_F_TSO6 |
index c8cc32c0edc9678d6174693412e977e43a4ebf29..c8c873b31a899a47ae002826efff438893646178 100644 (file)
@@ -468,18 +468,6 @@ static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
        return 0;
 }
 
-/**
- * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
- * @netdev:  Network interface device structure
- * Returns
- *     true(1):  Checksum On
- *     false(0): Checksum Off
- */
-static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
-{
-       return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
 /**
  * pch_gbe_set_tx_csum - Turn transmit checksums on or off
  * @netdev: Network interface device structure
@@ -493,11 +481,7 @@ static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
        adapter->tx_csum = data;
-       if (data)
-               netdev->features |= NETIF_F_HW_CSUM;
-       else
-               netdev->features &= ~NETIF_F_HW_CSUM;
-       return 0;
+       return ethtool_op_set_tx_ipv6_csum(netdev, data);
 }
 
 /**
@@ -572,7 +556,6 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
        .set_pauseparam = pch_gbe_set_pauseparam,
        .get_rx_csum = pch_gbe_get_rx_csum,
        .set_rx_csum = pch_gbe_set_rx_csum,
-       .get_tx_csum = pch_gbe_get_tx_csum,
        .set_tx_csum = pch_gbe_set_tx_csum,
        .get_strings = pch_gbe_get_strings,
        .get_ethtool_stats = pch_gbe_get_ethtool_stats,
index afb75066b14d1cce3505963d74d4f0b26d83f8fe..d7355306a738fbbe78b7848423ea203a6fd5c444 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
  *
  * This code was derived from the Intel e1000e Linux driver.
  *
@@ -2319,7 +2319,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
        netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
        netif_napi_add(netdev, &adapter->napi,
                       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
-       netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO;
+       netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
        pch_gbe_set_ethtool_ops(netdev);
 
        pch_gbe_mac_reset_hw(&adapter->hw);
@@ -2358,9 +2358,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
        pch_gbe_check_options(adapter);
 
        if (adapter->tx_csum)
-               netdev->features |= NETIF_F_HW_CSUM;
+               netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        else
-               netdev->features &= ~NETIF_F_HW_CSUM;
+               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
 
        /* initialize the wol settings based on the eeprom settings */
        adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
@@ -2462,8 +2462,8 @@ static void __exit pch_gbe_exit_module(void)
 module_init(pch_gbe_init_module);
 module_exit(pch_gbe_exit_module);
 
-MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver");
-MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>");
+MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
index 2510146fc5607db7a26d9b54ad634f8941d65ae2..ef0996a0eaaaa9e39e84d183cbc074be9bff5080 100644 (file)
@@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
                        .err  = "using default of "
                                __MODULE_STRING(PCH_GBE_DEFAULT_TXD),
                        .def  = PCH_GBE_DEFAULT_TXD,
-                       .arg  = { .r = { .min = PCH_GBE_MIN_TXD } },
-                       .arg  = { .r = { .max = PCH_GBE_MAX_TXD } }
+                       .arg  = { .r = { .min = PCH_GBE_MIN_TXD,
+                                        .max = PCH_GBE_MAX_TXD } }
                };
                struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
                tx_ring->count = TxDescriptors;
@@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
                        .err  = "using default of "
                                __MODULE_STRING(PCH_GBE_DEFAULT_RXD),
                        .def  = PCH_GBE_DEFAULT_RXD,
-                       .arg  = { .r = { .min = PCH_GBE_MIN_RXD } },
-                       .arg  = { .r = { .max = PCH_GBE_MAX_RXD } }
+                       .arg  = { .r = { .min = PCH_GBE_MIN_RXD,
+                                        .max = PCH_GBE_MAX_RXD } }
                };
                struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
                rx_ring->count = RxDescriptors;
index f0bd1a1aba3ab3413a16eab296b4881d99ea1145..e8b9c53c304b63d2ba2b4a530504e5fafb08f6dc 100644 (file)
 #include <linux/ethtool.h>
 #include <linux/phy.h>
 #include <linux/marvell_phy.h>
+#include <linux/of.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 
+#define MII_MARVELL_PHY_PAGE           22
+
 #define MII_M1011_IEVENT               0x13
 #define MII_M1011_IEVENT_CLEAR         0x0000
 
@@ -80,7 +83,6 @@
 #define MII_88E1121_PHY_LED_CTRL       16
 #define MII_88E1121_PHY_LED_PAGE       3
 #define MII_88E1121_PHY_LED_DEF                0x0030
-#define MII_88E1121_PHY_PAGE           22
 
 #define MII_M1011_PHY_STATUS           0x11
 #define MII_M1011_PHY_STATUS_1000      0x8000
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev)
        return 0;
 }
 
+#ifdef CONFIG_OF_MDIO
+/*
+ * Set and/or override some configuration registers based on the
+ * marvell,reg-init property stored in the of_node for the phydev.
+ *
+ * marvell,reg-init = <reg-page reg mask value>,...;
+ *
+ * There may be one or more sets of <reg-page reg mask value>:
+ *
+ * reg-page: which register bank to use.
+ * reg: the register.
+ * mask: if non-zero, ANDed with existing register value.
+ * value: ORed with the masked value and written to the regiser.
+ *
+ */
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+       const __be32 *paddr;
+       int len, i, saved_page, current_page, page_changed, ret;
+
+       if (!phydev->dev.of_node)
+               return 0;
+
+       paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len);
+       if (!paddr || len < (4 * sizeof(*paddr)))
+               return 0;
+
+       saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+       if (saved_page < 0)
+               return saved_page;
+       page_changed = 0;
+       current_page = saved_page;
+
+       ret = 0;
+       len /= sizeof(*paddr);
+       for (i = 0; i < len - 3; i += 4) {
+               u16 reg_page = be32_to_cpup(paddr + i);
+               u16 reg = be32_to_cpup(paddr + i + 1);
+               u16 mask = be32_to_cpup(paddr + i + 2);
+               u16 val_bits = be32_to_cpup(paddr + i + 3);
+               int val;
+
+               if (reg_page != current_page) {
+                       current_page = reg_page;
+                       page_changed = 1;
+                       ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
+                       if (ret < 0)
+                               goto err;
+               }
+
+               val = 0;
+               if (mask) {
+                       val = phy_read(phydev, reg);
+                       if (val < 0) {
+                               ret = val;
+                               goto err;
+                       }
+                       val &= mask;
+               }
+               val |= val_bits;
+
+               ret = phy_write(phydev, reg, val);
+               if (ret < 0)
+                       goto err;
+
+       }
+err:
+       if (page_changed) {
+               i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
+               if (ret == 0)
+                       ret = i;
+       }
+       return ret;
+}
+#else
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+       return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
 static int m88e1121_config_aneg(struct phy_device *phydev)
 {
        int err, oldpage, mscr;
 
-       oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+       oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
 
-       err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
                        MII_88E1121_PHY_MSCR_PAGE);
        if (err < 0)
                return err;
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
                        return err;
        }
 
-       phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+       phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
 
        err = phy_write(phydev, MII_BMCR, BMCR_RESET);
        if (err < 0)
@@ -229,11 +312,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+       oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
 
-       phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+       phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
        phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
-       phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+       phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
 
        err = genphy_config_aneg(phydev);
 
@@ -244,9 +327,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
 {
        int err, oldpage, mscr;
 
-       oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+       oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
 
-       err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
                        MII_88E1121_PHY_MSCR_PAGE);
        if (err < 0)
                return err;
@@ -258,7 +341,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
        if (err < 0)
                return err;
 
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev)
                        return err;
        }
 
+       err = marvell_of_reg_init(phydev);
+       if (err < 0)
+               return err;
 
        err = phy_write(phydev, MII_BMCR, BMCR_RESET);
        if (err < 0)
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
        int err;
 
        /* Change address */
-       err = phy_write(phydev, 0x16, 0x0002);
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
        if (err < 0)
                return err;
 
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
                return err;
 
        /* Change address */
-       err = phy_write(phydev, 0x16, 0x0003);
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
        if (err < 0)
                return err;
 
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev)
        if (err < 0)
                return err;
 
+       err = marvell_of_reg_init(phydev);
+       if (err < 0)
+               return err;
+
        /* Reset address */
-       err = phy_write(phydev, 0x16, 0x0);
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static int m88e1149_config_init(struct phy_device *phydev)
+{
+       int err;
+
+       /* Change address */
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+       if (err < 0)
+               return err;
+
+       /* Enable 1000 Mbit */
+       err = phy_write(phydev, 0x15, 0x1048);
+       if (err < 0)
+               return err;
+
+       err = marvell_of_reg_init(phydev);
+       if (err < 0)
+               return err;
+
+       /* Reset address */
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
        if (err < 0)
                return err;
 
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev)
                }
        }
 
+       err = marvell_of_reg_init(phydev);
+       if (err < 0)
+               return err;
+
        return 0;
 }
 
@@ -684,6 +808,19 @@ static struct phy_driver marvell_drivers[] = {
                .config_intr = &marvell_config_intr,
                .driver = { .owner = THIS_MODULE },
        },
+       {
+               .phy_id = MARVELL_PHY_ID_88E1149R,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88E1149R",
+               .features = PHY_GBIT_FEATURES,
+               .flags = PHY_HAS_INTERRUPT,
+               .config_init = &m88e1149_config_init,
+               .config_aneg = &m88e1118_config_aneg,
+               .read_status = &genphy_read_status,
+               .ack_interrupt = &marvell_ack_interrupt,
+               .config_intr = &marvell_config_intr,
+               .driver = { .owner = THIS_MODULE },
+       },
        {
                .phy_id = MARVELL_PHY_ID_88E1240,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
        { 0x01410e10, 0xfffffff0 },
        { 0x01410cb0, 0xfffffff0 },
        { 0x01410cd0, 0xfffffff0 },
+       { 0x01410e50, 0xfffffff0 },
        { 0x01410e30, 0xfffffff0 },
        { 0x01410e90, 0xfffffff0 },
        { }
index 0c91598ae2806377aa901175afe75b2632ed9a3a..b708f68471a61b4b63bf06ed51886418c332ec23 100644 (file)
@@ -2580,16 +2580,16 @@ ppp_create_interface(struct net *net, int unit, int *retp)
         */
        dev_net_set(dev, net);
 
-       ret = -EEXIST;
        mutex_lock(&pn->all_ppp_mutex);
 
        if (unit < 0) {
                unit = unit_get(&pn->units_idr, ppp);
                if (unit < 0) {
-                       *retp = unit;
+                       ret = unit;
                        goto out2;
                }
        } else {
+               ret = -EEXIST;
                if (unit_find(&pn->units_idr, unit))
                        goto out2; /* unit already exists */
                /*
@@ -2664,10 +2664,10 @@ static void ppp_shutdown_interface(struct ppp *ppp)
                ppp->closing = 1;
                ppp_unlock(ppp);
                unregister_netdev(ppp->dev);
+               unit_put(&pn->units_idr, ppp->file.index);
        } else
                ppp_unlock(ppp);
 
-       unit_put(&pn->units_idr, ppp->file.index);
        ppp->file.dead = 1;
        ppp->owner = NULL;
        wake_up_interruptible(&ppp->file.rwait);
@@ -2855,8 +2855,7 @@ static void __exit ppp_cleanup(void)
  * by holding all_ppp_mutex
  */
 
-/* associate pointer with specified number */
-static int unit_set(struct idr *p, void *ptr, int n)
+static int __unit_alloc(struct idr *p, void *ptr, int n)
 {
        int unit, err;
 
@@ -2867,10 +2866,24 @@ again:
        }
 
        err = idr_get_new_above(p, ptr, n, &unit);
-       if (err == -EAGAIN)
-               goto again;
+       if (err < 0) {
+               if (err == -EAGAIN)
+                       goto again;
+               return err;
+       }
+
+       return unit;
+}
+
+/* associate pointer with specified number */
+static int unit_set(struct idr *p, void *ptr, int n)
+{
+       int unit;
 
-       if (unit != n) {
+       unit = __unit_alloc(p, ptr, n);
+       if (unit < 0)
+               return unit;
+       else if (unit != n) {
                idr_remove(p, unit);
                return -EINVAL;
        }
@@ -2881,19 +2894,7 @@ again:
 /* get new free unit number and associate pointer with it */
 static int unit_get(struct idr *p, void *ptr)
 {
-       int unit, err;
-
-again:
-       if (!idr_pre_get(p, GFP_KERNEL)) {
-               printk(KERN_ERR "PPP: No free memory for idr\n");
-               return -ENOMEM;
-       }
-
-       err = idr_get_new_above(p, ptr, 0, &unit);
-       if (err == -EAGAIN)
-               goto again;
-
-       return unit;
+       return __unit_alloc(p, ptr, 0);
 }
 
 /* put unit number back to a pool */
index d9a76260880b6d8e8c21ba815ba42f7ed04fdece..e4dbbbfec7232e9f75df168c50aaf106a3efe1e5 100644 (file)
@@ -62,15 +62,15 @@ static const u32 default_msg =
 /* NETIF_MSG_PKTDATA | */
     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
 
-static int debug = 0x00007fff; /* defaults above */
-module_param(debug, int, 0);
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0664);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 #define MSIX_IRQ 0
 #define MSI_IRQ 1
 #define LEG_IRQ 2
 static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, MSIX_IRQ);
+module_param(qlge_irq_type, int, 0664);
 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
 
 static int qlge_mpi_coredump;
index 417adf372828bca69bcc3fd8c274e29fdfb6ac8b..76290a8c3c146f8eec24bd8e93cf201469c1082a 100644 (file)
@@ -1449,7 +1449,8 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
        dev->irq = pdev->irq;
 
        /* faked with skb_copy_and_csum_dev */
-       dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+       dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
+               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
        dev->netdev_ops         = &sc92031_netdev_ops;
        dev->watchdog_timeo     = TX_TIMEOUT;
index f3e4043d70eee852a98d7fe7eddc8bef1e219a6f..2166c1d0a5332dfe47daa8c8af33520b9d4c7b88 100644 (file)
@@ -196,7 +196,9 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
 
 static void efx_remove_channels(struct efx_nic *efx);
 static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
 static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
 static void efx_fini_struct(struct efx_nic *efx);
 static void efx_start_all(struct efx_nic *efx);
 static void efx_stop_all(struct efx_nic *efx);
@@ -334,8 +336,10 @@ void efx_process_channel_now(struct efx_channel *channel)
 
        /* Disable interrupts and wait for ISRs to complete */
        efx_nic_disable_interrupts(efx);
-       if (efx->legacy_irq)
+       if (efx->legacy_irq) {
                synchronize_irq(efx->legacy_irq);
+               efx->legacy_irq_enabled = false;
+       }
        if (channel->irq)
                synchronize_irq(channel->irq);
 
@@ -350,6 +354,8 @@ void efx_process_channel_now(struct efx_channel *channel)
        efx_channel_processed(channel);
 
        napi_enable(&channel->napi_str);
+       if (efx->legacy_irq)
+               efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);
 }
 
@@ -425,6 +431,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 
                *channel = *old_channel;
 
+               channel->napi_dev = NULL;
                memset(&channel->eventq, 0, sizeof(channel->eventq));
 
                rx_queue = &channel->rx_queue;
@@ -735,9 +742,13 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
        if (rc)
                goto rollback;
 
+       efx_init_napi(efx);
+
        /* Destroy old channels */
-       for (i = 0; i < efx->n_channels; i++)
+       for (i = 0; i < efx->n_channels; i++) {
+               efx_fini_napi_channel(other_channel[i]);
                efx_remove_channel(other_channel[i]);
+       }
 out:
        /* Free unused channel structures */
        for (i = 0; i < efx->n_channels; i++)
@@ -1401,6 +1412,8 @@ static void efx_start_all(struct efx_nic *efx)
                efx_start_channel(channel);
        }
 
+       if (efx->legacy_irq)
+               efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);
 
        /* Switch to event based MCDI completions after enabling interrupts.
@@ -1461,8 +1474,10 @@ static void efx_stop_all(struct efx_nic *efx)
 
        /* Disable interrupts and wait for ISR to complete */
        efx_nic_disable_interrupts(efx);
-       if (efx->legacy_irq)
+       if (efx->legacy_irq) {
                synchronize_irq(efx->legacy_irq);
+               efx->legacy_irq_enabled = false;
+       }
        efx_for_each_channel(channel, efx) {
                if (channel->irq)
                        synchronize_irq(channel->irq);
@@ -1594,7 +1609,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
  *
  **************************************************************************/
 
-static int efx_init_napi(struct efx_nic *efx)
+static void efx_init_napi(struct efx_nic *efx)
 {
        struct efx_channel *channel;
 
@@ -1603,18 +1618,21 @@ static int efx_init_napi(struct efx_nic *efx)
                netif_napi_add(channel->napi_dev, &channel->napi_str,
                               efx_poll, napi_weight);
        }
-       return 0;
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+       if (channel->napi_dev)
+               netif_napi_del(&channel->napi_str);
+       channel->napi_dev = NULL;
 }
 
 static void efx_fini_napi(struct efx_nic *efx)
 {
        struct efx_channel *channel;
 
-       efx_for_each_channel(channel, efx) {
-               if (channel->napi_dev)
-                       netif_napi_del(&channel->napi_str);
-               channel->napi_dev = NULL;
-       }
+       efx_for_each_channel(channel, efx)
+               efx_fini_napi_channel(channel);
 }
 
 /**************************************************************************
@@ -2331,9 +2349,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
        if (rc)
                goto fail1;
 
-       rc = efx_init_napi(efx);
-       if (rc)
-               goto fail2;
+       efx_init_napi(efx);
 
        rc = efx->type->init(efx);
        if (rc) {
@@ -2364,7 +2380,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
        efx->type->fini(efx);
  fail3:
        efx_fini_napi(efx);
- fail2:
        efx_remove_all(efx);
  fail1:
        return rc;
index 0d19fbfc5c2c08e916c6bb74116c004ea0216051..4c12332434b71fc0ec44995ee92f4295198eafa0 100644 (file)
@@ -621,6 +621,7 @@ struct efx_filter_state;
  * @pci_dev: The PCI device
  * @type: Controller type attributes
  * @legacy_irq: IRQ number
+ * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
  * @workqueue: Workqueue for port reconfigures and the HW monitor.
  *     Work items do not hold and must not acquire RTNL.
  * @workqueue_name: Name of workqueue
@@ -702,6 +703,7 @@ struct efx_nic {
        struct pci_dev *pci_dev;
        const struct efx_nic_type *type;
        int legacy_irq;
+       bool legacy_irq_enabled;
        struct workqueue_struct *workqueue;
        char workqueue_name[16];
        struct work_struct reset_work;
index 9743cff15130ff044c98e6917ce58935e0e983af..399b12abe2fd88a4b3b624caa4452a686bd986d9 100644 (file)
@@ -1380,6 +1380,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
        u32 queues;
        int syserr;
 
+       /* Could this be ours?  If interrupts are disabled then the
+        * channel state may not be valid.
+        */
+       if (!efx->legacy_irq_enabled)
+               return result;
+
        /* Read the ISR which also ACKs the interrupts */
        efx_readd(efx, &reg, FR_BZ_INT_ISR0);
        queues = EFX_EXTRACT_DWORD(reg, 0, 31);
index f2695fd180ca08531cb857c51e91ff7ca0223237..fd719edc7f7c1842682c14e668d448af3c75361c 100644 (file)
@@ -197,16 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
        }
 }
 
-static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
-{
-       if (data)
-               netdev->features |= NETIF_F_HW_CSUM;
-       else
-               netdev->features &= ~NETIF_F_HW_CSUM;
-
-       return 0;
-}
-
 static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
@@ -370,7 +360,7 @@ static struct ethtool_ops stmmac_ethtool_ops = {
        .get_link = ethtool_op_get_link,
        .get_rx_csum = stmmac_ethtool_get_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
-       .set_tx_csum = stmmac_ethtool_set_tx_csum,
+       .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_pauseparam = stmmac_get_pauseparam,
index 730a6fd79ee005a0f7c452653af678ff23930ac0..c0dc78571c628f8b8ecb0afd8f175eeb679dddba 100644 (file)
@@ -1494,7 +1494,8 @@ static int stmmac_probe(struct net_device *dev)
        dev->netdev_ops = &stmmac_netdev_ops;
        stmmac_set_ethtool_ops(dev);
 
-       dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
+       dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
+               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        dev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
        /* Both mac100 and gmac support receive VLAN tag detection */
@@ -1516,6 +1517,8 @@ static int stmmac_probe(struct net_device *dev)
                pr_warning("\tno valid MAC address;"
                        "please, use ifconfig or nwhwconfig!\n");
 
+       spin_lock_init(&priv->lock);
+
        ret = register_netdev(dev);
        if (ret) {
                pr_err("%s: ERROR %i registering the device\n",
@@ -1525,9 +1528,7 @@ static int stmmac_probe(struct net_device *dev)
 
        DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
            dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
-           (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
-
-       spin_lock_init(&priv->lock);
+           (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
 
        return ret;
 }
index a9f7d5d1a2695f95a968001a93c1449b9ac998be..7064e035757a16b927d35aa61991268089275744 100644 (file)
@@ -688,9 +688,6 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
 
        DMFE_DBUG(0, "dmfe_start_xmit", 0);
 
-       /* Resource flag check */
-       netif_stop_queue(dev);
-
        /* Too large packet check */
        if (skb->len > MAX_PACKET_SIZE) {
                pr_err("big packet = %d\n", (u16)skb->len);
@@ -698,6 +695,9 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
+       /* Resource flag check */
+       netif_stop_queue(dev);
+
        spin_lock_irqsave(&db->lock, flags);
 
        /* No Tx resource check, it never happen nromally */
index 05a95586f3c52f76ead1b26bb6545b3a18b1a076..055b87ab4f075b66afb7cc4d33c58b440d4f83be 100644 (file)
@@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics {
 #define UCC_GETH_UTFS_INIT                      512    /* Tx virtual FIFO size
                                                         */
 #define UCC_GETH_UTFET_INIT                     256    /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT                     512
+#define UCC_GETH_UTFTT_INIT                     256    /* 1/2 utfs
+                                                          due to errata */
 /* Gigabit Ethernet (1000 Mbps) */
 #define UCC_GETH_URFS_GIGA_INIT                 4096/*2048*/   /* Rx virtual
                                                                   FIFO size */
index 52ffabe6db0eac43cc39a14ece7326f1d686b5aa..6f600cced6e1f5e10d3002fba267f052e6b05544 100644 (file)
@@ -196,6 +196,25 @@ config USB_NET_CDC_EEM
          IEEE 802 "local assignment" bit is set in the address, a "usbX"
          name is used instead.
 
+config USB_NET_CDC_NCM
+       tristate "CDC NCM support"
+       depends on USB_USBNET
+       default y
+       help
+         This driver provides support for CDC NCM (Network Control Model
+         Device USB Class Specification). The CDC NCM specification is
+         available from <http://www.usb.org/>.
+
+         Say "y" to link the driver statically, or "m" to build a
+         dynamically linked module.
+
+         This driver should work with at least the following devices:
+           * ST-Ericsson M700 LTE FDD/TDD Mobile Broadband Modem (ref. design)
+           * ST-Ericsson M5730 HSPA+ Mobile Broadband Modem (reference design)
+           * ST-Ericsson M570 HSPA+ Mobile Broadband Modem (reference design)
+           * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
+           * Ericsson F5521gw Mobile Broadband Module
+
 config USB_NET_DM9601
        tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
        depends on USB_USBNET
index a19b0259ae16c118165be08720553f0bbfa4d289..cac1703011877e9648680362b1a28461ea72dcb6 100644 (file)
@@ -26,4 +26,5 @@ obj-$(CONFIG_USB_CDC_PHONET)  += cdc-phonet.o
 obj-$(CONFIG_USB_IPHETH)       += ipheth.o
 obj-$(CONFIG_USB_SIERRA_NET)   += sierra_net.o
 obj-$(CONFIG_USB_NET_CX82310_ETH)      += cx82310_eth.o
+obj-$(CONFIG_USB_NET_CDC_NCM)  += cdc_ncm.o
 
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
new file mode 100644 (file)
index 0000000..593c104
--- /dev/null
@@ -0,0 +1,1213 @@
+/*
+ * cdc_ncm.c
+ *
+ * Copyright (C) ST-Ericsson 2010
+ * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
+ * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
+ *
+ * USB Host Driver for Network Control Model (NCM)
+ * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ *
+ * The NCM encoding, decoding and initialization logic
+ * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ctype.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc.h>
+
+#define        DRIVER_VERSION                          "30-Nov-2010"
+
+/* CDC NCM subclass 3.2.1 */
+#define USB_CDC_NCM_NDP16_LENGTH_MIN           0x10
+
+/* Maximum NTB length */
+#define        CDC_NCM_NTB_MAX_SIZE_TX                 16384   /* bytes */
+#define        CDC_NCM_NTB_MAX_SIZE_RX                 16384   /* bytes */
+
+/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
+#define        CDC_NCM_MIN_DATAGRAM_SIZE               1514    /* bytes */
+
+#define        CDC_NCM_MIN_TX_PKT                      512     /* bytes */
+
+/* Default value for MaxDatagramSize */
+#define        CDC_NCM_MAX_DATAGRAM_SIZE               2048    /* bytes */
+
+/*
+ * Maximum amount of datagrams in NCM Datagram Pointer Table, not counting
+ * the last NULL entry. Any additional datagrams in NTB would be discarded.
+ */
+#define        CDC_NCM_DPT_DATAGRAMS_MAX               32
+
+/* Restart the timer, if amount of datagrams is less than given value */
+#define        CDC_NCM_RESTART_TIMER_DATAGRAM_CNT      3
+
+/* The following macro defines the minimum header space */
+#define        CDC_NCM_MIN_HDR_SIZE \
+       (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
+       (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
+
+struct connection_speed_change {
+       __le32  USBitRate; /* holds 3GPP downlink value, bits per second */
+       __le32  DSBitRate; /* holds 3GPP uplink value, bits per second */
+} __attribute__ ((packed));
+
+struct cdc_ncm_data {
+       struct usb_cdc_ncm_nth16 nth16;
+       struct usb_cdc_ncm_ndp16 ndp16;
+       struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
+};
+
+struct cdc_ncm_ctx {
+       struct cdc_ncm_data rx_ncm;
+       struct cdc_ncm_data tx_ncm;
+       struct usb_cdc_ncm_ntb_parameters ncm_parm;
+       struct timer_list tx_timer;
+
+       const struct usb_cdc_ncm_desc *func_desc;
+       const struct usb_cdc_header_desc *header_desc;
+       const struct usb_cdc_union_desc *union_desc;
+       const struct usb_cdc_ether_desc *ether_desc;
+
+       struct net_device *netdev;
+       struct usb_device *udev;
+       struct usb_host_endpoint *in_ep;
+       struct usb_host_endpoint *out_ep;
+       struct usb_host_endpoint *status_ep;
+       struct usb_interface *intf;
+       struct usb_interface *control;
+       struct usb_interface *data;
+
+       struct sk_buff *tx_curr_skb;
+       struct sk_buff *tx_rem_skb;
+
+       spinlock_t mtx;
+
+       u32 tx_timer_pending;
+       u32 tx_curr_offset;
+       u32 tx_curr_last_offset;
+       u32 tx_curr_frame_num;
+       u32 rx_speed;
+       u32 tx_speed;
+       u32 rx_max;
+       u32 tx_max;
+       u32 max_datagram_size;
+       u16 tx_max_datagrams;
+       u16 tx_remainder;
+       u16 tx_modulus;
+       u16 tx_ndp_modulus;
+       u16 tx_seq;
+       u16 connected;
+       u8 data_claimed;
+       u8 control_claimed;
+};
+
+static void cdc_ncm_tx_timeout(unsigned long arg);
+static const struct driver_info cdc_ncm_info;
+static struct usb_driver cdc_ncm_driver;
+static struct ethtool_ops cdc_ncm_ethtool_ops;
+
+static const struct usb_device_id cdc_devs[] = {
+       { USB_INTERFACE_INFO(USB_CLASS_COMM,
+               USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+               .driver_info = (unsigned long)&cdc_ncm_info,
+       },
+       {
+       },
+};
+
+MODULE_DEVICE_TABLE(usb, cdc_devs);
+
+static void
+cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+       struct usbnet *dev = netdev_priv(net);
+
+       strncpy(info->driver, dev->driver_name, sizeof(info->driver));
+       strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+       strncpy(info->fw_version, dev->driver_info->description,
+               sizeof(info->fw_version));
+       usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static int
+cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
+                  void *data, u16 flags, u16 *actlen, u16 timeout)
+{
+       int err;
+
+       err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
+                               usb_rcvctrlpipe(ctx->udev, 0) :
+                               usb_sndctrlpipe(ctx->udev, 0),
+                               req->bNotificationType, req->bmRequestType,
+                               req->wValue,
+                               req->wIndex, data,
+                               req->wLength, timeout);
+
+       if (err < 0) {
+               if (actlen)
+                       *actlen = 0;
+               return err;
+       }
+
+       if (actlen)
+               *actlen = err;
+
+       return 0;
+}
+
+static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
+{
+       struct usb_cdc_notification req;
+       u32 val;
+       __le16 max_datagram_size;
+       u8 flags;
+       u8 iface_no;
+       int err;
+
+       iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+
+       req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
+       req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
+       req.wValue = 0;
+       req.wIndex = cpu_to_le16(iface_no);
+       req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
+
+       err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
+       if (err) {
+               pr_debug("failed GET_NTB_PARAMETERS\n");
+               return 1;
+       }
+
+       /* read correct set of parameters according to device mode */
+       ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+       ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+       ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+       ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+       ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+
+       if (ctx->func_desc != NULL)
+               flags = ctx->func_desc->bmNetworkCapabilities;
+       else
+               flags = 0;
+
+       pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
+                "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
+                "wNdpOutAlignment=%u flags=0x%x\n",
+                ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
+                ctx->tx_ndp_modulus, flags);
+
+       /* max count of tx datagrams without terminating NULL entry */
+       ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
+
+       /* verify maximum size of received NTB in bytes */
+       if ((ctx->rx_max <
+           (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+           (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
+               pr_debug("Using default maximum receive length=%d\n",
+                                               CDC_NCM_NTB_MAX_SIZE_RX);
+               ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
+       }
+
+       /* verify maximum size of transmitted NTB in bytes */
+       if ((ctx->tx_max <
+           (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+           (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
+               pr_debug("Using default maximum transmit length=%d\n",
+                                               CDC_NCM_NTB_MAX_SIZE_TX);
+               ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+       }
+
+       /*
+        * verify that the structure alignment is:
+        * - power of two
+        * - not greater than the maximum transmit length
+        * - not less than four bytes
+        */
+       val = ctx->tx_ndp_modulus;
+
+       if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
+           (val != ((-val) & val)) || (val >= ctx->tx_max)) {
+               pr_debug("Using default alignment: 4 bytes\n");
+               ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
+       }
+
+       /*
+        * verify that the payload alignment is:
+        * - power of two
+        * - not greater than the maximum transmit length
+        * - not less than four bytes
+        */
+       val = ctx->tx_modulus;
+
+       if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
+           (val != ((-val) & val)) || (val >= ctx->tx_max)) {
+               pr_debug("Using default transmit modulus: 4 bytes\n");
+               ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
+       }
+
+       /* verify the payload remainder */
+       if (ctx->tx_remainder >= ctx->tx_modulus) {
+               pr_debug("Using default transmit remainder: 0 bytes\n");
+               ctx->tx_remainder = 0;
+       }
+
+       /* adjust TX-remainder according to NCM specification. */
+       ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
+                                               (ctx->tx_modulus - 1));
+
+       /* additional configuration */
+
+       /* set CRC Mode */
+       req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
+       req.bNotificationType = USB_CDC_SET_CRC_MODE;
+       req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
+       req.wIndex = cpu_to_le16(iface_no);
+       req.wLength = 0;
+
+       err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+       if (err)
+               pr_debug("Setting CRC mode off failed\n");
+
+       /* set NTB format */
+       req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
+       req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
+       req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
+       req.wIndex = cpu_to_le16(iface_no);
+       req.wLength = 0;
+
+       err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+       if (err)
+               pr_debug("Setting NTB format to 16-bit failed\n");
+
+       /* set Max Datagram Size (MTU) */
+       req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
+       req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
+       req.wValue = 0;
+       req.wIndex = cpu_to_le16(iface_no);
+       req.wLength = cpu_to_le16(2);
+
+       err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
+       if (err) {
+               pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
+                        CDC_NCM_MIN_DATAGRAM_SIZE);
+               /* use default */
+               ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+       } else {
+               ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+
+               if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+                       ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+               else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
+                       ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
+       }
+
+       if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
+               ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
+
+       return 0;
+}
+
+/*
+ * Scan the current altsetting of @intf and record the first IN
+ * interrupt endpoint (status), first IN bulk endpoint (data in) and
+ * first OUT bulk endpoint (data out) in @ctx.  Endpoints that were
+ * already found are not overwritten, so this can safely be called
+ * once for the data interface and once for the control interface.
+ */
+static void
+cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
+{
+       struct usb_host_endpoint *e;
+       u8 ep;
+
+       for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
+
+               e = intf->cur_altsetting->endpoint + ep;
+               switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+               case USB_ENDPOINT_XFER_INT:
+                       if (usb_endpoint_dir_in(&e->desc)) {
+                               if (ctx->status_ep == NULL)
+                                       ctx->status_ep = e;
+                       }
+                       break;
+
+               case USB_ENDPOINT_XFER_BULK:
+                       if (usb_endpoint_dir_in(&e->desc)) {
+                               if (ctx->in_ep == NULL)
+                                       ctx->in_ep = e;
+                       } else {
+                               if (ctx->out_ep == NULL)
+                                       ctx->out_ep = e;
+                       }
+                       break;
+
+               default:
+                       break;
+               }
+       }
+}
+
+/*
+ * Tear down @ctx: stop the TX aggregation timer, release any claimed
+ * interfaces, drop pending TX skbs and free the context itself.
+ * Safe to call with a NULL pointer.
+ */
+static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
+{
+       if (ctx == NULL)
+               return;
+
+       /* make sure the timer callback cannot run after we free ctx */
+       del_timer_sync(&ctx->tx_timer);
+
+       if (ctx->data_claimed) {
+               usb_set_intfdata(ctx->data, NULL);
+               usb_driver_release_interface(driver_of(ctx->intf), ctx->data);
+       }
+
+       if (ctx->control_claimed) {
+               usb_set_intfdata(ctx->control, NULL);
+               usb_driver_release_interface(driver_of(ctx->intf),
+                                                               ctx->control);
+       }
+
+       /* free any skb still waiting to be aggregated or transmitted */
+       if (ctx->tx_rem_skb != NULL) {
+               dev_kfree_skb_any(ctx->tx_rem_skb);
+               ctx->tx_rem_skb = NULL;
+       }
+
+       if (ctx->tx_curr_skb != NULL) {
+               dev_kfree_skb_any(ctx->tx_curr_skb);
+               ctx->tx_curr_skb = NULL;
+       }
+
+       kfree(ctx);
+}
+
+/*
+ * Bind the NCM driver to a USB interface.
+ *
+ * Parses the class-specific descriptors attached to the control
+ * interface (union, ethernet and NCM functional descriptors), claims
+ * the companion data/control interface, negotiates NTB parameters via
+ * cdc_ncm_setup(), locates the bulk/interrupt endpoints and reads the
+ * MAC address from the ethernet descriptor.
+ *
+ * Returns 0 on success or -ENODEV when any mandatory descriptor,
+ * interface or endpoint is missing; all partially acquired resources
+ * are released through cdc_ncm_free() on the error path.
+ */
+static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+       struct cdc_ncm_ctx *ctx;
+       struct usb_driver *driver;
+       u8 *buf;
+       int len;
+       int temp;
+       u8 iface_no;
+
+       /* kzalloc() gives us a zeroed context directly, replacing the
+        * previous kmalloc() + memset() pair */
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (ctx == NULL)
+               goto error;
+
+       init_timer(&ctx->tx_timer);
+       spin_lock_init(&ctx->mtx);
+       ctx->netdev = dev->net;
+
+       /* store ctx pointer in device data field */
+       dev->data[0] = (unsigned long)ctx;
+
+       /* get some pointers */
+       driver = driver_of(intf);
+       buf = intf->cur_altsetting->extra;
+       len = intf->cur_altsetting->extralen;
+
+       ctx->udev = dev->udev;
+       ctx->intf = intf;
+
+       /* parse through descriptors associated with control interface */
+       while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
+
+               if (buf[1] != USB_DT_CS_INTERFACE)
+                       goto advance;
+
+               switch (buf[2]) {
+               case USB_CDC_UNION_TYPE:
+                       if (buf[0] < sizeof(*(ctx->union_desc)))
+                               break;
+
+                       ctx->union_desc =
+                                       (const struct usb_cdc_union_desc *)buf;
+
+                       ctx->control = usb_ifnum_to_if(dev->udev,
+                                       ctx->union_desc->bMasterInterface0);
+                       ctx->data = usb_ifnum_to_if(dev->udev,
+                                       ctx->union_desc->bSlaveInterface0);
+                       break;
+
+               case USB_CDC_ETHERNET_TYPE:
+                       if (buf[0] < sizeof(*(ctx->ether_desc)))
+                               break;
+
+                       ctx->ether_desc =
+                                       (const struct usb_cdc_ether_desc *)buf;
+
+                       /* clamp the advertised segment size to the range
+                        * supported by this driver */
+                       dev->hard_mtu =
+                               le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+
+                       if (dev->hard_mtu <
+                           (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
+                               dev->hard_mtu =
+                                       CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
+
+                       else if (dev->hard_mtu >
+                                (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
+                               dev->hard_mtu =
+                                       CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
+                       break;
+
+               case USB_CDC_NCM_TYPE:
+                       if (buf[0] < sizeof(*(ctx->func_desc)))
+                               break;
+
+                       ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
+                       break;
+
+               default:
+                       break;
+               }
+advance:
+               /* advance to next descriptor */
+               temp = buf[0];
+               buf += temp;
+               len -= temp;
+       }
+
+       /* check if we got everything */
+       if ((ctx->control == NULL) || (ctx->data == NULL) ||
+           (ctx->ether_desc == NULL))
+               goto error;
+
+       /* claim interfaces, if any */
+       if (ctx->data != intf) {
+               temp = usb_driver_claim_interface(driver, ctx->data, dev);
+               if (temp)
+                       goto error;
+               ctx->data_claimed = 1;
+       }
+
+       if (ctx->control != intf) {
+               temp = usb_driver_claim_interface(driver, ctx->control, dev);
+               if (temp)
+                       goto error;
+               ctx->control_claimed = 1;
+       }
+
+       iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
+
+       /* reset data interface */
+       temp = usb_set_interface(dev->udev, iface_no, 0);
+       if (temp)
+               goto error;
+
+       /* initialize data interface */
+       if (cdc_ncm_setup(ctx))
+               goto error;
+
+       /* configure data interface */
+       temp = usb_set_interface(dev->udev, iface_no, 1);
+       if (temp)
+               goto error;
+
+       cdc_ncm_find_endpoints(ctx, ctx->data);
+       cdc_ncm_find_endpoints(ctx, ctx->control);
+
+       if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
+           (ctx->status_ep == NULL))
+               goto error;
+
+       dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+
+       usb_set_intfdata(ctx->data, dev);
+       usb_set_intfdata(ctx->control, dev);
+       usb_set_intfdata(ctx->intf, dev);
+
+       temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
+       if (temp)
+               goto error;
+
+       dev_info(&dev->udev->dev, "MAC-Address: "
+                               "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                               dev->net->dev_addr[0], dev->net->dev_addr[1],
+                               dev->net->dev_addr[2], dev->net->dev_addr[3],
+                               dev->net->dev_addr[4], dev->net->dev_addr[5]);
+
+       dev->in = usb_rcvbulkpipe(dev->udev,
+               ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+       dev->out = usb_sndbulkpipe(dev->udev,
+               ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+       dev->status = ctx->status_ep;
+       dev->rx_urb_size = ctx->rx_max;
+
+       /*
+        * We should get an event when network connection is "connected" or
+        * "disconnected". Set network connection in "disconnected" state
+        * (carrier is OFF) during attach, so the IP network stack does not
+        * start IPv6 negotiation and more.
+        */
+       netif_carrier_off(dev->net);
+       ctx->tx_speed = ctx->rx_speed = 0;
+       return 0;
+
+error:
+       cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
+       dev->data[0] = 0;
+       dev_info(&dev->udev->dev, "Descriptor failure\n");
+       return -ENODEV;
+}
+
+/*
+ * usbnet unbind callback: drop intfdata on all interfaces, release the
+ * interfaces claimed during bind, then free the context.  The
+ * *_claimed flags are cleared before calling cdc_ncm_free() so the
+ * interfaces are not released a second time there.
+ */
+static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       struct usb_driver *driver;
+
+       if (ctx == NULL)
+               return;         /* no setup */
+
+       driver = driver_of(intf);
+
+       usb_set_intfdata(ctx->data, NULL);
+       usb_set_intfdata(ctx->control, NULL);
+       usb_set_intfdata(ctx->intf, NULL);
+
+       /* release interfaces, if any */
+       if (ctx->data_claimed) {
+               usb_driver_release_interface(driver, ctx->data);
+               ctx->data_claimed = 0;
+       }
+
+       if (ctx->control_claimed) {
+               usb_driver_release_interface(driver, ctx->control);
+               ctx->control_claimed = 0;
+       }
+
+       cdc_ncm_free(ctx);
+}
+
+/*
+ * Zero the bytes [first, end) of @ptr, never writing at or beyond
+ * offset @max.  A no-op when the clamped range is empty.
+ */
+static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
+{
+       u32 stop = min(end, max);
+
+       if (first >= stop)
+               return;
+       memset(ptr + first, 0, stop - first);
+}
+
+/*
+ * Build a 16-bit NTB (NCM Transfer Block) by aggregating Ethernet
+ * frames into one USB OUT skb.
+ *
+ * Called with ctx->mtx held.  @skb is the next datagram to append, or
+ * NULL when invoked from the TX timer to flush whatever has been
+ * collected so far.  Returns the finished NTB skb when the frame is
+ * ready to go out, or NULL while datagrams are still being
+ * accumulated (state is parked in ctx->tx_curr_* for the next call).
+ */
+static struct sk_buff *
+cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
+{
+       struct sk_buff *skb_out;
+       u32 rem;
+       u32 offset;
+       u32 last_offset;
+       u16 n = 0;
+       u8 timeout = 0;
+
+       /* if there is a remaining skb, it gets priority */
+       if (skb != NULL)
+               swap(skb, ctx->tx_rem_skb);
+       else
+               timeout = 1;
+
+       /*
+        * +----------------+
+        * | skb_out        |
+        * +----------------+
+        *           ^ offset
+        *        ^ last_offset
+        */
+
+       /* check if we are resuming an OUT skb */
+       if (ctx->tx_curr_skb != NULL) {
+               /* pop variables */
+               skb_out = ctx->tx_curr_skb;
+               offset = ctx->tx_curr_offset;
+               last_offset = ctx->tx_curr_last_offset;
+               n = ctx->tx_curr_frame_num;
+
+       } else {
+               /* reset variables */
+               skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
+               if (skb_out == NULL) {
+                       if (skb != NULL) {
+                               dev_kfree_skb_any(skb);
+                               ctx->netdev->stats.tx_dropped++;
+                       }
+                       goto exit_no_skb;
+               }
+
+               /* make room for NTH and NDP */
+               offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+                                       ctx->tx_ndp_modulus) +
+                                       sizeof(struct usb_cdc_ncm_ndp16) +
+                                       (ctx->tx_max_datagrams + 1) *
+                                       sizeof(struct usb_cdc_ncm_dpe16);
+
+               /* store last valid offset before alignment */
+               last_offset = offset;
+               /* align first Datagram offset correctly */
+               offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
+               /* zero buffer till the first IP datagram */
+               cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
+               n = 0;
+               ctx->tx_curr_frame_num = 0;
+       }
+
+       /* append datagrams until the NTB is full or we run out of skbs */
+       for (; n < ctx->tx_max_datagrams; n++) {
+               /* check if end of transmit buffer is reached */
+               if (offset >= ctx->tx_max)
+                       break;
+
+               /* compute maximum buffer size */
+               rem = ctx->tx_max - offset;
+
+               if (skb == NULL) {
+                       skb = ctx->tx_rem_skb;
+                       ctx->tx_rem_skb = NULL;
+
+                       /* check for end of skb */
+                       if (skb == NULL)
+                               break;
+               }
+
+               if (skb->len > rem) {
+                       if (n == 0) {
+                               /* won't fit, MTU problem? */
+                               dev_kfree_skb_any(skb);
+                               skb = NULL;
+                               ctx->netdev->stats.tx_dropped++;
+                       } else {
+                               /* no room for skb - store for later */
+                               if (ctx->tx_rem_skb != NULL) {
+                                       dev_kfree_skb_any(ctx->tx_rem_skb);
+                                       ctx->netdev->stats.tx_dropped++;
+                               }
+                               ctx->tx_rem_skb = skb;
+                               skb = NULL;
+
+                               /* loop one more time */
+                               timeout = 1;
+                       }
+                       break;
+               }
+
+               /* copy the datagram and record it in the DPE table */
+               memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
+
+               ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
+               ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
+
+               /* update offset */
+               offset += skb->len;
+
+               /* store last valid offset before alignment */
+               last_offset = offset;
+
+               /* align offset correctly */
+               offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
+
+               /* zero padding */
+               cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
+                                                               ctx->tx_max);
+               dev_kfree_skb_any(skb);
+               skb = NULL;
+       }
+
+       /* free up any dangling skb */
+       if (skb != NULL) {
+               dev_kfree_skb_any(skb);
+               skb = NULL;
+               ctx->netdev->stats.tx_dropped++;
+       }
+
+       ctx->tx_curr_frame_num = n;
+
+       if (n == 0) {
+               /* wait for more frames */
+               /* push variables */
+               ctx->tx_curr_skb = skb_out;
+               ctx->tx_curr_offset = offset;
+               ctx->tx_curr_last_offset = last_offset;
+               goto exit_no_skb;
+
+       } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
+               /* wait for more frames */
+               /* push variables */
+               ctx->tx_curr_skb = skb_out;
+               ctx->tx_curr_offset = offset;
+               ctx->tx_curr_last_offset = last_offset;
+               /* set the pending count */
+               if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
+                       ctx->tx_timer_pending = 2;
+               goto exit_no_skb;
+
+       } else {
+               /* frame goes out */
+               /* variables will be reset at next call */
+       }
+
+       /* check for overflow */
+       if (last_offset > ctx->tx_max)
+               last_offset = ctx->tx_max;
+
+       /* revert offset */
+       offset = last_offset;
+
+       /*
+        * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
+        * we send buffers as it is. If we get more data, it would be more
+        * efficient for USB HS mobile device with DMA engine to receive a full
+        * size NTB, than canceling DMA transfer and receiving a short packet.
+        */
+       if (offset > CDC_NCM_MIN_TX_PKT)
+               offset = ctx->tx_max;
+
+       /* final zero padding */
+       cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
+
+       /* store last offset */
+       last_offset = offset;
+
+       if ((last_offset < ctx->tx_max) && ((last_offset %
+                       le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
+               /* force short packet by appending one zero byte */
+               *(((u8 *)skb_out->data) + last_offset) = 0;
+               last_offset++;
+       }
+
+       /* zero the rest of the DPEs plus the last NULL entry */
+       for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
+               ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
+               ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
+       }
+
+       /* fill out 16-bit NTB header */
+       ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+       ctx->tx_ncm.nth16.wHeaderLength =
+                                       cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
+       ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
+       ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
+       ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+                                                       ctx->tx_ndp_modulus);
+
+       memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
+       ctx->tx_seq++;
+
+       /* fill out 16-bit NDP table */
+       ctx->tx_ncm.ndp16.dwSignature =
+                               cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
+       rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
+                                       sizeof(struct usb_cdc_ncm_dpe16));
+       ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
+       ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
+
+       memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex,
+                                               &(ctx->tx_ncm.ndp16),
+                                               sizeof(ctx->tx_ncm.ndp16));
+
+       memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex +
+                                       sizeof(ctx->tx_ncm.ndp16),
+                                       &(ctx->tx_ncm.dpe16),
+                                       (ctx->tx_curr_frame_num + 1) *
+                                       sizeof(struct usb_cdc_ncm_dpe16));
+
+       /* set frame length */
+       skb_put(skb_out, last_offset);
+
+       /* return skb */
+       ctx->tx_curr_skb = NULL;
+       return skb_out;
+
+exit_no_skb:
+       return NULL;
+}
+
+/*
+ * Arm the TX flush timer, if it is not already pending.  The expiry
+ * of (HZ + 999) / 1000 jiffies is approximately one millisecond
+ * (rounded up to at least one jiffy).
+ */
+static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
+{
+       /* start timer, if not already started */
+       if (timer_pending(&ctx->tx_timer) == 0) {
+               ctx->tx_timer.function = &cdc_ncm_tx_timeout;
+               ctx->tx_timer.data = (unsigned long)ctx;
+               ctx->tx_timer.expires = jiffies + ((HZ + 999) / 1000);
+               add_timer(&ctx->tx_timer);
+       }
+}
+
+/*
+ * TX flush timer callback.  While tx_timer_pending is non-zero the
+ * timer is simply re-armed (grace period for more datagrams);
+ * otherwise the pending NTB is flushed by pushing a NULL skb through
+ * usbnet_start_xmit(), which ends up in cdc_ncm_tx_fixup().
+ */
+static void cdc_ncm_tx_timeout(unsigned long arg)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)arg;
+       u8 restart;
+
+       spin_lock(&ctx->mtx);
+       if (ctx->tx_timer_pending != 0) {
+               ctx->tx_timer_pending--;
+               restart = 1;
+       } else
+               restart = 0;
+
+       spin_unlock(&ctx->mtx);
+
+       if (restart)
+               cdc_ncm_tx_timeout_start(ctx);
+       else if (ctx->netdev != NULL)
+               usbnet_start_xmit(NULL, ctx->netdev);
+}
+
+/*
+ * usbnet tx_fixup callback: hand @skb to the NTB aggregator and return
+ * either a complete NTB ready for transmission or NULL while frames
+ * are still being collected (FLAG_MULTI_PACKET tells usbnet that NULL
+ * is not an error here).
+ *
+ * NOTE(review): ctx->mtx is taken with plain spin_lock() although this
+ * path and the timer callback may run in different contexts — confirm
+ * whether spin_lock_bh() is required.  Also, ctx->tx_curr_frame_num is
+ * read after the lock is dropped; verify this cannot race with a
+ * concurrent fill.
+ */
+static struct sk_buff *
+cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+       struct sk_buff *skb_out;
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u8 need_timer = 0;
+
+       /*
+        * The Ethernet API we are using does not support transmitting
+        * multiple Ethernet frames in a single call. This driver will
+        * accumulate multiple Ethernet frames and send out a larger
+        * USB frame when the USB buffer is full or when a single jiffies
+        * timeout happens.
+        */
+       if (ctx == NULL)
+               goto error;
+
+       spin_lock(&ctx->mtx);
+       skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
+       if (ctx->tx_curr_skb != NULL)
+               need_timer = 1;
+       spin_unlock(&ctx->mtx);
+
+       /* Start timer, if there is a remaining skb */
+       if (need_timer)
+               cdc_ncm_tx_timeout_start(ctx);
+
+       if (skb_out)
+               dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+       return skb_out;
+
+error:
+       if (skb != NULL)
+               dev_kfree_skb_any(skb);
+
+       return NULL;
+}
+
+/*
+ * usbnet rx_fixup callback: validate an incoming 16-bit NTB and hand
+ * each contained datagram to the network stack as a clone of @skb_in.
+ *
+ * Returns 1 on success (usbnet then frees skb_in because of
+ * FLAG_MULTI_PACKET) and 0 on any validation failure.
+ */
+static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+       struct sk_buff *skb;
+       struct cdc_ncm_ctx *ctx;
+       int sumlen;
+       int actlen;
+       int temp;
+       int nframes;
+       int x;
+       int offset;
+
+       ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       if (ctx == NULL)
+               goto error;
+
+       actlen = skb_in->len;
+       sumlen = CDC_NCM_NTB_MAX_SIZE_RX;
+
+       if (actlen < (sizeof(ctx->rx_ncm.nth16) + sizeof(ctx->rx_ncm.ndp16))) {
+               pr_debug("frame too short\n");
+               goto error;
+       }
+
+       /* copy and validate the NTH16 header */
+       memcpy(&(ctx->rx_ncm.nth16), ((u8 *)skb_in->data),
+                                               sizeof(ctx->rx_ncm.nth16));
+
+       if (le32_to_cpu(ctx->rx_ncm.nth16.dwSignature) !=
+           USB_CDC_NCM_NTH16_SIGN) {
+               pr_debug("invalid NTH16 signature <%u>\n",
+                        le32_to_cpu(ctx->rx_ncm.nth16.dwSignature));
+               goto error;
+       }
+
+       temp = le16_to_cpu(ctx->rx_ncm.nth16.wBlockLength);
+       if (temp > sumlen) {
+               pr_debug("unsupported NTB block length %u/%u\n", temp, sumlen);
+               goto error;
+       }
+
+       temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
+       if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
+               pr_debug("invalid DPT16 index\n");
+               goto error;
+       }
+
+       /* copy and validate the NDP16 table header */
+       memcpy(&(ctx->rx_ncm.ndp16), ((u8 *)skb_in->data) + temp,
+                                               sizeof(ctx->rx_ncm.ndp16));
+
+       if (le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature) !=
+           USB_CDC_NCM_NDP16_NOCRC_SIGN) {
+               pr_debug("invalid DPT16 signature <%u>\n",
+                        le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
+               goto error;
+       }
+
+       if (le16_to_cpu(ctx->rx_ncm.ndp16.wLength) <
+           USB_CDC_NCM_NDP16_LENGTH_MIN) {
+               /* fixed: report the offending length, not the signature */
+               pr_debug("invalid DPT16 length <%u>\n",
+                        le16_to_cpu(ctx->rx_ncm.ndp16.wLength));
+               goto error;
+       }
+
+       nframes = ((le16_to_cpu(ctx->rx_ncm.ndp16.wLength) -
+                                       sizeof(struct usb_cdc_ncm_ndp16)) /
+                                       sizeof(struct usb_cdc_ncm_dpe16));
+       nframes--; /* we process NDP entries except for the last one */
+
+       pr_debug("nframes = %u\n", nframes);
+
+       temp += sizeof(ctx->rx_ncm.ndp16);
+
+       if ((temp + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) > actlen) {
+               pr_debug("Invalid nframes = %d\n", nframes);
+               goto error;
+       }
+
+       if (nframes > CDC_NCM_DPT_DATAGRAMS_MAX) {
+               pr_debug("Truncating number of frames from %u to %u\n",
+                                       nframes, CDC_NCM_DPT_DATAGRAMS_MAX);
+               nframes = CDC_NCM_DPT_DATAGRAMS_MAX;
+       }
+
+       memcpy(&(ctx->rx_ncm.dpe16), ((u8 *)skb_in->data) + temp,
+                               nframes * (sizeof(struct usb_cdc_ncm_dpe16)));
+
+       for (x = 0; x < nframes; x++) {
+               offset = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramIndex);
+               temp = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramLength);
+
+               /*
+                * CDC NCM ch. 3.7
+                * All entries after first NULL entry are to be ignored
+                */
+               if ((offset == 0) || (temp == 0)) {
+                       if (!x)
+                               goto error; /* empty NTB */
+                       break;
+               }
+
+               /* sanity checking */
+               if (((offset + temp) > actlen) ||
+                   (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
+                       pr_debug("invalid frame detected (ignored)"
+                               "offset[%u]=%u, length=%u, skb=%p\n",
+                                                       x, offset, temp, skb);
+                       if (!x)
+                               goto error;
+                       break;
+
+               } else {
+                       skb = skb_clone(skb_in, GFP_ATOMIC);
+                       /* fixed: skb_clone() can fail under memory
+                        * pressure; bail out instead of dereferencing
+                        * a NULL pointer */
+                       if (!skb)
+                               goto error;
+                       skb->len = temp;
+                       skb->data = ((u8 *)skb_in->data) + offset;
+                       skb_set_tail_pointer(skb, temp);
+                       usbnet_skb_return(dev, skb);
+               }
+       }
+       return 1;
+error:
+       return 0;
+}
+
+/*
+ * Log the link speed reported by a ConnectionSpeedChange notification.
+ * USBitRate (upstream, device-to-host) is the host's receive/downlink
+ * rate; DSBitRate is the transmit/uplink rate.  Only prints when the
+ * cached values actually changed.
+ */
+static void
+cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
+                    struct connection_speed_change *data)
+{
+       uint32_t rx_speed = le32_to_cpu(data->USBitRate);
+       uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
+
+       /*
+        * Currently the USB-NET API does not support reporting the actual
+        * device speed. Do print it instead.
+        */
+       if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
+               ctx->tx_speed = tx_speed;
+               ctx->rx_speed = rx_speed;
+
+               if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
+                       printk(KERN_INFO KBUILD_MODNAME
+                               ": %s: %u mbit/s downlink "
+                               "%u mbit/s uplink\n",
+                               ctx->netdev->name,
+                               (unsigned int)(rx_speed / 1000000U),
+                               (unsigned int)(tx_speed / 1000000U));
+               } else {
+                       printk(KERN_INFO KBUILD_MODNAME
+                               ": %s: %u kbit/s downlink "
+                               "%u kbit/s uplink\n",
+                               ctx->netdev->name,
+                               (unsigned int)(rx_speed / 1000U),
+                               (unsigned int)(tx_speed / 1000U));
+               }
+       }
+}
+
+/*
+ * usbnet status callback: decode CDC notifications from the interrupt
+ * endpoint.  Handles NETWORK_CONNECTION (carrier on/off) and
+ * SPEED_CHANGE (possibly split across two URBs, tracked with
+ * EVENT_STS_SPLIT).
+ *
+ * NOTE(review): ctx is dereferenced without a NULL check, unlike the
+ * other callbacks — confirm a status URB cannot complete before bind
+ * has stored the context.
+ */
+static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
+{
+       struct cdc_ncm_ctx *ctx;
+       struct usb_cdc_notification *event;
+
+       ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+       if (urb->actual_length < sizeof(*event))
+               return;
+
+       /* test for split data in 8-byte chunks */
+       if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
+               cdc_ncm_speed_change(ctx,
+                     (struct connection_speed_change *)urb->transfer_buffer);
+               return;
+       }
+
+       event = urb->transfer_buffer;
+
+       switch (event->bNotificationType) {
+       case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+               /*
+                * According to the CDC NCM specification ch.7.1
+                * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
+                * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
+                */
+               ctx->connected = event->wValue;
+
+               printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
+                       " %sconnected\n",
+                       ctx->netdev->name, ctx->connected ? "" : "dis");
+
+               if (ctx->connected)
+                       netif_carrier_on(dev->net);
+               else {
+                       netif_carrier_off(dev->net);
+                       ctx->tx_speed = ctx->rx_speed = 0;
+               }
+               break;
+
+       case USB_CDC_NOTIFY_SPEED_CHANGE:
+               /* speed data may follow the notification header directly
+                * or arrive in a separate URB (split case above) */
+               if (urb->actual_length <
+                   (sizeof(*event) + sizeof(struct connection_speed_change)))
+                       set_bit(EVENT_STS_SPLIT, &dev->flags);
+               else
+                       cdc_ncm_speed_change(ctx,
+                               (struct connection_speed_change *) &event[1]);
+               break;
+
+       default:
+               dev_err(&dev->udev->dev, "NCM: unexpected "
+                       "notification 0x%02x!\n", event->bNotificationType);
+               break;
+       }
+}
+
+/*
+ * usbnet check_connect callback.  Returns non-zero while the link is
+ * down; a missing context is treated as disconnected.
+ */
+static int cdc_ncm_check_connect(struct usbnet *dev)
+{
+       const struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+       return (ctx == NULL) ? 1 : !ctx->connected;
+}
+
+/* Thin wrapper: delegate probing entirely to the usbnet framework. */
+static int
+cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
+{
+       return usbnet_probe(udev, prod);
+}
+
+/*
+ * USB disconnect callback: forward to usbnet_disconnect() unless the
+ * interface was already torn down (NULL intfdata).
+ */
+static void cdc_ncm_disconnect(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+
+       if (dev == NULL)
+               return;         /* already disconnected */
+
+       usbnet_disconnect(intf);
+}
+
+/*
+ * usbnet manage_power callback: request remote wakeup whenever the
+ * device is to be autosuspended while the interface is up.
+ */
+static int cdc_ncm_manage_power(struct usbnet *dev, int status)
+{
+       dev->intf->needs_remote_wakeup = status;
+       return 0;
+}
+
+/*
+ * usbnet driver description.  FLAG_MULTI_PACKET tells usbnet that
+ * tx_fixup() may return NULL (frame held for aggregation) and that
+ * rx_fixup() delivers cloned skbs itself.
+ */
+static const struct driver_info cdc_ncm_info = {
+       .description = "CDC NCM",
+       .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+       .bind = cdc_ncm_bind,
+       .unbind = cdc_ncm_unbind,
+       .check_connect = cdc_ncm_check_connect,
+       .manage_power = cdc_ncm_manage_power,
+       .status = cdc_ncm_status,
+       .rx_fixup = cdc_ncm_rx_fixup,
+       .tx_fixup = cdc_ncm_tx_fixup,
+};
+
+/* USB driver glue; suspend/resume are handled by the usbnet core. */
+static struct usb_driver cdc_ncm_driver = {
+       .name = "cdc_ncm",
+       .id_table = cdc_devs,
+       .probe = cdc_ncm_probe,
+       .disconnect = cdc_ncm_disconnect,
+       .suspend = usbnet_suspend,
+       .resume = usbnet_resume,
+       .supports_autosuspend = 1,
+};
+
+/*
+ * ethtool operations; everything except get_drvinfo is delegated to
+ * the usbnet helpers.
+ * NOTE(review): this table is never written at runtime and could be
+ * declared const — confirm the forward declaration (outside this
+ * hunk) agrees before changing it.
+ */
+static struct ethtool_ops cdc_ncm_ethtool_ops = {
+       .get_drvinfo = cdc_ncm_get_drvinfo,
+       .get_link = usbnet_get_link,
+       .get_msglevel = usbnet_get_msglevel,
+       .set_msglevel = usbnet_set_msglevel,
+       .get_settings = usbnet_get_settings,
+       .set_settings = usbnet_set_settings,
+       .nway_reset = usbnet_nway_reset,
+};
+
+/* Module init: announce the driver version and register with USB core. */
+static int __init cdc_ncm_init(void)
+{
+       printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
+       return usb_register(&cdc_ncm_driver);
+}
+
+module_init(cdc_ncm_init);
+
+/* Module exit: unregister from the USB core. */
+static void __exit cdc_ncm_exit(void)
+{
+       usb_deregister(&cdc_ncm_driver);
+}
+
+module_exit(cdc_ncm_exit);
+
+MODULE_AUTHOR("Hans Petter Selasky");
+MODULE_DESCRIPTION("USB CDC NCM host driver");
+MODULE_LICENSE("Dual BSD/GPL");
index be8cc2a8e2137105ab85060472b748f4423f601d..93c6b5f62ac42bd8002e00d8e414befb0fb20f9b 100644 (file)
@@ -2993,12 +2993,14 @@ static int hso_probe(struct usb_interface *interface,
 
        case HSO_INTF_BULK:
                /* It's a regular bulk interface */
-               if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
-                   !disable_net)
-                       hso_dev = hso_create_net_device(interface, port_spec);
-               else
+               if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
+                       if (!disable_net)
+                               hso_dev =
+                                   hso_create_net_device(interface, port_spec);
+               } else {
                        hso_dev =
                            hso_create_bulk_serial_device(interface, port_spec);
+               }
                if (!hso_dev)
                        goto exit;
                break;
index c04d49e31f814fe11d2f1b730dcba014afeb324c..cff74b81a7d2ccfdb566ea16901a2076d78964e5 100644 (file)
@@ -391,14 +391,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
                goto error;
        // else network stack removes extra byte if we forced a short packet
 
-       if (skb->len)
-               usbnet_skb_return (dev, skb);
-       else {
-               netif_dbg(dev, rx_err, dev->net, "drop\n");
-error:
-               dev->net->stats.rx_errors++;
-               skb_queue_tail (&dev->done, skb);
+       if (skb->len) {
+               /* all data was already cloned from skb inside the driver */
+               if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+                       dev_kfree_skb_any(skb);
+               else
+                       usbnet_skb_return(dev, skb);
+               return;
        }
+
+       netif_dbg(dev, rx_err, dev->net, "drop\n");
+error:
+       dev->net->stats.rx_errors++;
+       skb_queue_tail(&dev->done, skb);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -971,7 +976,8 @@ static void tx_complete (struct urb *urb)
        struct usbnet           *dev = entry->dev;
 
        if (urb->status == 0) {
-               dev->net->stats.tx_packets++;
+               if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
+                       dev->net->stats.tx_packets++;
                dev->net->stats.tx_bytes += entry->length;
        } else {
                dev->net->stats.tx_errors++;
@@ -1044,8 +1050,13 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        if (info->tx_fixup) {
                skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
                if (!skb) {
-                       netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-                       goto drop;
+                       if (netif_msg_tx_err(dev)) {
+                               netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+                               goto drop;
+                       } else {
+                               /* cdc_ncm collected packet; waits for more */
+                               goto not_drop;
+                       }
                }
        }
        length = skb->len;
@@ -1067,13 +1078,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        /* don't assume the hardware handles USB_ZERO_PACKET
         * NOTE:  strictly conforming cdc-ether devices should expect
         * the ZLP here, but ignore the one-byte packet.
+        * NOTE2: CDC NCM specification is different from CDC ECM when
+        * handling ZLP/short packets, so cdc_ncm driver will make short
+        * packet itself if needed.
         */
        if (length % dev->maxpacket == 0) {
                if (!(info->flags & FLAG_SEND_ZLP)) {
-                       urb->transfer_buffer_length++;
-                       if (skb_tailroom(skb)) {
-                               skb->data[skb->len] = 0;
-                               __skb_put(skb, 1);
+                       if (!(info->flags & FLAG_MULTI_PACKET)) {
+                               urb->transfer_buffer_length++;
+                               if (skb_tailroom(skb)) {
+                                       skb->data[skb->len] = 0;
+                                       __skb_put(skb, 1);
+                               }
                        }
                } else
                        urb->transfer_flags |= URB_ZERO_PACKET;
@@ -1122,6 +1138,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
 drop:
                dev->net->stats.tx_dropped++;
+not_drop:
                if (skb)
                        dev_kfree_skb_any (skb);
                usb_free_urb (urb);
index 4930f9dbc493d50d2b46f7829d62541bc8b1e646..5e7f069eab533e75cf950294648a7967da104bb0 100644 (file)
@@ -30,8 +30,8 @@
 */
 
 #define DRV_NAME       "via-rhine"
-#define DRV_VERSION    "1.4.3"
-#define DRV_RELDATE    "2007-03-06"
+#define DRV_VERSION    "1.5.0"
+#define DRV_RELDATE    "2010-10-09"
 
 
 /* A few user-configurable values.
@@ -100,6 +100,7 @@ static const int multicast_filter_limit = 32;
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/crc32.h>
+#include <linux/if_vlan.h>
 #include <linux/bitops.h>
 #include <linux/workqueue.h>
 #include <asm/processor.h>     /* Processor type for cache alignment. */
@@ -133,6 +134,9 @@ MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 
+#define MCAM_SIZE      32
+#define VCAM_SIZE      32
+
 /*
                Theory of Operation
 
@@ -279,15 +283,16 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 /* Offsets to the device registers. */
 enum register_offsets {
        StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
-       ChipCmd1=0x09,
+       ChipCmd1=0x09, TQWake=0x0A,
        IntrStatus=0x0C, IntrEnable=0x0E,
        MulticastFilter0=0x10, MulticastFilter1=0x14,
        RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
-       MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+       MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
        MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
        ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
        RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
        StickyHW=0x83, IntrStatus2=0x84,
+       CamMask=0x88, CamCon=0x92, CamAddr=0x93,
        WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
        WOLcrClr1=0xA6, WOLcgClr=0xA7,
        PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
@@ -299,6 +304,40 @@ enum backoff_bits {
        BackCaptureEffect=0x04, BackRandom=0x08
 };
 
+/* Bits in the TxConfig (TCR) register */
+enum tcr_bits {
+       TCR_PQEN=0x01,
+       TCR_LB0=0x02,           /* loopback[0] */
+       TCR_LB1=0x04,           /* loopback[1] */
+       TCR_OFSET=0x08,
+       TCR_RTGOPT=0x10,
+       TCR_RTFT0=0x20,
+       TCR_RTFT1=0x40,
+       TCR_RTSF=0x80,
+};
+
+/* Bits in the CamCon (CAMC) register */
+enum camcon_bits {
+       CAMC_CAMEN=0x01,
+       CAMC_VCAMSL=0x02,
+       CAMC_CAMWR=0x04,
+       CAMC_CAMRD=0x08,
+};
+
+/* Bits in the PCIBusConfig1 (BCR1) register */
+enum bcr1_bits {
+       BCR1_POT0=0x01,
+       BCR1_POT1=0x02,
+       BCR1_POT2=0x04,
+       BCR1_CTFT0=0x08,
+       BCR1_CTFT1=0x10,
+       BCR1_CTSF=0x20,
+       BCR1_TXQNOBK=0x40,      /* for VT6105 */
+       BCR1_VIDFR=0x80,        /* for VT6105 */
+       BCR1_MED0=0x40,         /* for VT6102 */
+       BCR1_MED1=0x80,         /* for VT6102 */
+};
+
 #ifdef USE_MMIO
 /* Registers we check that mmio and reg are the same. */
 static const int mmio_verify_registers[] = {
@@ -356,6 +395,11 @@ enum desc_status_bits {
        DescOwn=0x80000000
 };
 
+/* Bits in *_desc.*_length */
+enum desc_length_bits {
+       DescTag=0x00010000
+};
+
 /* Bits in ChipCmd. */
 enum chip_cmd_bits {
        CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
@@ -365,6 +409,9 @@ enum chip_cmd_bits {
 };
 
 struct rhine_private {
+       /* Bit mask for configured VLAN ids */
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
        /* Descriptor rings */
        struct rx_desc *rx_ring;
        struct tx_desc *tx_ring;
@@ -405,6 +452,23 @@ struct rhine_private {
        void __iomem *base;
 };
 
+#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
+#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
+#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
+#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
+#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
+#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
+#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
+
+
 static int  mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
@@ -422,6 +486,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  rhine_close(struct net_device *dev);
 static void rhine_shutdown (struct pci_dev *pdev);
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_init_cam_filter(struct net_device *dev);
+static void rhine_update_vcam(struct net_device *dev);
 
 #define RHINE_WAIT_FOR(condition) do {                                 \
        int i=1024;                                                     \
@@ -629,6 +701,8 @@ static const struct net_device_ops rhine_netdev_ops = {
        .ndo_set_mac_address     = eth_mac_addr,
        .ndo_do_ioctl            = netdev_ioctl,
        .ndo_tx_timeout          = rhine_tx_timeout,
+       .ndo_vlan_rx_add_vid     = rhine_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid    = rhine_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller     = rhine_poll,
 #endif
@@ -795,6 +869,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
        if (rp->quirks & rqRhineI)
                dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
+       if (pdev->revision >= VT6105M)
+               dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+               NETIF_F_HW_VLAN_FILTER;
+
        /* dev->name not defined before register_netdev()! */
        rc = register_netdev(dev);
        if (rc)
@@ -1040,6 +1118,167 @@ static void rhine_set_carrier(struct mii_if_info *mii)
                       netif_carrier_ok(mii->dev));
 }
 
+/**
+ * rhine_set_cam - set CAM multicast filters
+ * @ioaddr: register block of this Rhine
+ * @idx: multicast CAM index [0..MCAM_SIZE-1]
+ * @addr: multicast address (6 bytes)
+ *
+ * Load addresses into multicast filters.
+ */
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+       int i;
+
+       iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+       wmb();
+
+       /* Paranoid -- idx out of range should never happen */
+       idx &= (MCAM_SIZE - 1);
+
+       iowrite8((u8) idx, ioaddr + CamAddr);
+
+       for (i = 0; i < 6; i++, addr++)
+               iowrite8(*addr, ioaddr + MulticastFilter0 + i);
+       udelay(10);
+       wmb();
+
+       iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+       udelay(10);
+
+       iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam - set CAM VLAN filters
+ * @ioaddr: register block of this Rhine
+ * @idx: VLAN CAM index [0..VCAM_SIZE-1]
+ * @addr: VLAN ID (2 bytes)
+ *
+ * Load addresses into VLAN filters.
+ */
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+       iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+       wmb();
+
+       /* Paranoid -- idx out of range should never happen */
+       idx &= (VCAM_SIZE - 1);
+
+       iowrite8((u8) idx, ioaddr + CamAddr);
+
+       iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
+       udelay(10);
+       wmb();
+
+       iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+       udelay(10);
+
+       iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_cam_mask - set multicast CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: multicast CAM mask
+ *
+ * Mask sets multicast filters active/inactive.
+ */
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+       iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+       wmb();
+
+       /* write mask */
+       iowrite32(mask, ioaddr + CamMask);
+
+       /* disable CAMEN */
+       iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam_mask - set VLAN CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: VLAN CAM mask
+ *
+ * Mask sets VLAN filters active/inactive.
+ */
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+       iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+       wmb();
+
+       /* write mask */
+       iowrite32(mask, ioaddr + CamMask);
+
+       /* disable CAMEN */
+       iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_init_cam_filter - initialize CAM filters
+ * @dev: network device
+ *
+ * Initialize (disable) hardware VLAN and multicast support on this
+ * Rhine.
+ */
+static void rhine_init_cam_filter(struct net_device *dev)
+{
+       struct rhine_private *rp = netdev_priv(dev);
+       void __iomem *ioaddr = rp->base;
+
+       /* Disable all CAMs */
+       rhine_set_vlan_cam_mask(ioaddr, 0);
+       rhine_set_cam_mask(ioaddr, 0);
+
+       /* disable hardware VLAN support */
+       BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
+       BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+}
+
+/**
+ * rhine_update_vcam - update VLAN CAM filters
+ * @rp: rhine_private data of this Rhine
+ *
+ * Update VLAN CAM filters to match configuration change.
+ */
+static void rhine_update_vcam(struct net_device *dev)
+{
+       struct rhine_private *rp = netdev_priv(dev);
+       void __iomem *ioaddr = rp->base;
+       u16 vid;
+       u32 vCAMmask = 0;       /* 32 vCAMs (6105M and better) */
+       unsigned int i = 0;
+
+       for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
+               rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
+               vCAMmask |= 1 << i;
+               if (++i >= VCAM_SIZE)
+                       break;
+       }
+       rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
+}
+
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+       struct rhine_private *rp = netdev_priv(dev);
+
+       spin_lock_irq(&rp->lock);
+       set_bit(vid, rp->active_vlans);
+       rhine_update_vcam(dev);
+       spin_unlock_irq(&rp->lock);
+}
+
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+       struct rhine_private *rp = netdev_priv(dev);
+
+       spin_lock_irq(&rp->lock);
+       clear_bit(vid, rp->active_vlans);
+       rhine_update_vcam(dev);
+       spin_unlock_irq(&rp->lock);
+}
+
 static void init_registers(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
@@ -1061,6 +1300,9 @@ static void init_registers(struct net_device *dev)
 
        rhine_set_rx_mode(dev);
 
+       if (rp->pdev->revision >= VT6105M)
+               rhine_init_cam_filter(dev);
+
        napi_enable(&rp->napi);
 
        /* Enable interrupts by setting the interrupt mask. */
@@ -1276,16 +1518,28 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
        rp->tx_ring[entry].desc_length =
                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
+       if (unlikely(vlan_tx_tag_present(skb))) {
+               rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+               /* request tagging */
+               rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+       }
+       else
+               rp->tx_ring[entry].tx_status = 0;
+
        /* lock eth irq */
        spin_lock_irqsave(&rp->lock, flags);
        wmb();
-       rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+       rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
        wmb();
 
        rp->cur_tx++;
 
        /* Non-x86 Todo: explicitly flush cache lines here. */
 
+       if (vlan_tx_tag_present(skb))
+               /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+               BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
        /* Wake the potentially-idle transmit channel */
        iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
               ioaddr + ChipCmd1);
@@ -1437,6 +1691,21 @@ static void rhine_tx(struct net_device *dev)
        spin_unlock(&rp->lock);
 }
 
+/**
+ * rhine_get_vlan_tci - extract TCI from Rx data buffer
+ * @skb: pointer to sk_buff
+ * @data_size: used data area of the buffer including CRC
+ *
+ * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
+ * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
+ * aligned following the CRC.
+ */
+static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
+{
+       u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
+       return ntohs(*(u16 *)trailer);
+}
+
 /* Process up to limit frames from receive ring */
 static int rhine_rx(struct net_device *dev, int limit)
 {
@@ -1454,6 +1723,7 @@ static int rhine_rx(struct net_device *dev, int limit)
        for (count = 0; count < limit; ++count) {
                struct rx_desc *desc = rp->rx_head_desc;
                u32 desc_status = le32_to_cpu(desc->rx_status);
+               u32 desc_length = le32_to_cpu(desc->desc_length);
                int data_size = desc_status >> 16;
 
                if (desc_status & DescOwn)
@@ -1498,6 +1768,7 @@ static int rhine_rx(struct net_device *dev, int limit)
                        struct sk_buff *skb = NULL;
                        /* Length should omit the CRC */
                        int pkt_len = data_size - 4;
+                       u16 vlan_tci = 0;
 
                        /* Check if the packet is long enough to accept without
                           copying to a minimally-sized skbuff. */
@@ -1532,7 +1803,14 @@ static int rhine_rx(struct net_device *dev, int limit)
                                                 rp->rx_buf_sz,
                                                 PCI_DMA_FROMDEVICE);
                        }
+
+                       if (unlikely(desc_length & DescTag))
+                               vlan_tci = rhine_get_vlan_tci(skb, data_size);
+
                        skb->protocol = eth_type_trans(skb, dev);
+
+                       if (unlikely(desc_length & DescTag))
+                               __vlan_hwaccel_put_tag(skb, vlan_tci);
                        netif_receive_skb(skb);
                        dev->stats.rx_bytes += pkt_len;
                        dev->stats.rx_packets++;
@@ -1596,6 +1874,11 @@ static void rhine_restart_tx(struct net_device *dev) {
 
                iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
                       ioaddr + ChipCmd);
+
+               if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
+                       /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+                       BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
                iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
                       ioaddr + ChipCmd1);
                IOSYNC;
@@ -1631,7 +1914,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
        }
        if (intr_status & IntrTxUnderrun) {
                if (rp->tx_thresh < 0xE0)
-                       iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+                       BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
                if (debug > 1)
                        printk(KERN_INFO "%s: Transmitter underrun, Tx "
                               "threshold now %2.2x.\n",
@@ -1646,7 +1929,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
            (intr_status & (IntrTxAborted |
             IntrTxUnderrun | IntrTxDescRace)) == 0) {
                if (rp->tx_thresh < 0xE0) {
-                       iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+                       BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
                }
                if (debug > 1)
                        printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1688,7 +1971,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        u32 mc_filter[2];       /* Multicast hash filter */
-       u8 rx_mode;             /* Note: 0x02=accept runt, 0x01=accept errs */
+       u8 rx_mode = 0x0C;      /* Note: 0x02=accept runt, 0x01=accept errs */
+       struct netdev_hw_addr *ha;
 
        if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
                rx_mode = 0x1C;
@@ -1699,10 +1983,18 @@ static void rhine_set_rx_mode(struct net_device *dev)
                /* Too many to match, or accept all multicasts. */
                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-               rx_mode = 0x0C;
+       } else if (rp->pdev->revision >= VT6105M) {
+               int i = 0;
+               u32 mCAMmask = 0;       /* 32 mCAMs (6105M and better) */
+               netdev_for_each_mc_addr(ha, dev) {
+                       if (i == MCAM_SIZE)
+                               break;
+                       rhine_set_cam(ioaddr, i, ha->addr);
+                       mCAMmask |= 1 << i;
+                       i++;
+               }
+               rhine_set_cam_mask(ioaddr, mCAMmask);
        } else {
-               struct netdev_hw_addr *ha;
-
                memset(mc_filter, 0, sizeof(mc_filter));
                netdev_for_each_mc_addr(ha, dev) {
                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
@@ -1711,9 +2003,15 @@ static void rhine_set_rx_mode(struct net_device *dev)
                }
                iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
                iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
-               rx_mode = 0x0C;
        }
-       iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
+       /* enable/disable VLAN receive filtering */
+       if (rp->pdev->revision >= VT6105M) {
+               if (dev->flags & IFF_PROMISC)
+                       BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+               else
+                       BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+       }
+       BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
 }
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1966,7 +2264,7 @@ static int rhine_resume(struct pci_dev *pdev)
        if (!netif_running(dev))
                return 0;
 
-        if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
+       if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
                printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
 
        ret = pci_set_power_state(pdev, PCI_D0);
index bc9bd10357060e429146f613554923e7e3bda01c..1dd3a21b3a4365110690f2ae3953185d361baba6 100644 (file)
@@ -1177,7 +1177,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
        .get_rx_csum            = vxge_get_rx_csum,
        .set_rx_csum            = vxge_set_rx_csum,
        .get_tx_csum            = ethtool_op_get_tx_csum,
-       .set_tx_csum            = ethtool_op_set_tx_hw_csum,
+       .set_tx_csum            = ethtool_op_set_tx_ipv6_csum,
        .get_sg                 = ethtool_op_get_sg,
        .set_sg                 = ethtool_op_set_sg,
        .get_tso                = ethtool_op_get_tso,
index 8a84152e320a19f8635a99767b2f95c8ddffbe02..4877b3b8a29e9f8ba92a374266bd9d8519449c0c 100644 (file)
@@ -3368,7 +3368,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
 
        ndev->features |= NETIF_F_SG;
 
-       ndev->features |= NETIF_F_HW_CSUM;
+       ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
                "%s : checksuming enabled", __func__);
 
index d81ad83978855ac4929a93f082139afd63f8cc41..cf05504d951130bf1f0f8460f9525ff8eaa0ab0c 100644 (file)
@@ -498,7 +498,6 @@ norbuff:
 static int x25_asy_close(struct net_device *dev)
 {
        struct x25_asy *sl = netdev_priv(dev);
-       int err;
 
        spin_lock(&sl->lock);
        if (sl->tty)
@@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev)
        netif_stop_queue(dev);
        sl->rcount = 0;
        sl->xleft  = 0;
-       err = lapb_unregister(dev);
-       if (err != LAPB_OK)
-               printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
-                       err);
        spin_unlock(&sl->lock);
        return 0;
 }
@@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
 static void x25_asy_close_tty(struct tty_struct *tty)
 {
        struct x25_asy *sl = tty->disc_data;
+       int err;
 
        /* First make sure we're connected. */
        if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty)
                dev_close(sl->dev);
        rtnl_unlock();
 
+       err = lapb_unregister(sl->dev);
+       if (err != LAPB_OK)
+               printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
+                       err);
+
        tty->disc_data = NULL;
        sl->tty = NULL;
        x25_asy_free(sl);
index 872b1a3b21c68f4512dda985b8576566e16a4d72..73a8014cacb2e1d51c5dc63b99ed14be6901ca03 100644 (file)
@@ -62,6 +62,7 @@
 static int ar9003_hw_power_interpolate(int32_t x,
                                       int32_t *px, int32_t *py, u_int16_t np);
 
+
 static const struct ar9300_eeprom ar9300_default = {
        .eepromVersion = 2,
        .templateVersion = 2,
index 2de52d18152f1fb6b71ff631b576e5252059c4ed..de6c3086d232b64eab460764b3407edba021a328 100644 (file)
@@ -1000,21 +1000,6 @@ static int xemaclite_close(struct net_device *dev)
        return 0;
 }
 
-/**
- * xemaclite_get_stats - Get the stats for the net_device
- * @dev:       Pointer to the network device
- *
- * This function returns the address of the 'net_device_stats' structure for the
- * given network device. This structure holds usage statistics for the network
- * device.
- *
- * Return:     Pointer to the net_device_stats structure.
- */
-static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
-{
-       return &dev->stats;
-}
-
 /**
  * xemaclite_send - Transmit a frame
  * @orig_skb:  Pointer to the socket buffer to be transmitted
@@ -1285,7 +1270,6 @@ static struct net_device_ops xemaclite_netdev_ops = {
        .ndo_start_xmit         = xemaclite_send,
        .ndo_set_mac_address    = xemaclite_set_mac_address,
        .ndo_tx_timeout         = xemaclite_tx_timeout,
-       .ndo_get_stats          = xemaclite_get_stats,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = xemaclite_poll_controller,
 #endif
index 4b4da5b86ff99337fc0d81afeb48363918df65c3..f442668a1e52e137b347419f9938d386f638b16b 100644 (file)
@@ -129,8 +129,9 @@ static void handle_tx(struct vhost_net *net)
        size_t hdr_size;
        struct socket *sock;
 
-       sock = rcu_dereference_check(vq->private_data,
-                                    lockdep_is_held(&vq->mutex));
+       /* TODO: check that we are running from vhost_worker?
+        * Not sure it's worth it, it's straight-forward enough. */
+       sock = rcu_dereference_check(vq->private_data, 1);
        if (!sock)
                return;
 
index eed52bcd35d0d7e2e6e0e36e12ac773ada2dbb6d..010e2d87ed7568ea1019f57447f671666e56fbb3 100644 (file)
@@ -197,6 +197,21 @@ enum dccp_feature_numbers {
        DCCPF_MAX_CCID_SPECIFIC = 255,
 };
 
+/* DCCP socket control message types for cmsg */
+enum dccp_cmsg_type {
+       DCCP_SCM_PRIORITY = 1,
+       DCCP_SCM_QPOLICY_MAX = 0xFFFF,
+       /* ^-- Up to here reserved exclusively for qpolicy parameters */
+       DCCP_SCM_MAX
+};
+
+/* DCCP priorities for outgoing/queued packets */
+enum dccp_packet_dequeueing_policy {
+       DCCPQ_POLICY_SIMPLE,
+       DCCPQ_POLICY_PRIO,
+       DCCPQ_POLICY_MAX
+};
+
 /* DCCP socket options */
 #define DCCP_SOCKOPT_PACKET_SIZE       1 /* XXX deprecated, without effect */
 #define DCCP_SOCKOPT_SERVICE           2
@@ -210,6 +225,8 @@ enum dccp_feature_numbers {
 #define DCCP_SOCKOPT_CCID              13
 #define DCCP_SOCKOPT_TX_CCID           14
 #define DCCP_SOCKOPT_RX_CCID           15
+#define DCCP_SOCKOPT_QPOLICY_ID                16
+#define DCCP_SOCKOPT_QPOLICY_TXQLEN    17
 #define DCCP_SOCKOPT_CCID_RX_INFO      128
 #define DCCP_SOCKOPT_CCID_TX_INFO      192
 
@@ -458,6 +475,8 @@ struct dccp_ackvec;
  * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
  * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
  * @dccps_options_received - parsed set of retrieved options
+ * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
+ * @dccps_tx_qlen - maximum length of the TX queue
  * @dccps_role - role of this sock, one of %dccp_role
  * @dccps_hc_rx_insert_options - receiver wants to add options when acking
  * @dccps_hc_tx_insert_options - sender wants to add options when sending
@@ -500,6 +519,8 @@ struct dccp_sock {
        struct ccid                     *dccps_hc_rx_ccid;
        struct ccid                     *dccps_hc_tx_ccid;
        struct dccp_options_received    dccps_options_received;
+       __u8                            dccps_qpolicy;
+       __u32                           dccps_tx_qlen;
        enum dccp_role                  dccps_role:2;
        __u8                            dccps_hc_rx_insert_options:1;
        __u8                            dccps_hc_tx_insert_options:1;
index 447a775878fb94db30df9ac047327fcc8746079b..45266b75409a4718f5d294c6ddbc8e30e6984f66 100644 (file)
@@ -124,7 +124,9 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
 #define SKF_AD_MARK    20
 #define SKF_AD_QUEUE   24
 #define SKF_AD_HATYPE  28
-#define SKF_AD_MAX     32
+#define SKF_AD_RXHASH  32
+#define SKF_AD_CPU     36
+#define SKF_AD_MAX     40
 #define SKF_NET_OFF   (-0x100000)
 #define SKF_LL_OFF    (-0x200000)
 
@@ -146,7 +148,7 @@ struct sk_buff;
 struct sock;
 
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
-extern unsigned int sk_run_filter(struct sk_buff *skb,
+extern unsigned int sk_run_filter(const struct sk_buff *skb,
                                  const struct sock_filter *filter);
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
index 2b86eaf11773fd80cb58d4b4e19b2640ae833dae..ae8fdc54e0c06941356de75606fc186f7cc63507 100644 (file)
@@ -222,7 +222,7 @@ static inline struct in_device *in_dev_get(const struct net_device *dev)
 
 static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
 {
-       return rcu_dereference_check(dev->ip_ptr, lockdep_rtnl_is_held());
+       return rtnl_dereference(dev->ip_ptr);
 }
 
 extern void in_dev_finish_destroy(struct in_device *idev);
index ced1159fa4f247e10becefdf6bd3ec3d6cbf873b..47cb09edec1a613821b734a7b4a0aa1def2ab0ae 100644 (file)
 
 /* jhash.h: Jenkins hash support.
  *
- * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
  *
  * http://burtleburtle.net/bob/hash/
  *
  * These are the credits from Bob's sources:
  *
- * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
- * hash(), hash2(), hash3, and mix() are externally useful functions.
- * Routines to test the hash are included if SELF_TEST is defined.
- * You can use this free for any purpose.  It has no warranty.
+ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
  *
- * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ * These are functions for producing 32-bit hashes for hash table lookup.
+ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+ * are externally useful functions.  Routines to test the hash are included
+ * if SELF_TEST is defined.  You can use this free for any purpose.  It's in
+ * the public domain.  It has no warranty.
+ *
+ * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
  *
  * I've modified Bob's hash to be useful in the Linux kernel, and
- * any bugs present are surely my fault.  -DaveM
+ * any bugs present are my fault.
+ * Jozsef
  */
+#include <linux/bitops.h>
+#include <linux/unaligned/packed_struct.h>
+
+/* Best hash sizes are of power of two */
+#define jhash_size(n)   ((u32)1<<(n))
+/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
+#define jhash_mask(n)   (jhash_size(n)-1)
+
+/* __jhash_mix -- mix 3 32-bit values reversibly. */
+#define __jhash_mix(a, b, c)                   \
+{                                              \
+       a -= c;  a ^= rol32(c, 4);  c += b;     \
+       b -= a;  b ^= rol32(a, 6);  a += c;     \
+       c -= b;  c ^= rol32(b, 8);  b += a;     \
+       a -= c;  a ^= rol32(c, 16); c += b;     \
+       b -= a;  b ^= rol32(a, 19); a += c;     \
+       c -= b;  c ^= rol32(b, 4);  b += a;     \
+}
 
-/* NOTE: Arguments are modified. */
-#define __jhash_mix(a, b, c) \
-{ \
-  a -= b; a -= c; a ^= (c>>13); \
-  b -= c; b -= a; b ^= (a<<8); \
-  c -= a; c -= b; c ^= (b>>13); \
-  a -= b; a -= c; a ^= (c>>12);  \
-  b -= c; b -= a; b ^= (a<<16); \
-  c -= a; c -= b; c ^= (b>>5); \
-  a -= b; a -= c; a ^= (c>>3);  \
-  b -= c; b -= a; b ^= (a<<10); \
-  c -= a; c -= b; c ^= (b>>15); \
+/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
+#define __jhash_final(a, b, c)                 \
+{                                              \
+       c ^= b; c -= rol32(b, 14);              \
+       a ^= c; a -= rol32(c, 11);              \
+       b ^= a; b -= rol32(a, 25);              \
+       c ^= b; c -= rol32(b, 16);              \
+       a ^= c; a -= rol32(c, 4);               \
+       b ^= a; b -= rol32(a, 14);              \
+       c ^= b; c -= rol32(b, 24);              \
 }
 
-/* The golden ration: an arbitrary value */
-#define JHASH_GOLDEN_RATIO     0x9e3779b9
+/* An arbitrary initial parameter */
+#define JHASH_INITVAL          0xdeadbeef
 
-/* The most generic version, hashes an arbitrary sequence
- * of bytes.  No alignment or length assumptions are made about
- * the input key.
+/* jhash - hash an arbitrary key
+ * @k: sequence of bytes as key
+ * @length: the length of the key
+ * @initval: the previous hash, or an arbitrary value
+ *
+ * The generic version, hashes an arbitrary sequence of bytes.
+ * No alignment or length assumptions are made about the input key.
+ *
+ * Returns the hash value of the key. The result depends on endianness.
  */
 static inline u32 jhash(const void *key, u32 length, u32 initval)
 {
-       u32 a, b, c, len;
+       u32 a, b, c;
        const u8 *k = key;
 
-       len = length;
-       a = b = JHASH_GOLDEN_RATIO;
-       c = initval;
-
-       while (len >= 12) {
-               a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
-               b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
-               c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
-
-               __jhash_mix(a,b,c);
+       /* Set up the internal state */
+       a = b = c = JHASH_INITVAL + length + initval;
 
+       /* All but the last block: affect some 32 bits of (a,b,c) */
+       while (length > 12) {
+               a += __get_unaligned_cpu32(k);
+               b += __get_unaligned_cpu32(k + 4);
+               c += __get_unaligned_cpu32(k + 8);
+               __jhash_mix(a, b, c);
+               length -= 12;
                k += 12;
-               len -= 12;
        }
-
-       c += length;
-       switch (len) {
-       case 11: c += ((u32)k[10]<<24);
-       case 10: c += ((u32)k[9]<<16);
-       case 9 : c += ((u32)k[8]<<8);
-       case 8 : b += ((u32)k[7]<<24);
-       case 7 : b += ((u32)k[6]<<16);
-       case 6 : b += ((u32)k[5]<<8);
-       case 5 : b += k[4];
-       case 4 : a += ((u32)k[3]<<24);
-       case 3 : a += ((u32)k[2]<<16);
-       case 2 : a += ((u32)k[1]<<8);
-       case 1 : a += k[0];
-       };
-
-       __jhash_mix(a,b,c);
+       /* Last block: affect all 32 bits of (c) */
+       /* All the case statements fall through */
+       switch (length) {
+       case 12: c += (u32)k[11]<<24;
+       case 11: c += (u32)k[10]<<16;
+       case 10: c += (u32)k[9]<<8;
+       case 9:  c += k[8];
+       case 8:  b += (u32)k[7]<<24;
+       case 7:  b += (u32)k[6]<<16;
+       case 6:  b += (u32)k[5]<<8;
+       case 5:  b += k[4];
+       case 4:  a += (u32)k[3]<<24;
+       case 3:  a += (u32)k[2]<<16;
+       case 2:  a += (u32)k[1]<<8;
+       case 1:  a += k[0];
+                __jhash_final(a, b, c);
+       case 0: /* Nothing left to add */
+               break;
+       }
 
        return c;
 }
 
-/* A special optimized version that handles 1 or more of u32s.
- * The length parameter here is the number of u32s in the key.
+/* jhash2 - hash an array of u32's
+ * @k: the key which must be an array of u32's
+ * @length: the number of u32's in the key
+ * @initval: the previous hash, or an arbitrary value
+ *
+ * Returns the hash value of the key.
  */
 static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
 {
-       u32 a, b, c, len;
+       u32 a, b, c;
 
-       a = b = JHASH_GOLDEN_RATIO;
-       c = initval;
-       len = length;
+       /* Set up the internal state */
+       a = b = c = JHASH_INITVAL + (length<<2) + initval;
 
-       while (len >= 3) {
+       /* Handle most of the key */
+       while (length > 3) {
                a += k[0];
                b += k[1];
                c += k[2];
                __jhash_mix(a, b, c);
-               k += 3; len -= 3;
+               length -= 3;
+               k += 3;
        }
 
-       c += length * 4;
-
-       switch (len) {
-       case 2 : b += k[1];
-       case 1 : a += k[0];
-       };
-
-       __jhash_mix(a,b,c);
+       /* Handle the last 3 u32's: all the case statements fall through */
+       switch (length) {
+       case 3: c += k[2];
+       case 2: b += k[1];
+       case 1: a += k[0];
+               __jhash_final(a, b, c);
+       case 0: /* Nothing left to add */
+               break;
+       }
 
        return c;
 }
 
 
-/* A special ultra-optimized versions that knows they are hashing exactly
- * 3, 2 or 1 word(s).
- *
- * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
- *       done at the end is not done here.
- */
+/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
 static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
 {
-       a += JHASH_GOLDEN_RATIO;
-       b += JHASH_GOLDEN_RATIO;
+       a += JHASH_INITVAL;
+       b += JHASH_INITVAL;
        c += initval;
 
-       __jhash_mix(a, b, c);
+       __jhash_final(a, b, c);
 
        return c;
 }
index 1ff81b51b656a13f32c4d8b5253f803593cd033b..dd3c34ebca9a67e05c2e59649da81486210e802c 100644 (file)
@@ -11,6 +11,7 @@
 #define MARVELL_PHY_ID_88E1118         0x01410e10
 #define MARVELL_PHY_ID_88E1121R                0x01410cb0
 #define MARVELL_PHY_ID_88E1145         0x01410cd0
+#define MARVELL_PHY_ID_88E1149R                0x01410e50
 #define MARVELL_PHY_ID_88E1240         0x01410e30
 #define MARVELL_PHY_ID_88E1318S                0x01410e90
 
index a9ac5dc26e3c88700462babf34a9106c52c6b799..d31bc3c9471712a1b17172194c3f132a86a12f72 100644 (file)
@@ -1360,7 +1360,8 @@ static inline struct net_device *first_net_device(struct net *net)
 
 extern int                     netdev_boot_setup_check(struct net_device *dev);
 extern unsigned long           netdev_boot_base(const char *prefix, int unit);
-extern struct net_device    *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
+extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+                                             const char *hwaddr);
 extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
 extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
 extern void            dev_add_pack(struct packet_type *pt);
index ebb0c80ffd6ebb53b02fb2263e0a6ff6a634eb6a..12b2b18e50c1c321f208a83d4bbda61ce985450c 100644 (file)
@@ -230,6 +230,7 @@ enum
        LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
        LINUX_MIB_TCPDEFERACCEPTDROP,
        LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
+       LINUX_MIB_TCPTIMEWAITOVERFLOW,          /* TCPTimeWaitOverflow */
        __LINUX_MIB_MAX
 };
 
index 7ae27a473818ced2a93d84b997a47783504c3e1d..44842c8d38c04f949b9edfb2eb9bb04dc6bdb6d3 100644 (file)
@@ -97,6 +97,12 @@ struct driver_info {
 
 #define FLAG_LINK_INTR 0x0800          /* updates link (carrier) status */
 
+/*
+ * Indicates to usbnet, that USB driver accumulates multiple IP packets.
+ * Affects statistic (counters) and short packet handling.
+ */
+#define FLAG_MULTI_PACKET      0x1000
+
        /* init device ... can sleep, or cause probe() failure */
        int     (*bind)(struct usbnet *, struct usb_interface *);
 
index 90c9e2872f27214b4727ecf4f440e4975be1f1df..18e5c3f675804eb6fbc84bb4aa0781944807c260 100644 (file)
@@ -10,6 +10,7 @@ extern void unix_inflight(struct file *fp);
 extern void unix_notinflight(struct file *fp);
 extern void unix_gc(void);
 extern void wait_for_unix_gc(void);
+extern struct sock *unix_get_socket(struct file *filp);
 
 #define UNIX_HASH_SIZE 256
 
@@ -56,6 +57,7 @@ struct unix_sock {
        spinlock_t              lock;
        unsigned int            gc_candidate : 1;
        unsigned int            gc_maybe_cycle : 1;
+       unsigned char           recursion_level;
        struct socket_wq        peer_wq;
 };
 #define unix_sk(__sk) ((struct unix_sock *)__sk)
index a5bd72646d6510f18ff085674283f55386e118bc..85dee3a57b9b464afceb8c02baf1d9e55da918ad 100644 (file)
@@ -70,7 +70,7 @@ struct dst_entry {
 
        struct  dst_ops         *ops;
 
-       u32                     metrics[RTAX_MAX];
+       u32                     _metrics[RTAX_MAX];
 
 #ifdef CONFIG_NET_CLS_ROUTE
        __u32                   tclassid;
@@ -106,7 +106,27 @@ struct dst_entry {
 static inline u32
 dst_metric(const struct dst_entry *dst, int metric)
 {
-       return dst->metrics[metric-1];
+       return dst->_metrics[metric-1];
+}
+
+static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
+{
+       dst->_metrics[metric-1] = val;
+}
+
+static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics)
+{
+       memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+       dst_import_metrics(dest, src->_metrics);
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+       return dst->_metrics;
 }
 
 static inline u32
@@ -134,7 +154,7 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr
 static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
                                      unsigned long rtt)
 {
-       dst->metrics[metric-1] = jiffies_to_msecs(rtt);
+       dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
 }
 
 static inline u32
index 8945f9fb192ab536d0e27f0b9617046f5086b9bc..8181498fa96ca5334cbff5f10a36a41abd8882dd 100644 (file)
@@ -116,8 +116,9 @@ struct inet_sock {
        struct ipv6_pinfo       *pinet6;
 #endif
        /* Socket demultiplex comparisons on incoming packets. */
-       __be32                  inet_daddr;
-       __be32                  inet_rcv_saddr;
+#define inet_daddr             sk.__sk_common.skc_daddr
+#define inet_rcv_saddr         sk.__sk_common.skc_rcv_saddr
+
        __be16                  inet_dport;
        __u16                   inet_num;
        __be32                  inet_saddr;
index a066fdd50da6c5041a12bb4983eeb4b9c8b9eee1..17404b5388a75302c4951d5f469fd96814ad7709 100644 (file)
@@ -88,12 +88,6 @@ extern void inet_twdr_hangman(unsigned long data);
 extern void inet_twdr_twkill_work(struct work_struct *work);
 extern void inet_twdr_twcal_tick(unsigned long data);
 
-#if (BITS_PER_LONG == 64)
-#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 8
-#else
-#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 4
-#endif
-
 struct inet_bind_bucket;
 
 /*
@@ -117,15 +111,15 @@ struct inet_timewait_sock {
 #define tw_hash                        __tw_common.skc_hash
 #define tw_prot                        __tw_common.skc_prot
 #define tw_net                 __tw_common.skc_net
+#define tw_daddr               __tw_common.skc_daddr
+#define tw_rcv_saddr           __tw_common.skc_rcv_saddr
        int                     tw_timeout;
        volatile unsigned char  tw_substate;
-       /* 3 bits hole, try to pack */
        unsigned char           tw_rcv_wscale;
+
        /* Socket demultiplex comparisons on incoming packets. */
-       /* these five are in inet_sock */
+       /* these three are in inet_sock */
        __be16                  tw_sport;
-       __be32                  tw_daddr __attribute__((aligned(INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES)));
-       __be32                  tw_rcv_saddr;
        __be16                  tw_dport;
        __u16                   tw_num;
        kmemcheck_bitfield_begin(flags);
@@ -191,10 +185,10 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
        return (struct inet_timewait_sock *)sk;
 }
 
-static inline __be32 inet_rcv_saddr(const struct sock *sk)
+static inline __be32 sk_rcv_saddr(const struct sock *sk)
 {
-       return likely(sk->sk_state != TCP_TIME_WAIT) ?
-               inet_sk(sk)->inet_rcv_saddr : inet_twsk(sk)->tw_rcv_saddr;
+/* both inet_sk() and inet_twsk() store rcv_saddr in skc_rcv_saddr */
+       return sk->__sk_common.skc_rcv_saddr;
 }
 
 extern void inet_twsk_put(struct inet_timewait_sock *tw);
index 717cfbf649dfb5ef092867756a70ebf22d122484..82e86034702f020915af395e4f57ca8a6609e3e1 100644 (file)
@@ -105,10 +105,8 @@ struct net;
 
 /**
  *     struct sock_common - minimal network layer representation of sockets
- *     @skc_node: main hash linkage for various protocol lookup tables
- *     @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
- *     @skc_refcnt: reference count
- *     @skc_tx_queue_mapping: tx queue number for this connection
+ *     @skc_daddr: Foreign IPv4 addr
+ *     @skc_rcv_saddr: Bound local IPv4 addr
  *     @skc_hash: hash value used with various protocol lookup tables
  *     @skc_u16hashes: two u16 hash values used by UDP lookup tables
  *     @skc_family: network address family
@@ -119,20 +117,20 @@ struct net;
  *     @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
  *     @skc_prot: protocol handlers inside a network family
  *     @skc_net: reference to the network namespace of this socket
+ *     @skc_node: main hash linkage for various protocol lookup tables
+ *     @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
+ *     @skc_tx_queue_mapping: tx queue number for this connection
+ *     @skc_refcnt: reference count
  *
  *     This is the minimal network layer representation of sockets, the header
  *     for struct sock and struct inet_timewait_sock.
  */
 struct sock_common {
-       /*
-        * first fields are not copied in sock_copy()
+       /* skc_daddr and skc_rcv_saddr must be grouped :
+        * cf INET_MATCH() and INET_TW_MATCH()
         */
-       union {
-               struct hlist_node       skc_node;
-               struct hlist_nulls_node skc_nulls_node;
-       };
-       atomic_t                skc_refcnt;
-       int                     skc_tx_queue_mapping;
+       __be32                  skc_daddr;
+       __be32                  skc_rcv_saddr;
 
        union  {
                unsigned int    skc_hash;
@@ -150,6 +148,18 @@ struct sock_common {
 #ifdef CONFIG_NET_NS
        struct net              *skc_net;
 #endif
+       /*
+        * fields between dontcopy_begin/dontcopy_end
+        * are not copied in sock_copy()
+        */
+       int                     skc_dontcopy_begin[0];
+       union {
+               struct hlist_node       skc_node;
+               struct hlist_nulls_node skc_nulls_node;
+       };
+       int                     skc_tx_queue_mapping;
+       atomic_t                skc_refcnt;
+       int                     skc_dontcopy_end[0];
 };
 
 /**
@@ -232,7 +242,8 @@ struct sock {
 #define sk_refcnt              __sk_common.skc_refcnt
 #define sk_tx_queue_mapping    __sk_common.skc_tx_queue_mapping
 
-#define sk_copy_start          __sk_common.skc_hash
+#define sk_dontcopy_begin      __sk_common.skc_dontcopy_begin
+#define sk_dontcopy_end                __sk_common.skc_dontcopy_end
 #define sk_hash                        __sk_common.skc_hash
 #define sk_family              __sk_common.skc_family
 #define sk_state               __sk_common.skc_state
@@ -1159,6 +1170,8 @@ extern void sk_common_release(struct sock *sk);
 /* Initialise core socket variables */
 extern void sock_init_data(struct socket *sock, struct sock *sk);
 
+extern void sk_filter_release_rcu(struct rcu_head *rcu);
+
 /**
  *     sk_filter_release - release a socket filter
  *     @fp: filter to remove
@@ -1169,7 +1182,7 @@ extern void sock_init_data(struct socket *sock, struct sock *sk);
 static inline void sk_filter_release(struct sk_filter *fp)
 {
        if (atomic_dec_and_test(&fp->refcnt))
-               kfree(fp);
+               call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
 }
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
index 45c15f491401c017f8fa423b465f3c910de08685..798beac7f100271f0ca4607fe4b3b1fee3fc1d34 100644 (file)
 
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/kernel.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/stddef.h>
 #include <linux/types.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 #include "protocol.h"
 
-#ifndef MIN
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
-#endif
-
-#ifndef MAX
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#endif
-
-#ifndef offset_of
-#define offset_of(type, memb) \
-       ((unsigned long)(&((type *)0)->memb))
-#endif
-#ifndef container_of
-#define container_of(obj, type, memb) \
-       ((type *)(((char *)obj) - offset_of(type, memb)))
-#endif
-
 static int
 p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
 
@@ -104,7 +89,7 @@ EXPORT_SYMBOL(p9stat_free);
 
 static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
 {
-       size_t len = MIN(pdu->size - pdu->offset, size);
+       size_t len = min(pdu->size - pdu->offset, size);
        memcpy(data, &pdu->sdata[pdu->offset], len);
        pdu->offset += len;
        return size - len;
@@ -112,7 +97,7 @@ static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
 
 static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
 {
-       size_t len = MIN(pdu->capacity - pdu->size, size);
+       size_t len = min(pdu->capacity - pdu->size, size);
        memcpy(&pdu->sdata[pdu->size], data, len);
        pdu->size += len;
        return size - len;
@@ -121,7 +106,7 @@ static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
 static size_t
 pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
 {
-       size_t len = MIN(pdu->capacity - pdu->size, size);
+       size_t len = min(pdu->capacity - pdu->size, size);
        if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
                len = 0;
 
@@ -201,7 +186,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
                                if (errcode)
                                        break;
 
-                               size = MAX(len, 0);
+                               size = max_t(int16_t, len, 0);
 
                                *sptr = kmalloc(size + 1, GFP_KERNEL);
                                if (*sptr == NULL) {
@@ -256,8 +241,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
                                    p9pdu_readf(pdu, proto_version, "d", count);
                                if (!errcode) {
                                        *count =
-                                           MIN(*count,
-                                               pdu->size - pdu->offset);
+                                           min_t(int32_t, *count,
+                                                 pdu->size - pdu->offset);
                                        *data = &pdu->sdata[pdu->offset];
                                }
                        }
@@ -421,7 +406,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                                const char *sptr = va_arg(ap, const char *);
                                int16_t len = 0;
                                if (sptr)
-                                       len = MIN(strlen(sptr), USHRT_MAX);
+                                       len = min_t(int16_t, strlen(sptr), USHRT_MAX);
 
                                errcode = p9pdu_writef(pdu, proto_version,
                                                                "w", len);
index 17cb0b633576eb96582b2b0ff22030bef02c3830..556443566e9c417bbb07da73d0cafd750d11de73 100644 (file)
@@ -141,7 +141,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
 
 #ifdef CONFIG_BRIDGE_NETFILTER
        /* remember the MTU in the rtable for PMTU */
-       br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu;
+       dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
 #endif
 
        return 0;
index 6e13920939113b07e7ae1bbed452323e261865d2..16f5c333596a4d8076cfa994cc91ba03c9690f47 100644 (file)
@@ -124,7 +124,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
        atomic_set(&rt->dst.__refcnt, 1);
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
-       rt->dst.metrics[RTAX_MTU - 1] = 1500;
+       dst_metric_set(&rt->dst, RTAX_MTU, 1500);
        rt->dst.flags   = DST_NOXFRM;
        rt->dst.ops = &fake_dst_ops;
 }
index 153bdec408359938c26e27bc4ce560387361f1b0..e87ef435e11b862351d1863a3d3682539e8fbc37 100644 (file)
@@ -1,9 +1,6 @@
 #
 # Makefile for CEPH filesystem.
 #
-
-ifneq ($(KERNELRELEASE),)
-
 obj-$(CONFIG_CEPH_LIB) += libceph.o
 
 libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
@@ -16,22 +13,3 @@ libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
        ceph_fs.o ceph_strings.o ceph_hash.o \
        pagevec.o
 
-else
-#Otherwise we were called directly from the command
-# line; invoke the kernel build system.
-
-KERNELDIR ?= /lib/modules/$(shell uname -r)/build
-PWD := $(shell pwd)
-
-default: all
-
-all:
-       $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules
-
-modules_install:
-       $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install
-
-clean:
-       $(MAKE) -C $(KERNELDIR) M=$(PWD) clean
-
-endif
index 53d8abfa25d5ede4b487c88a5341b376c4921c26..bf3e6a13c215cd61cd7372e85ba19e7fc7974e9a 100644 (file)
@@ -19,7 +19,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
        if (b->vec.iov_base) {
                b->is_vmalloc = false;
        } else {
-               b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
+               b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
                if (!b->vec.iov_base) {
                        kfree(b);
                        return NULL;
index cd1e039c87559a236db754e738917b5e34666aaa..18ac112ea7ae7abeaef244b33752d1d3d9d2e460 100644 (file)
@@ -177,7 +177,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                 * interrupt level will suddenly eat the receive_queue.
                 *
                 * Look at current nfs client by the way...
-                * However, this function was corrent in any case. 8)
+                * However, this function was correct in any case. 8)
                 */
                unsigned long cpu_flags;
 
index cd2437495428dd2316af7e55e4e41cfdf12b6132..d28b3a023bb2101f4884a03957e6856d1c2efe05 100644 (file)
@@ -743,34 +743,31 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
 EXPORT_SYMBOL(dev_get_by_index);
 
 /**
- *     dev_getbyhwaddr - find a device by its hardware address
+ *     dev_getbyhwaddr_rcu - find a device by its hardware address
  *     @net: the applicable net namespace
  *     @type: media type of device
  *     @ha: hardware address
  *
  *     Search for an interface by MAC address. Returns NULL if the device
- *     is not found or a pointer to the device. The caller must hold the
- *     rtnl semaphore. The returned device has not had its ref count increased
+ *     is not found or a pointer to the device. The caller must hold RCU
+ *     The returned device has not had its ref count increased
  *     and the caller must therefore be careful about locking
  *
- *     BUGS:
- *     If the API was consistent this would be __dev_get_by_hwaddr
  */
 
-struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+                                      const char *ha)
 {
        struct net_device *dev;
 
-       ASSERT_RTNL();
-
-       for_each_netdev(net, dev)
+       for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;
 
        return NULL;
 }
-EXPORT_SYMBOL(dev_getbyhwaddr);
+EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
 
 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
 {
@@ -2025,9 +2022,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
        int rc = NETDEV_TX_OK;
 
        if (likely(!skb->next)) {
-               if (!list_empty(&ptype_all))
-                       dev_queue_xmit_nit(skb, dev);
-
                /*
                 * If device doesnt need skb->dst, release it right now while
                 * its hot in this cpu cache
@@ -2035,6 +2029,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
                        skb_dst_drop(skb);
 
+               if (!list_empty(&ptype_all))
+                       dev_queue_xmit_nit(skb, dev);
+
                skb_orphan_try(skb);
 
                if (vlan_tx_tag_present(skb) &&
@@ -5041,10 +5038,13 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
        }
 
        if (features & NETIF_F_UFO) {
-               if (!(features & NETIF_F_GEN_CSUM)) {
+               /* maybe split UFO into V4 and V6? */
+               if (!((features & NETIF_F_GEN_CSUM) ||
+                   (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+                           == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
                        if (name)
                                printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
-                                      "since no NETIF_F_HW_CSUM feature.\n",
+                                      "since no checksum offload features.\n",
                                       name);
                        features &= ~NETIF_F_UFO;
                }
@@ -5109,11 +5109,21 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 }
 #endif
 
+static void netdev_init_one_queue(struct net_device *dev,
+                                 struct netdev_queue *queue, void *_unused)
+{
+       /* Initialize queue lock */
+       spin_lock_init(&queue->_xmit_lock);
+       netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+       queue->xmit_lock_owner = -1;
+       netdev_queue_numa_node_write(queue, -1);
+       queue->dev = dev;
+}
+
 static int netif_alloc_netdev_queues(struct net_device *dev)
 {
        unsigned int count = dev->num_tx_queues;
        struct netdev_queue *tx;
-       int i;
 
        BUG_ON(count < 1);
 
@@ -5125,27 +5135,10 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
        }
        dev->_tx = tx;
 
-       for (i = 0; i < count; i++) {
-               netdev_queue_numa_node_write(&tx[i], -1);
-               tx[i].dev = dev;
-       }
-       return 0;
-}
-
-static void netdev_init_one_queue(struct net_device *dev,
-                                 struct netdev_queue *queue,
-                                 void *_unused)
-{
-       /* Initialize queue lock */
-       spin_lock_init(&queue->_xmit_lock);
-       netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
-       queue->xmit_lock_owner = -1;
-}
-
-static void netdev_init_queues(struct net_device *dev)
-{
        netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
        spin_lock_init(&dev->tx_global_lock);
+
+       return 0;
 }
 
 /**
@@ -5184,8 +5177,6 @@ int register_netdevice(struct net_device *dev)
 
        dev->iflink = -1;
 
-       netdev_init_queues(dev);
-
        /* Init, if this function is available */
        if (dev->netdev_ops->ndo_init) {
                ret = dev->netdev_ops->ndo_init(dev);
index 956a9f4971cbc2e7f81c4ab5d8c8a8b909d00bf1..d5bc288188830970a4e0439f7e26b4b131082f00 100644 (file)
@@ -1171,7 +1171,9 @@ static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
                return -EFAULT;
        if (edata.data && !(dev->features & NETIF_F_SG))
                return -EINVAL;
-       if (edata.data && !(dev->features & NETIF_F_HW_CSUM))
+       if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
+               (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+                       == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
                return -EINVAL;
        return dev->ethtool_ops->set_ufo(dev, edata.data);
 }
index a44d27f9f0f0aa4975d95b8e8c56bebb3f0fda6b..e8a6ac411ffb3a5e6ead2fb3e20daa21143af23a 100644 (file)
@@ -88,7 +88,7 @@ enum {
 };
 
 /* No hurry in this branch */
-static void *__load_pointer(struct sk_buff *skb, int k)
+static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
 {
        u8 *ptr = NULL;
 
@@ -97,12 +97,12 @@ static void *__load_pointer(struct sk_buff *skb, int k)
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
 
-       if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
+       if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
        return NULL;
 }
 
-static inline void *load_pointer(struct sk_buff *skb, int k,
+static inline void *load_pointer(const struct sk_buff *skb, int k,
                                 unsigned int size, void *buffer)
 {
        if (k >= 0)
@@ -110,7 +110,7 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
        else {
                if (k >= SKF_AD_OFF)
                        return NULL;
-               return __load_pointer(skb, k);
+               return __load_pointer(skb, k, size);
        }
 }
 
@@ -160,17 +160,16 @@ EXPORT_SYMBOL(sk_filter);
  * and last instruction guaranteed to be a RET, we don't need to check
  * flen. (We used to pass the length of the filter to this function.)
  */
-unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry)
+unsigned int sk_run_filter(const struct sk_buff *skb,
+                          const struct sock_filter *fentry)
 {
        void *ptr;
        u32 A = 0;                      /* Accumulator */
        u32 X = 0;                      /* Index Register */
        u32 mem[BPF_MEMWORDS];          /* Scratch Memory Store */
-       unsigned long memvalid = 0;
        u32 tmp;
        int k;
 
-       BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
        /*
         * Process array of filter instructions.
         */
@@ -318,12 +317,10 @@ load_b:
                        X = K;
                        continue;
                case BPF_S_LD_MEM:
-                       A = (memvalid & (1UL << K)) ?
-                               mem[K] : 0;
+                       A = mem[K];
                        continue;
                case BPF_S_LDX_MEM:
-                       X = (memvalid & (1UL << K)) ?
-                               mem[K] : 0;
+                       X = mem[K];
                        continue;
                case BPF_S_MISC_TAX:
                        X = A;
@@ -336,11 +333,9 @@ load_b:
                case BPF_S_RET_A:
                        return A;
                case BPF_S_ST:
-                       memvalid |= 1UL << K;
                        mem[K] = A;
                        continue;
                case BPF_S_STX:
-                       memvalid |= 1UL << K;
                        mem[K] = X;
                        continue;
                default:
@@ -375,6 +370,12 @@ load_b:
                                return 0;
                        A = skb->dev->type;
                        continue;
+               case SKF_AD_RXHASH:
+                       A = skb->rxhash;
+                       continue;
+               case SKF_AD_CPU:
+                       A = raw_smp_processor_id();
+                       continue;
                case SKF_AD_NLATTR: {
                        struct nlattr *nla;
 
@@ -419,6 +420,66 @@ load_b:
 }
 EXPORT_SYMBOL(sk_run_filter);
 
+/*
+ * Security :
+ * A BPF program is able to use 16 cells of memory to store intermediate
+ * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter())
+ * As we don't want to clear the mem[] array for each packet going through
+ * sk_run_filter(), we check that a filter loaded by the user never tries
+ * to read a cell unless it was previously written, and we check all
+ * branches to be sure a malicious user doesn't try to abuse us.
+ */
+static int check_load_and_stores(struct sock_filter *filter, int flen)
+{
+       u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
+       int pc, ret = 0;
+
+       BUILD_BUG_ON(BPF_MEMWORDS > 16);
+       masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+       if (!masks)
+               return -ENOMEM;
+       memset(masks, 0xff, flen * sizeof(*masks));
+
+       for (pc = 0; pc < flen; pc++) {
+               memvalid &= masks[pc];
+
+               switch (filter[pc].code) {
+               case BPF_S_ST:
+               case BPF_S_STX:
+                       memvalid |= (1 << filter[pc].k);
+                       break;
+               case BPF_S_LD_MEM:
+               case BPF_S_LDX_MEM:
+                       if (!(memvalid & (1 << filter[pc].k))) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+                       break;
+               case BPF_S_JMP_JA:
+                       /* a jump must set masks on target */
+                       masks[pc + 1 + filter[pc].k] &= memvalid;
+                       memvalid = ~0;
+                       break;
+               case BPF_S_JMP_JEQ_K:
+               case BPF_S_JMP_JEQ_X:
+               case BPF_S_JMP_JGE_K:
+               case BPF_S_JMP_JGE_X:
+               case BPF_S_JMP_JGT_K:
+               case BPF_S_JMP_JGT_X:
+               case BPF_S_JMP_JSET_X:
+               case BPF_S_JMP_JSET_K:
+                       /* a jump must set masks on targets */
+                       masks[pc + 1 + filter[pc].jt] &= memvalid;
+                       masks[pc + 1 + filter[pc].jf] &= memvalid;
+                       memvalid = ~0;
+                       break;
+               }
+       }
+error:
+       kfree(masks);
+       return ret;
+}
+
 /**
  *     sk_chk_filter - verify socket filter code
  *     @filter: filter to verify
@@ -547,30 +608,23 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
        switch (filter[flen - 1].code) {
        case BPF_S_RET_K:
        case BPF_S_RET_A:
-               return 0;
+               return check_load_and_stores(filter, flen);
        }
        return -EINVAL;
 }
 EXPORT_SYMBOL(sk_chk_filter);
 
 /**
- *     sk_filter_rcu_release - Release a socket filter by rcu_head
+ *     sk_filter_release_rcu - Release a socket filter by rcu_head
  *     @rcu: rcu_head that contains the sk_filter to free
  */
-static void sk_filter_rcu_release(struct rcu_head *rcu)
+void sk_filter_release_rcu(struct rcu_head *rcu)
 {
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
-       sk_filter_release(fp);
-}
-
-static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
-{
-       unsigned int size = sk_filter_len(fp);
-
-       atomic_sub(size, &sk->sk_omem_alloc);
-       call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
+       kfree(fp);
 }
+EXPORT_SYMBOL(sk_filter_release_rcu);
 
 /**
  *     sk_attach_filter - attach a socket filter
@@ -614,7 +668,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        rcu_assign_pointer(sk->sk_filter, fp);
 
        if (old_fp)
-               sk_filter_delayed_uncharge(sk, old_fp);
+               sk_filter_uncharge(sk, old_fp);
        return 0;
 }
 EXPORT_SYMBOL_GPL(sk_attach_filter);
@@ -628,7 +682,7 @@ int sk_detach_filter(struct sock *sk)
                                           sock_owned_by_user(sk));
        if (filter) {
                rcu_assign_pointer(sk->sk_filter, NULL);
-               sk_filter_delayed_uncharge(sk, filter);
+               sk_filter_uncharge(sk, filter);
                ret = 0;
        }
        return ret;
index 41d99435f62d3004fd7ca14524dff45a32fcf7a3..182236b2510aeb16e6e0e2026264d683edf93e7e 100644 (file)
@@ -46,9 +46,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
        nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
        lopt_size += nr_table_entries * sizeof(struct request_sock *);
        if (lopt_size > PAGE_SIZE)
-               lopt = __vmalloc(lopt_size,
-                       GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-                       PAGE_KERNEL);
+               lopt = vzalloc(lopt_size);
        else
                lopt = kzalloc(lopt_size, GFP_KERNEL);
        if (lopt == NULL)
index fb6080111461546953b34979db77f5e2d516e060..bcdb6ff6e6214cc0204fbc75dc8e222a87db93e0 100644 (file)
@@ -992,17 +992,18 @@ static inline void sock_lock_init(struct sock *sk)
 /*
  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
  * even temporarily, because of RCU lookups. sk_node should also be left as is.
+ * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
  */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
        void *sptr = nsk->sk_security;
 #endif
-       BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
-                    sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
-                    sizeof(osk->sk_tx_queue_mapping));
-       memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
-              osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
+       memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
+
+       memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
+              osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
+
 #ifdef CONFIG_SECURITY_NETWORK
        nsk->sk_security = sptr;
        security_sk_clone(osk, nsk);
index dac7ed687f609c83e7be6681d871d9444658006e..b124d28ff1c886f969789aab5330d03b16222e0f 100644 (file)
@@ -26,7 +26,7 @@ static struct sock_filter ptp_filter[] = {
        PTP_FILTER
 };
 
-static unsigned int classify(struct sk_buff *skb)
+static unsigned int classify(const struct sk_buff *skb)
 {
        if (likely(skb->dev &&
                   skb->dev->phydev &&
index 2991efcc8deab0137acd4454ea09780a52d79a1a..5c8362b037ed12accc21fd85f58396f75fb2d870 100644 (file)
@@ -1,7 +1,7 @@
 obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o
 
-dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o
-
+dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o \
+         qpolicy.o
 #
 # CCID algorithms to be used by dccp.ko
 #
index 19fafd597465fac2ac7e40ff77b862a91884ed6b..48ad5d9da7cbe036dfbb62e75220e3c5f19c39f9 100644 (file)
@@ -243,6 +243,19 @@ extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 extern void dccp_send_sync(struct sock *sk, const u64 seq,
                           const enum dccp_pkt_type pkt_type);
 
+/*
+ * TX Packet Dequeueing Interface
+ */
+extern void            dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
+extern bool            dccp_qpolicy_full(struct sock *sk);
+extern void            dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
+extern struct sk_buff  *dccp_qpolicy_top(struct sock *sk);
+extern struct sk_buff  *dccp_qpolicy_pop(struct sock *sk);
+extern bool            dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
+
+/*
+ * TX Packet Output and TX Timers
+ */
 extern void   dccp_write_xmit(struct sock *sk);
 extern void   dccp_write_space(struct sock *sk);
 extern void   dccp_flush_write_queue(struct sock *sk, long *time_budget);
index 7d230d14ce22307b80cd4cf53e41e6563fab7063..15af247ea007a9e77f484a21c3cfe10d6796ff16 100644 (file)
@@ -241,7 +241,8 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
                dccp_update_gsr(sk, seqno);
 
                if (dh->dccph_type != DCCP_PKT_SYNC &&
-                   (ackno != DCCP_PKT_WITHOUT_ACK_SEQ))
+                   ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
+                   after48(ackno, dp->dccps_gar))
                        dp->dccps_gar = ackno;
        } else {
                unsigned long now = jiffies;
index d96dd9d362ae1162e7e82e1f22fc7338904b9a1c..784d302105439434ef7182e9a4f7b7d167a44baa 100644 (file)
@@ -242,7 +242,7 @@ static void dccp_xmit_packet(struct sock *sk)
 {
        int err, len;
        struct dccp_sock *dp = dccp_sk(sk);
-       struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);
+       struct sk_buff *skb = dccp_qpolicy_pop(sk);
 
        if (unlikely(skb == NULL))
                return;
@@ -345,7 +345,7 @@ void dccp_write_xmit(struct sock *sk)
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
 
-       while ((skb = skb_peek(&sk->sk_write_queue))) {
+       while ((skb = dccp_qpolicy_top(sk))) {
                int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 
                switch (ccid_packet_dequeue_eval(rc)) {
@@ -359,8 +359,7 @@ void dccp_write_xmit(struct sock *sk)
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
-                       skb_dequeue(&sk->sk_write_queue);
-                       kfree_skb(skb);
+                       dccp_qpolicy_drop(sk, skb);
                        dccp_pr_debug("packet discarded due to err=%d\n", rc);
                }
        }
index ef343d53fcea22edf7d7bb2be677f349ec63f547..152975d942d9a59a7c26756d0f22419b9e298425 100644 (file)
@@ -185,6 +185,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
        dp->dccps_role          = DCCP_ROLE_UNDEFINED;
        dp->dccps_service       = DCCP_SERVICE_CODE_IS_ABSENT;
        dp->dccps_l_ack_ratio   = dp->dccps_r_ack_ratio = 1;
+       dp->dccps_tx_qlen       = sysctl_dccp_tx_qlen;
 
        dccp_init_xmit_timers(sk);
 
@@ -532,6 +533,20 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
        case DCCP_SOCKOPT_RECV_CSCOV:
                err = dccp_setsockopt_cscov(sk, val, true);
                break;
+       case DCCP_SOCKOPT_QPOLICY_ID:
+               if (sk->sk_state != DCCP_CLOSED)
+                       err = -EISCONN;
+               else if (val < 0 || val >= DCCPQ_POLICY_MAX)
+                       err = -EINVAL;
+               else
+                       dp->dccps_qpolicy = val;
+               break;
+       case DCCP_SOCKOPT_QPOLICY_TXQLEN:
+               if (val < 0)
+                       err = -EINVAL;
+               else
+                       dp->dccps_tx_qlen = val;
+               break;
        default:
                err = -ENOPROTOOPT;
                break;
@@ -639,6 +654,12 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
        case DCCP_SOCKOPT_RECV_CSCOV:
                val = dp->dccps_pcrlen;
                break;
+       case DCCP_SOCKOPT_QPOLICY_ID:
+               val = dp->dccps_qpolicy;
+               break;
+       case DCCP_SOCKOPT_QPOLICY_TXQLEN:
+               val = dp->dccps_tx_qlen;
+               break;
        case 128 ... 191:
                return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
@@ -681,6 +702,47 @@ int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
 #endif
 
+static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
+{
+       struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
+
+       /*
+        * Assign an (opaque) qpolicy priority value to skb->priority.
+        *
+        * We are overloading this skb field for use with the qpolicy subsystem.
+        * The skb->priority is normally used for the SO_PRIORITY option, which
+        * is initialised from sk_priority. Since the assignment of sk_priority
+        * to skb->priority happens later (on layer 3), we overload this field
+        * for use with queueing priorities as long as the skb is on layer 4.
+        * The default priority value (if nothing is set) is 0.
+        */
+       skb->priority = 0;
+
+       for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+               if (!CMSG_OK(msg, cmsg))
+                       return -EINVAL;
+
+               if (cmsg->cmsg_level != SOL_DCCP)
+                       continue;
+
+               if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
+                   !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
+                       return -EINVAL;
+
+               switch (cmsg->cmsg_type) {
+               case DCCP_SCM_PRIORITY:
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
+                               return -EINVAL;
+                       skb->priority = *(__u32 *)CMSG_DATA(cmsg);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
 int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len)
 {
@@ -696,8 +758,7 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        lock_sock(sk);
 
-       if (sysctl_dccp_tx_qlen &&
-           (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
+       if (dccp_qpolicy_full(sk)) {
                rc = -EAGAIN;
                goto out_release;
        }
@@ -725,7 +786,11 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (rc != 0)
                goto out_discard;
 
-       skb_queue_tail(&sk->sk_write_queue, skb);
+       rc = dccp_msghdr_parse(msg, skb);
+       if (rc != 0)
+               goto out_discard;
+
+       dccp_qpolicy_push(sk, skb);
        /*
         * The xmit_timer is set if the TX CCID is rate-based and will expire
         * when congestion control permits to release further packets into the
diff --git a/net/dccp/qpolicy.c b/net/dccp/qpolicy.c
new file mode 100644 (file)
index 0000000..63c30bf
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ *  net/dccp/qpolicy.c
+ *
+ *  Policy-based packet dequeueing interface for DCCP.
+ *
+ *  Copyright (c) 2008 Tomasz Grobelny <tomasz@grobelny.oswiecenia.net>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License v2
+ *  as published by the Free Software Foundation.
+ */
+#include "dccp.h"
+
+/*
+ *     Simple Dequeueing Policy:
+ *     If tx_qlen is different from 0, enqueue up to tx_qlen elements.
+ */
+static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb)
+{
+       skb_queue_tail(&sk->sk_write_queue, skb);
+}
+
+static bool qpolicy_simple_full(struct sock *sk)
+{
+       return dccp_sk(sk)->dccps_tx_qlen &&
+              sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen;
+}
+
+static struct sk_buff *qpolicy_simple_top(struct sock *sk)
+{
+       return skb_peek(&sk->sk_write_queue);
+}
+
+/*
+ *     Priority-based Dequeueing Policy:
+ *     If tx_qlen is different from 0 and the queue has reached its upper bound
+ *     of tx_qlen elements, replace older packets lowest-priority-first.
+ */
+static struct sk_buff *qpolicy_prio_best_skb(struct sock *sk)
+{
+       struct sk_buff *skb, *best = NULL;
+
+       skb_queue_walk(&sk->sk_write_queue, skb)
+               if (best == NULL || skb->priority > best->priority)
+                       best = skb;
+       return best;
+}
+
+static struct sk_buff *qpolicy_prio_worst_skb(struct sock *sk)
+{
+       struct sk_buff *skb, *worst = NULL;
+
+       skb_queue_walk(&sk->sk_write_queue, skb)
+               if (worst == NULL || skb->priority < worst->priority)
+                       worst = skb;
+       return worst;
+}
+
+static bool qpolicy_prio_full(struct sock *sk)
+{
+       if (qpolicy_simple_full(sk))
+               dccp_qpolicy_drop(sk, qpolicy_prio_worst_skb(sk));
+       return false;
+}
+
+/**
+ * struct dccp_qpolicy_operations  -  TX Packet Dequeueing Interface
+ * @push: add a new @skb to the write queue
+ * @full: indicates that no more packets will be admitted
+ * @top:  peeks at whatever the queueing policy defines as its `top'
+ */
+static struct dccp_qpolicy_operations {
+       void            (*push) (struct sock *sk, struct sk_buff *skb);
+       bool            (*full) (struct sock *sk);
+       struct sk_buff* (*top)  (struct sock *sk);
+       __be32          params;
+
+} qpol_table[DCCPQ_POLICY_MAX] = {
+       [DCCPQ_POLICY_SIMPLE] = {
+               .push   = qpolicy_simple_push,
+               .full   = qpolicy_simple_full,
+               .top    = qpolicy_simple_top,
+               .params = 0,
+       },
+       [DCCPQ_POLICY_PRIO] = {
+               .push   = qpolicy_simple_push,
+               .full   = qpolicy_prio_full,
+               .top    = qpolicy_prio_best_skb,
+               .params = DCCP_SCM_PRIORITY,
+       },
+};
+
+/*
+ *     Externally visible interface
+ */
+void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb)
+{
+       qpol_table[dccp_sk(sk)->dccps_qpolicy].push(sk, skb);
+}
+
+bool dccp_qpolicy_full(struct sock *sk)
+{
+       return qpol_table[dccp_sk(sk)->dccps_qpolicy].full(sk);
+}
+
+void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb)
+{
+       if (skb != NULL) {
+               skb_unlink(skb, &sk->sk_write_queue);
+               kfree_skb(skb);
+       }
+}
+
+struct sk_buff *dccp_qpolicy_top(struct sock *sk)
+{
+       return qpol_table[dccp_sk(sk)->dccps_qpolicy].top(sk);
+}
+
+struct sk_buff *dccp_qpolicy_pop(struct sock *sk)
+{
+       struct sk_buff *skb = dccp_qpolicy_top(sk);
+
+       if (skb != NULL) {
+               /* Clear any skb fields that we used internally */
+               skb->priority = 0;
+               skb_unlink(skb, &sk->sk_write_queue);
+       }
+       return skb;
+}
+
+bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param)
+{
+       /* check if exactly one bit is set */
+       if (!param || (param & (param - 1)))
+               return false;
+       return (qpol_table[dccp_sk(sk)->dccps_qpolicy].params & param) == param;
+}
index 9ecef9968c3940026deefba772fa568af7237bcc..0065e7e14af4eafa8e613a2712c98ca16e7c1d6e 100644 (file)
@@ -1556,6 +1556,8 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
                        if (r_len > sizeof(struct linkinfo_dn))
                                r_len = sizeof(struct linkinfo_dn);
 
+                       memset(&link, 0, sizeof(link));
+
                        switch(sock->state) {
                                case SS_CONNECTING:
                                        link.idn_linkstate = LL_CONNECTING;
index 8280e43c88610460664199061488f5186de692cd..e2e926841fe6255e75f128f45919ba78fad2aa8f 100644 (file)
@@ -240,13 +240,13 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 
        if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
                if (!(dst_metric_locked(dst, RTAX_MTU))) {
-                       dst->metrics[RTAX_MTU-1] = mtu;
+                       dst_metric_set(dst, RTAX_MTU, mtu);
                        dst_set_expires(dst, dn_rt_mtu_expires);
                }
                if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
                        u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
                        if (dst_metric(dst, RTAX_ADVMSS) > mss)
-                               dst->metrics[RTAX_ADVMSS-1] = mss;
+                               dst_metric_set(dst, RTAX_ADVMSS, mss);
                }
        }
 }
@@ -806,8 +806,7 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
                if (DN_FIB_RES_GW(*res) &&
                    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
                        rt->rt_gateway = DN_FIB_RES_GW(*res);
-               memcpy(rt->dst.metrics, fi->fib_metrics,
-                      sizeof(rt->dst.metrics));
+               dst_import_metrics(&rt->dst, fi->fib_metrics);
        }
        rt->rt_type = res->type;
 
@@ -820,11 +819,11 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 
        if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
            dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
-               rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
+               dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
        mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
        if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
            dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
-               rt->dst.metrics[RTAX_ADVMSS-1] = mss;
+               dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
        return 0;
 }
 
@@ -1502,7 +1501,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
        if (rt->rt_daddr != rt->rt_gateway)
                RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
-       if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
+       if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto rtattr_failure;
        expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
index f8c1ae4b41f03641f7bb7db11feb88893df65519..f180371fa415de9f17a7911e391a272eb09b55ac 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/skbuff.h>
 #include <linux/udp.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <linux/stat.h>
@@ -276,12 +277,12 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 #endif
 #ifdef CONFIG_ECONET_AUNUDP
        struct msghdr udpmsg;
-       struct iovec iov[msg->msg_iovlen+1];
+       struct iovec iov[2];
        struct aunhdr ah;
        struct sockaddr_in udpdest;
        __kernel_size_t size;
-       int i;
        mm_segment_t oldfs;
+       char *userbuf;
 #endif
 
        /*
@@ -297,23 +298,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        mutex_lock(&econet_mutex);
 
-       if (saddr == NULL) {
-               struct econet_sock *eo = ec_sk(sk);
-
-               addr.station = eo->station;
-               addr.net     = eo->net;
-               port         = eo->port;
-               cb           = eo->cb;
-       } else {
-               if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
-                       mutex_unlock(&econet_mutex);
-                       return -EINVAL;
-               }
-               addr.station = saddr->addr.station;
-               addr.net = saddr->addr.net;
-               port = saddr->port;
-               cb = saddr->cb;
-       }
+        if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
+                mutex_unlock(&econet_mutex);
+                return -EINVAL;
+        }
+        addr.station = saddr->addr.station;
+        addr.net = saddr->addr.net;
+        port = saddr->port;
+        cb = saddr->cb;
 
        /* Look for a device with the right network number. */
        dev = net2dev_map[addr.net];
@@ -328,17 +320,17 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                }
        }
 
-       if (len + 15 > dev->mtu) {
-               mutex_unlock(&econet_mutex);
-               return -EMSGSIZE;
-       }
-
        if (dev->type == ARPHRD_ECONET) {
                /* Real hardware Econet.  We're not worthy etc. */
 #ifdef CONFIG_ECONET_NATIVE
                unsigned short proto = 0;
                int res;
 
+               if (len + 15 > dev->mtu) {
+                       mutex_unlock(&econet_mutex);
+                       return -EMSGSIZE;
+               }
+
                dev_hold(dev);
 
                skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
@@ -351,7 +343,6 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
                eb = (struct ec_cb *)&skb->cb;
 
-               /* BUG: saddr may be NULL */
                eb->cookie = saddr->cookie;
                eb->sec = *saddr;
                eb->sent = ec_tx_done;
@@ -415,6 +406,11 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                return -ENETDOWN;               /* No socket - can't send */
        }
 
+       if (len > 32768) {
+               err = -E2BIG;
+               goto error;
+       }
+
        /* Make up a UDP datagram and hand it off to some higher intellect. */
 
        memset(&udpdest, 0, sizeof(udpdest));
@@ -446,36 +442,26 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        /* tack our header on the front of the iovec */
        size = sizeof(struct aunhdr);
-       /*
-        * XXX: that is b0rken.  We can't mix userland and kernel pointers
-        * in iovec, since on a lot of platforms copy_from_user() will
-        * *not* work with the kernel and userland ones at the same time,
-        * regardless of what we do with set_fs().  And we are talking about
-        * econet-over-ethernet here, so "it's only ARM anyway" doesn't
-        * apply.  Any suggestions on fixing that code?         -- AV
-        */
        iov[0].iov_base = (void *)&ah;
        iov[0].iov_len = size;
-       for (i = 0; i < msg->msg_iovlen; i++) {
-               void __user *base = msg->msg_iov[i].iov_base;
-               size_t iov_len = msg->msg_iov[i].iov_len;
-               /* Check it now since we switch to KERNEL_DS later. */
-               if (!access_ok(VERIFY_READ, base, iov_len)) {
-                       mutex_unlock(&econet_mutex);
-                       return -EFAULT;
-               }
-               iov[i+1].iov_base = base;
-               iov[i+1].iov_len = iov_len;
-               size += iov_len;
+
+       userbuf = vmalloc(len);
+       if (userbuf == NULL) {
+               err = -ENOMEM;
+               goto error;
        }
 
+       iov[1].iov_base = userbuf;
+       iov[1].iov_len = len;
+       err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
+       if (err)
+               goto error_free_buf;
+
        /* Get a skbuff (no data, just holds our cb information) */
        if ((skb = sock_alloc_send_skb(sk, 0,
                                       msg->msg_flags & MSG_DONTWAIT,
-                                      &err)) == NULL) {
-               mutex_unlock(&econet_mutex);
-               return err;
-       }
+                                      &err)) == NULL)
+               goto error_free_buf;
 
        eb = (struct ec_cb *)&skb->cb;
 
@@ -491,7 +477,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
        udpmsg.msg_name = (void *)&udpdest;
        udpmsg.msg_namelen = sizeof(udpdest);
        udpmsg.msg_iov = &iov[0];
-       udpmsg.msg_iovlen = msg->msg_iovlen + 1;
+       udpmsg.msg_iovlen = 2;
        udpmsg.msg_control = NULL;
        udpmsg.msg_controllen = 0;
        udpmsg.msg_flags=0;
@@ -499,9 +485,13 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
        oldfs = get_fs(); set_fs(KERNEL_DS);    /* More privs :-) */
        err = sock_sendmsg(udpsock, &udpmsg, size);
        set_fs(oldfs);
+
+error_free_buf:
+       vfree(userbuf);
 #else
        err = -EPROTOTYPE;
 #endif
+       error:
        mutex_unlock(&econet_mutex);
 
        return err;
@@ -671,6 +661,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
        err = 0;
        switch (cmd) {
        case SIOCSIFADDR:
+               if (!capable(CAP_NET_ADMIN)) {
+                       err = -EPERM;
+                       break;
+               }
+
                edev = dev->ec_ptr;
                if (edev == NULL) {
                        /* Magic up a new one. */
index 93c91b633a566729bc48acedc766ae3db20f3bc2..6df6ecf4970876b54ef16acac3a971fdaf1d333c 100644 (file)
@@ -52,11 +52,11 @@ struct net_device *ieee802154_get_dev(struct net *net,
 
        switch (addr->addr_type) {
        case IEEE802154_ADDR_LONG:
-               rtnl_lock();
-               dev = dev_getbyhwaddr(net, ARPHRD_IEEE802154, addr->hwaddr);
+               rcu_read_lock();
+               dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, addr->hwaddr);
                if (dev)
                        dev_hold(dev);
-               rtnl_unlock();
+               rcu_read_unlock();
                break;
        case IEEE802154_ADDR_SHORT:
                if (addr->pan_id == 0xffff ||
index 7833f17b648a149b8d21e134b2eee0c6d619a97c..a2fc7b961dbcd9e467b024aff1b2ec02a20e7861 100644 (file)
@@ -883,7 +883,7 @@ static int arp_process(struct sk_buff *skb)
 
                        dont_send = arp_ignore(in_dev, sip, tip);
                        if (!dont_send && IN_DEV_ARPFILTER(in_dev))
-                               dont_send |= arp_filter(sip, tip, dev);
+                               dont_send = arp_filter(sip, tip, dev);
                        if (!dont_send) {
                                n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
                                if (n) {
@@ -1017,13 +1017,14 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
                IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
                return 0;
        }
-       if (__in_dev_get_rtnl(dev)) {
-               IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
+       if (__in_dev_get_rcu(dev)) {
+               IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
                return 0;
        }
        return -ENXIO;
 }
 
+/* must be called with rcu_read_lock() */
 static int arp_req_set_public(struct net *net, struct arpreq *r,
                struct net_device *dev)
 {
@@ -1033,7 +1034,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
        if (mask && mask != htonl(0xFFFFFFFF))
                return -EINVAL;
        if (!dev && (r->arp_flags & ATF_COM)) {
-               dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
+               dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family,
                                      r->arp_ha.sa_data);
                if (!dev)
                        return -ENODEV;
@@ -1225,10 +1226,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
        if (!(r.arp_flags & ATF_NETMASK))
                ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
                                                           htonl(0xFFFFFFFFUL);
-       rtnl_lock();
+       rcu_read_lock();
        if (r.arp_dev[0]) {
                err = -ENODEV;
-               dev = __dev_get_by_name(net, r.arp_dev);
+               dev = dev_get_by_name_rcu(net, r.arp_dev);
                if (dev == NULL)
                        goto out;
 
@@ -1252,12 +1253,12 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                break;
        case SIOCGARP:
                err = arp_req_get(&r, dev);
-               if (!err && copy_to_user(arg, &r, sizeof(r)))
-                       err = -EFAULT;
                break;
        }
 out:
-       rtnl_unlock();
+       rcu_read_unlock();
+       if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
+               err = -EFAULT;
        return err;
 }
 
index d9f71bae45c40ddf0231c12164dea85d32c2b883..3b067704ab3852e5e51363ead5cbe9013fb5741e 100644 (file)
@@ -1258,7 +1258,7 @@ errout:
 
 static size_t inet_get_link_af_size(const struct net_device *dev)
 {
-       struct in_device *in_dev = __in_dev_get_rcu(dev);
+       struct in_device *in_dev = __in_dev_get_rtnl(dev);
 
        if (!in_dev)
                return 0;
@@ -1268,7 +1268,7 @@ static size_t inet_get_link_af_size(const struct net_device *dev)
 
 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
 {
-       struct in_device *in_dev = __in_dev_get_rcu(dev);
+       struct in_device *in_dev = __in_dev_get_rtnl(dev);
        struct nlattr *nla;
        int i;
 
@@ -1295,7 +1295,7 @@ static int inet_validate_link_af(const struct net_device *dev,
        struct nlattr *a, *tb[IFLA_INET_MAX+1];
        int err, rem;
 
-       if (dev && !__in_dev_get_rcu(dev))
+       if (dev && !__in_dev_get_rtnl(dev))
                return -EAFNOSUPPORT;
 
        err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
@@ -1319,7 +1319,7 @@ static int inet_validate_link_af(const struct net_device *dev,
 
 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
 {
-       struct in_device *in_dev = __in_dev_get_rcu(dev);
+       struct in_device *in_dev = __in_dev_get_rtnl(dev);
        struct nlattr *a, *tb[IFLA_INET_MAX+1];
        int rem;
 
index 200eb538fbb3f77101fe9646893dddbd9e83117b..0f280348e0fdad70adb37f3b33bdbe6fedd99025 100644 (file)
@@ -365,7 +365,7 @@ static struct tnode *tnode_alloc(size_t size)
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
        else
-               return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+               return vzalloc(size);
 }
 
 static void __tnode_vfree(struct work_struct *arg)
index 06f5f8f482f0e092d86571b75846a60ea865d612..25e318153f143366894ec111dc5e6cb89d980c9b 100644 (file)
@@ -55,7 +55,6 @@ EXPORT_SYMBOL(inet_get_local_port_range);
 int inet_csk_bind_conflict(const struct sock *sk,
                           const struct inet_bind_bucket *tb)
 {
-       const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
        struct sock *sk2;
        struct hlist_node *node;
        int reuse = sk->sk_reuse;
@@ -75,9 +74,9 @@ int inet_csk_bind_conflict(const struct sock *sk,
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
                        if (!reuse || !sk2->sk_reuse ||
                            sk2->sk_state == TCP_LISTEN) {
-                               const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
-                               if (!sk2_rcv_saddr || !sk_rcv_saddr ||
-                                   sk2_rcv_saddr == sk_rcv_saddr)
+                               const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
+                               if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
+                                   sk2_rcv_saddr == sk_rcv_saddr(sk))
                                        break;
                        }
                }
index 1b344f30b463fab9ed70a8f19a19d348d7c626f7..3c0369a3a663693ac24e6acdd2730909ae5ddf4a 100644 (file)
@@ -133,8 +133,7 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
                        }
                }
        }
-       sk_add_bind_node(child, &tb->owners);
-       inet_csk(child)->icsk_bind_hash = tb;
+       inet_bind_hash(child, tb, port);
        spin_unlock(&head->lock);
 
        return 0;
index 258c98d5fa79fb861fce41e45da19c6201e3a18a..ff4e7a4e33ed21896c872a45a210b440e7f3c9bf 100644 (file)
@@ -818,7 +818,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                             !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
                            rt6->rt6i_dst.plen == 128) {
                                rt6->rt6i_flags |= RTF_MODIFIED;
-                               skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
+                               dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
                        }
                }
 
index 1b48eb1ed4531d3fd037e7ee9aa6367b6cb575f3..b14ec7d03b6e70b9a5a1823261ac18cb40462035 100644 (file)
@@ -253,6 +253,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
        SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
        SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
+       SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
        SNMP_MIB_SENTINEL
 };
 
index 3843c2dfde82cfaf90be1784d3fded834b027baf..26ac396eaa5ef0bdd20fadc9997b3391fdb81849 100644 (file)
@@ -1686,11 +1686,14 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                                        if (mtu < dst_mtu(&rth->dst)) {
                                                dst_confirm(&rth->dst);
                                                if (mtu < ip_rt_min_pmtu) {
+                                                       u32 lock = dst_metric(&rth->dst,
+                                                                             RTAX_LOCK);
                                                        mtu = ip_rt_min_pmtu;
-                                                       rth->dst.metrics[RTAX_LOCK-1] |=
-                                                               (1 << RTAX_MTU);
+                                                       lock |= (1 << RTAX_MTU);
+                                                       dst_metric_set(&rth->dst, RTAX_LOCK,
+                                                                      lock);
                                                }
-                                               rth->dst.metrics[RTAX_MTU-1] = mtu;
+                                               dst_metric_set(&rth->dst, RTAX_MTU, mtu);
                                                dst_set_expires(&rth->dst,
                                                        ip_rt_mtu_expires);
                                        }
@@ -1708,10 +1711,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
        if (dst_mtu(dst) > mtu && mtu >= 68 &&
            !(dst_metric_locked(dst, RTAX_MTU))) {
                if (mtu < ip_rt_min_pmtu) {
+                       u32 lock = dst_metric(dst, RTAX_LOCK);
                        mtu = ip_rt_min_pmtu;
-                       dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
+                       dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
                }
-               dst->metrics[RTAX_MTU-1] = mtu;
+               dst_metric_set(dst, RTAX_MTU, mtu);
                dst_set_expires(dst, ip_rt_mtu_expires);
                call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
        }
@@ -1796,36 +1800,37 @@ static void set_class_tag(struct rtable *rt, u32 tag)
 
 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 {
+       struct dst_entry *dst = &rt->dst;
        struct fib_info *fi = res->fi;
 
        if (fi) {
                if (FIB_RES_GW(*res) &&
                    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
                        rt->rt_gateway = FIB_RES_GW(*res);
-               memcpy(rt->dst.metrics, fi->fib_metrics,
-                      sizeof(rt->dst.metrics));
+               dst_import_metrics(dst, fi->fib_metrics);
                if (fi->fib_mtu == 0) {
-                       rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
-                       if (dst_metric_locked(&rt->dst, RTAX_MTU) &&
+                       dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
+                       if (dst_metric_locked(dst, RTAX_MTU) &&
                            rt->rt_gateway != rt->rt_dst &&
-                           rt->dst.dev->mtu > 576)
-                               rt->dst.metrics[RTAX_MTU-1] = 576;
+                           dst->dev->mtu > 576)
+                               dst_metric_set(dst, RTAX_MTU, 576);
                }
 #ifdef CONFIG_NET_CLS_ROUTE
-               rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
+               dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
        } else
-               rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu;
-
-       if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
-               rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
-       if (dst_mtu(&rt->dst) > IP_MAX_MTU)
-               rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
-       if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0)
-               rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40,
-                                      ip_rt_min_advmss);
-       if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40)
-               rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
+               dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
+
+       if (dst_metric(dst, RTAX_HOPLIMIT) == 0)
+               dst_metric_set(dst, RTAX_HOPLIMIT, sysctl_ip_default_ttl);
+       if (dst_mtu(dst) > IP_MAX_MTU)
+               dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
+       if (dst_metric(dst, RTAX_ADVMSS) == 0)
+               dst_metric_set(dst, RTAX_ADVMSS,
+                              max_t(unsigned int, dst->dev->mtu - 40,
+                                    ip_rt_min_advmss));
+       if (dst_metric(dst, RTAX_ADVMSS) > 65535 - 40)
+               dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
 
 #ifdef CONFIG_NET_CLS_ROUTE
 #ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -2720,7 +2725,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard;
-               memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+               dst_copy_metrics(new, &ort->dst);
 
                new->dev = ort->dst.dev;
                if (new->dev)
@@ -2827,7 +2832,7 @@ static int rt_fill_info(struct net *net,
        if (rt->rt_dst != rt->rt_gateway)
                NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
 
-       if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
+       if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto nla_put_failure;
 
        if (rt->fl.mark)
index e91911d7aae26656f940a747a6d0ccb91739f39a..1b4ec21497a49756025965e4e17e4bc9466f4f5c 100644 (file)
@@ -26,6 +26,8 @@ static int zero;
 static int tcp_retr1_max = 255;
 static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
+static int tcp_adv_win_scale_min = -31;
+static int tcp_adv_win_scale_max = 31;
 
 /* Update system visible IP port range */
 static void set_local_port_range(int range[2])
@@ -426,7 +428,9 @@ static struct ctl_table ipv4_table[] = {
                .data           = &sysctl_tcp_adv_win_scale,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &tcp_adv_win_scale_min,
+               .extra2         = &tcp_adv_win_scale_max,
        },
        {
                .procname       = "tcp_tw_reuse",
index 2bb46d55f40cf0e680b420f6d80d7d745b6e1634..6c11eece262cf77a377a9fa901f4031c46745205 100644 (file)
@@ -2244,7 +2244,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                /* Values greater than interface MTU won't take effect. However
                 * at the point when this call is done we typically don't yet
                 * know which interface is going to be used */
-               if (val < 64 || val > MAX_TCP_WINDOW) {
+               if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
                        err = -EINVAL;
                        break;
                }
index 6d8ab1c4efc3ea59c4848c74ae367f872cd6505e..824e8c8a17adc8bfc8faf6c6839074d54f027fe4 100644 (file)
@@ -734,7 +734,7 @@ void tcp_update_metrics(struct sock *sk)
                         * Reset our results.
                         */
                        if (!(dst_metric_locked(dst, RTAX_RTT)))
-                               dst->metrics[RTAX_RTT - 1] = 0;
+                               dst_metric_set(dst, RTAX_RTT, 0);
                        return;
                }
 
@@ -776,34 +776,38 @@ void tcp_update_metrics(struct sock *sk)
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
                            (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-                               dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
+                               dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
                        if (!dst_metric_locked(dst, RTAX_CWND) &&
                            tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
+                               dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
                } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                           icsk->icsk_ca_state == TCP_CA_Open) {
                        /* Cong. avoidance phase, cwnd is reliable. */
                        if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-                               dst->metrics[RTAX_SSTHRESH-1] =
-                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
+                               dst_metric_set(dst, RTAX_SSTHRESH,
+                                              max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                        if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1;
+                               dst_metric_set(dst, RTAX_CWND,
+                                              (dst_metric(dst, RTAX_CWND) +
+                                               tp->snd_cwnd) >> 1);
                } else {
                        /* Else slow start did not finish, cwnd is non-sense,
                           ssthresh may be also invalid.
                         */
                        if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1;
+                               dst_metric_set(dst, RTAX_CWND,
+                                              (dst_metric(dst, RTAX_CWND) +
+                                               tp->snd_ssthresh) >> 1);
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
                            tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-                               dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
+                               dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
                }
 
                if (!dst_metric_locked(dst, RTAX_REORDERING)) {
                        if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
-                               dst->metrics[RTAX_REORDERING-1] = tp->reordering;
+                               dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
                }
        }
 }
index dd555051ec8be951a3da76cf5645343bf810584b..4fc3387aa9942a0b69b5a25c91745caaa4d46389 100644 (file)
@@ -2013,7 +2013,9 @@ get_req:
        }
 get_sk:
        sk_nulls_for_each_from(sk, node) {
-               if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
+               if (!net_eq(sock_net(sk), net))
+                       continue;
+               if (sk->sk_family == st->family) {
                        cur = sk;
                        goto out;
                }
index 3527b51d615956b87eb8b1f0ee2de5982da1f588..80b1f80759abff53b0bf7e3438c72bbc2aa85162 100644 (file)
@@ -392,7 +392,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
-               LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }
 
        tcp_update_metrics(sk);
index 749b6498588e8389c96c555730a34d4f7d424321..97041f24cd2764741648c16e206d170c422a67d8 100644 (file)
@@ -231,11 +231,10 @@ void tcp_select_initial_window(int __space, __u32 mss,
                /* when initializing use the value from init_rcv_wnd
                 * rather than the default from above
                 */
-               if (init_rcv_wnd &&
-                   (*rcv_wnd > init_rcv_wnd * mss))
-                       *rcv_wnd = init_rcv_wnd * mss;
-               else if (*rcv_wnd > init_cwnd * mss)
-                       *rcv_wnd = init_cwnd * mss;
+               if (init_rcv_wnd)
+                       *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
+               else
+                       *rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
        }
 
        /* Set the clamp no higher than max representable value */
@@ -386,27 +385,30 @@ struct tcp_out_options {
  */
 static u8 tcp_cookie_size_check(u8 desired)
 {
-       if (desired > 0) {
+       int cookie_size;
+
+       if (desired > 0)
                /* previously specified */
                return desired;
-       }
-       if (sysctl_tcp_cookie_size <= 0) {
+
+       cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
+       if (cookie_size <= 0)
                /* no default specified */
                return 0;
-       }
-       if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
+
+       if (cookie_size <= TCP_COOKIE_MIN)
                /* value too small, specify minimum */
                return TCP_COOKIE_MIN;
-       }
-       if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
+
+       if (cookie_size >= TCP_COOKIE_MAX)
                /* value too large, specify maximum */
                return TCP_COOKIE_MAX;
-       }
-       if (0x1 & sysctl_tcp_cookie_size) {
+
+       if (cookie_size & 1)
                /* 8-bit multiple, illegal, fix it */
-               return (u8)(sysctl_tcp_cookie_size + 0x1);
-       }
-       return (u8)sysctl_tcp_cookie_size;
+               cookie_size++;
+
+       return (u8)cookie_size;
 }
 
 /* Write previously computed TCP options to the packet.
@@ -1516,6 +1518,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 send_win, cong_win, limit, in_flight;
+       int win_divisor;
 
        if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
                goto send_now;
@@ -1547,13 +1550,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
        if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
                goto send_now;
 
-       if (sysctl_tcp_tso_win_divisor) {
+       win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
+       if (win_divisor) {
                u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
 
                /* If at least some fraction of a window is available,
                 * just use it.
                 */
-               chunk /= sysctl_tcp_tso_win_divisor;
+               chunk /= win_divisor;
                if (limit >= chunk)
                        goto send_now;
        } else {
index b1155554bb183fff1acc381ea3c17b3cdcd67034..4f4483e697bd09e541624dca9c58a3afa5cdbbe3 100644 (file)
@@ -1173,6 +1173,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
                                sizeof (struct ipv6hdr);
 
                        dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
+                       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+                               dev->mtu-=8;
 
                        if (dev->mtu < IPV6_MIN_MTU)
                                dev->mtu = IPV6_MIN_MTU;
@@ -1361,12 +1363,17 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
 
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
+       struct ip6_tnl *t;
+
        dev->netdev_ops = &ip6_tnl_netdev_ops;
        dev->destructor = ip6_dev_free;
 
        dev->type = ARPHRD_TUNNEL6;
        dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
        dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
+       t = netdev_priv(dev);
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               dev->mtu-=8;
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        dev->features |= NETIF_F_NETNS_LOCAL;
index e18f8413020311019d187652021ca11a604361b0..2342545a5ee9bfe125ff3030bac07cafcb24a00b 100644 (file)
@@ -1259,7 +1259,8 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        if (ra_msg->icmph.icmp6_hop_limit) {
                in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
                if (rt)
-                       rt->dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit;
+                       dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+                                      ra_msg->icmph.icmp6_hop_limit);
        }
 
 skip_defrtr:
@@ -1377,7 +1378,7 @@ skip_linkparms:
                        in6_dev->cnf.mtu6 = mtu;
 
                        if (rt)
-                               rt->dst.metrics[RTAX_MTU-1] = mtu;
+                               dst_metric_set(&rt->dst, RTAX_MTU, mtu);
 
                        rt6_mtu_change(skb->dev, mtu);
                }
index 026caef0326caa90aee54a147d54014a20a0c039..4aed0812b512e0bf9cc628086b9de2b33667d518 100644 (file)
@@ -129,7 +129,6 @@ static struct rt6_info ip6_null_entry_template = {
                .__use          = 1,
                .obsolete       = -1,
                .error          = -ENETUNREACH,
-               .metrics        = { [RTAX_HOPLIMIT - 1] = 255, },
                .input          = ip6_pkt_discard,
                .output         = ip6_pkt_discard_out,
        },
@@ -150,7 +149,6 @@ static struct rt6_info ip6_prohibit_entry_template = {
                .__use          = 1,
                .obsolete       = -1,
                .error          = -EACCES,
-               .metrics        = { [RTAX_HOPLIMIT - 1] = 255, },
                .input          = ip6_pkt_prohibit,
                .output         = ip6_pkt_prohibit_out,
        },
@@ -166,7 +164,6 @@ static struct rt6_info ip6_blk_hole_entry_template = {
                .__use          = 1,
                .obsolete       = -1,
                .error          = -EINVAL,
-               .metrics        = { [RTAX_HOPLIMIT - 1] = 255, },
                .input          = dst_discard,
                .output         = dst_discard,
        },
@@ -844,7 +841,7 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
                new->input = dst_discard;
                new->output = dst_discard;
 
-               memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+               dst_copy_metrics(new, &ort->dst);
                new->dev = ort->dst.dev;
                if (new->dev)
                        dev_hold(new->dev);
@@ -928,10 +925,12 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
        if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
                rt6->rt6i_flags |= RTF_MODIFIED;
                if (mtu < IPV6_MIN_MTU) {
+                       u32 features = dst_metric(dst, RTAX_FEATURES);
                        mtu = IPV6_MIN_MTU;
-                       dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+                       features |= RTAX_FEATURE_ALLFRAG;
+                       dst_metric_set(dst, RTAX_FEATURES, features);
                }
-               dst->metrics[RTAX_MTU-1] = mtu;
+               dst_metric_set(dst, RTAX_MTU, mtu);
                call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
        }
 }
@@ -989,9 +988,9 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        rt->rt6i_idev     = idev;
        rt->rt6i_nexthop  = neigh;
        atomic_set(&rt->dst.__refcnt, 1);
-       rt->dst.metrics[RTAX_HOPLIMIT-1] = 255;
-       rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-       rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+       dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
+       dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(rt->rt6i_dev));
+       dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
        rt->dst.output  = ip6_output;
 
 #if 0  /* there's no chance to use these for ndisc */
@@ -1305,17 +1304,17 @@ install_route:
                                        goto out;
                                }
 
-                               rt->dst.metrics[type - 1] = nla_get_u32(nla);
+                               dst_metric_set(&rt->dst, type, nla_get_u32(nla));
                        }
                }
        }
 
        if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
-               rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
+               dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1);
        if (!dst_mtu(&rt->dst))
-               rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
+               dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(dev));
        if (!dst_metric(&rt->dst, RTAX_ADVMSS))
-               rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+               dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
        rt->dst.dev = dev;
        rt->rt6i_idev = idev;
        rt->rt6i_table = table;
@@ -1541,9 +1540,9 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
        ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
        nrt->rt6i_nexthop = neigh_clone(neigh);
        /* Reset pmtu, it may be better */
-       nrt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
-       nrt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
-                                                       dst_mtu(&nrt->dst));
+       dst_metric_set(&nrt->dst, RTAX_MTU, ipv6_get_mtu(neigh->dev));
+       dst_metric_set(&nrt->dst, RTAX_ADVMSS, ipv6_advmss(dev_net(neigh->dev),
+                                                          dst_mtu(&nrt->dst)));
 
        if (ip6_ins_rt(nrt))
                goto out;
@@ -1602,9 +1601,12 @@ static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
           would return automatically.
         */
        if (rt->rt6i_flags & RTF_CACHE) {
-               rt->dst.metrics[RTAX_MTU-1] = pmtu;
-               if (allfrag)
-                       rt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+               dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
+               if (allfrag) {
+                       u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
+                       features |= RTAX_FEATURE_ALLFRAG;
+                       dst_metric_set(&rt->dst, RTAX_FEATURES, features);
+               }
                dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
                rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
                goto out;
@@ -1621,9 +1623,12 @@ static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
                nrt = rt6_alloc_clone(rt, daddr);
 
        if (nrt) {
-               nrt->dst.metrics[RTAX_MTU-1] = pmtu;
-               if (allfrag)
-                       nrt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+               dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
+               if (allfrag) {
+                       u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
+                       features |= RTAX_FEATURE_ALLFRAG;
+                       dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
+               }
 
                /* According to RFC 1981, detecting PMTU increase shouldn't be
                 * happened within 5 mins, the recommended timer is 10 mins.
@@ -1674,7 +1679,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
                rt->dst.input = ort->dst.input;
                rt->dst.output = ort->dst.output;
 
-               memcpy(rt->dst.metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+               dst_copy_metrics(&rt->dst, &ort->dst);
                rt->dst.error = ort->dst.error;
                rt->dst.dev = ort->dst.dev;
                if (rt->dst.dev)
@@ -1966,9 +1971,9 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        rt->dst.output = ip6_output;
        rt->rt6i_dev = net->loopback_dev;
        rt->rt6i_idev = idev;
-       rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-       rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
-       rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
+       dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(rt->rt6i_dev));
+       dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
+       dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1);
        rt->dst.obsolete = -1;
 
        rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
@@ -2068,8 +2073,8 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
            (dst_mtu(&rt->dst) >= arg->mtu ||
             (dst_mtu(&rt->dst) < arg->mtu &&
              dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
-               rt->dst.metrics[RTAX_MTU-1] = arg->mtu;
-               rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
+               dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
+               dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, arg->mtu));
        }
        return 0;
 }
@@ -2295,7 +2300,7 @@ static int rt6_fill_node(struct net *net,
                        NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
        }
 
-       if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
+       if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto nla_put_failure;
 
        if (rt->dst.neighbour)
@@ -2686,6 +2691,7 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.ip6_null_entry->dst.path =
                (struct dst_entry *)net->ipv6.ip6_null_entry;
        net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+       dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2696,6 +2702,7 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.ip6_prohibit_entry->dst.path =
                (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
        net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+       dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255);
 
        net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
                                               sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2705,6 +2712,7 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.ip6_blk_hole_entry->dst.path =
                (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
        net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+       dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255);
 #endif
 
        net->ipv6.sysctl.flush_delay = 0;
index 6e48a80d0f25a234551226b3b62719822fe74e84..8ce38f10a547e68fee50a9151cfb8727770bceda 100644 (file)
@@ -606,8 +606,9 @@ static int ipip6_rcv(struct sk_buff *skb)
                return 0;
        }
 
-       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+       /* no tunnel matched, let upstream know, ipsec may handle it */
        rcu_read_unlock();
+       return 1;
 out:
        kfree_skb(skb);
        return 0;
index b541a4e009fba179d939cfe71b4d083168831728..7aad12770867bde1478e2e516b61b3dcddf5aa54 100644 (file)
@@ -54,8 +54,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
        const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
        const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-       __be32 sk1_rcv_saddr = inet_sk(sk)->inet_rcv_saddr;
-       __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
+       __be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
+       __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
        int sk_ipv6only = ipv6_only_sock(sk);
        int sk2_ipv6only = inet_v6_ipv6only(sk2);
        int addr_type = ipv6_addr_type(sk_rcv_saddr6);
index 04635e88e8ed3ef25b0bdac9170506ee390c93b6..110efb704c9b93eca1d86db0d252592f117de6b0 100644 (file)
@@ -672,4 +672,8 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
 MODULE_DESCRIPTION("L2TP over IP");
 MODULE_VERSION("1.0");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+
+/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
+ * enums
+ */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
index 582612998211d24aa8d4aea919eed1ba1e994db3..dfd3a648a55107bda2ff14adb6f9e91c06449240 100644 (file)
@@ -316,9 +316,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        if (unlikely(addr->sllc_family != AF_LLC))
                goto out;
        rc = -ENODEV;
-       rtnl_lock();
+       rcu_read_lock();
        if (sk->sk_bound_dev_if) {
-               llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+               llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
                if (llc->dev) {
                        if (!addr->sllc_arphrd)
                                addr->sllc_arphrd = llc->dev->type;
@@ -329,14 +329,15 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
                            !llc_mac_match(addr->sllc_mac,
                                           llc->dev->dev_addr)) {
                                rc = -EINVAL;
-                               dev_put(llc->dev);
                                llc->dev = NULL;
                        }
                }
        } else
-               llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
+               llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
                                           addr->sllc_mac);
-       rtnl_unlock();
+       if (llc->dev)
+               dev_hold(llc->dev);
+       rcu_read_unlock();
        if (!llc->dev)
                goto out;
        if (!addr->sllc_sap) {
index 422705d62b5baa0e0796300a6ab2adc51e7d6e8c..246a04a1323483467158b1003abba9f88a03cb1f 100644 (file)
@@ -167,7 +167,6 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
 #define PGV_FROM_VMALLOC 1
 struct pgv {
        char *buffer;
-       unsigned char flags;
 };
 
 struct packet_ring_buffer {
@@ -224,6 +223,13 @@ struct packet_skb_cb {
 
 #define PACKET_SKB_CB(__skb)   ((struct packet_skb_cb *)((__skb)->cb))
 
+static inline __pure struct page *pgv_to_page(void *addr)
+{
+       if (is_vmalloc_addr(addr))
+               return vmalloc_to_page(addr);
+       return virt_to_page(addr);
+}
+
 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 {
        union {
@@ -236,11 +242,11 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_status = status;
-               flush_dcache_page(virt_to_page(&h.h1->tp_status));
+               flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                break;
        case TPACKET_V2:
                h.h2->tp_status = status;
-               flush_dcache_page(virt_to_page(&h.h2->tp_status));
+               flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                break;
        default:
                pr_err("TPACKET version not supported\n");
@@ -263,10 +269,10 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
-               flush_dcache_page(virt_to_page(&h.h1->tp_status));
+               flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                return h.h1->tp_status;
        case TPACKET_V2:
-               flush_dcache_page(virt_to_page(&h.h2->tp_status));
+               flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                return h.h2->tp_status;
        default:
                pr_err("TPACKET version not supported\n");
@@ -511,7 +517,8 @@ out_free:
        return err;
 }
 
-static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
+static inline unsigned int run_filter(const struct sk_buff *skb,
+                                     const struct sock *sk,
                                      unsigned int res)
 {
        struct sk_filter *filter;
@@ -526,15 +533,15 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 }
 
 /*
-   This function makes lazy skb cloning in hope that most of packets
-   are discarded by BPF.
-
-   Note tricky part: we DO mangle shared skb! skb->data, skb->len
-   and skb->cb are mangled. It works because (and until) packets
-   falling here are owned by current CPU. Output packets are cloned
-   by dev_queue_xmit_nit(), input packets are processed by net_bh
-   sequencially, so that if we return skb to original state on exit,
-   we will not harm anyone.
+ * This function makes lazy skb cloning in hope that most of packets
+ * are discarded by BPF.
+ *
+ * Note tricky part: we DO mangle shared skb! skb->data, skb->len
+ * and skb->cb are mangled. It works because (and until) packets
+ * falling here are owned by current CPU. Output packets are cloned
+ * by dev_queue_xmit_nit(), input packets are processed by net_bh
+ * sequencially, so that if we return skb to original state on exit,
+ * we will not harm anyone.
  */
 
 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -560,11 +567,11 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
        if (dev->header_ops) {
                /* The device has an explicit notion of ll header,
-                  exported to higher levels.
-
-                  Otherwise, the device hides datails of it frame
-                  structure, so that corresponding packet head
-                  never delivered to user.
+                * exported to higher levels.
+                *
+                * Otherwise, the device hides details of its frame
+                * structure, so that corresponding packet head is
+                * never delivered to user.
                 */
                if (sk->sk_type != SOCK_DGRAM)
                        skb_push(skb, skb->data - skb_mac_header(skb));
@@ -799,17 +806,15 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 
        __packet_set_status(po, h.raw, status);
        smp_mb();
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        {
-               struct page *p_start, *p_end;
-               u8 *h_end = h.raw + macoff + snaplen - 1;
-
-               p_start = virt_to_page(h.raw);
-               p_end = virt_to_page(h_end);
-               while (p_start <= p_end) {
-                       flush_dcache_page(p_start);
-                       p_start++;
-               }
+               u8 *start, *end;
+
+               end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
+               for (start = h.raw; start < end; start += PAGE_SIZE)
+                       flush_dcache_page(pgv_to_page(start));
        }
+#endif
 
        sk->sk_data_ready(sk, 0);
 
@@ -915,7 +920,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
        }
 
        err = -EFAULT;
-       page = virt_to_page(data);
        offset = offset_in_page(data);
        len_max = PAGE_SIZE - offset;
        len = ((to_write > len_max) ? len_max : to_write);
@@ -934,11 +938,11 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
                        return -EFAULT;
                }
 
+               page = pgv_to_page(data);
+               data += len;
                flush_dcache_page(page);
                get_page(page);
-               skb_fill_page_desc(skb,
-                               nr_frags,
-                               page++, offset, len);
+               skb_fill_page_desc(skb, nr_frags, page, offset, len);
                to_write -= len;
                offset = 0;
                len_max = PAGE_SIZE;
@@ -2340,7 +2344,7 @@ static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
 
        for (i = 0; i < len; i++) {
                if (likely(pg_vec[i].buffer)) {
-                       if (pg_vec[i].flags & PGV_FROM_VMALLOC)
+                       if (is_vmalloc_addr(pg_vec[i].buffer))
                                vfree(pg_vec[i].buffer);
                        else
                                free_pages((unsigned long)pg_vec[i].buffer,
@@ -2351,8 +2355,7 @@ static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
        kfree(pg_vec);
 }
 
-static inline char *alloc_one_pg_vec_page(unsigned long order,
-                                         unsigned char *flags)
+static inline char *alloc_one_pg_vec_page(unsigned long order)
 {
        char *buffer = NULL;
        gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
@@ -2366,7 +2369,6 @@ static inline char *alloc_one_pg_vec_page(unsigned long order,
        /*
         * __get_free_pages failed, fall back to vmalloc
         */
-       *flags |= PGV_FROM_VMALLOC;
        buffer = vzalloc((1 << order) * PAGE_SIZE);
 
        if (buffer)
@@ -2375,7 +2377,6 @@ static inline char *alloc_one_pg_vec_page(unsigned long order,
        /*
         * vmalloc failed, lets dig into swap here
         */
-       *flags = 0;
        gfp_flags &= ~__GFP_NORETRY;
        buffer = (char *)__get_free_pages(gfp_flags, order);
        if (buffer)
@@ -2398,8 +2399,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
                goto out;
 
        for (i = 0; i < block_nr; i++) {
-               pg_vec[i].buffer = alloc_one_pg_vec_page(order,
-                                                        &pg_vec[i].flags);
+               pg_vec[i].buffer = alloc_one_pg_vec_page(order);
                if (unlikely(!pg_vec[i].buffer))
                        goto out_free_pgvec;
        }
@@ -2409,7 +2409,6 @@ out:
 
 out_free_pgvec:
        free_pg_vec(pg_vec, order, block_nr);
-       kfree(pg_vec);
        pg_vec = NULL;
        goto out;
 }
@@ -2583,13 +2582,8 @@ static int packet_mmap(struct file *file, struct socket *sock,
                        void *kaddr = rb->pg_vec[i].buffer;
                        int pg_num;
 
-                       for (pg_num = 0; pg_num < rb->pg_vec_pages;
-                                       pg_num++) {
-                               if (rb->pg_vec[i].flags & PGV_FROM_VMALLOC)
-                                       page = vmalloc_to_page(kaddr);
-                               else
-                                       page = virt_to_page(kaddr);
-
+                       for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
+                               page = pgv_to_page(kaddr);
                                err = vm_insert_page(vma, start, page);
                                if (unlikely(err))
                                        goto out;
index 6bd554323a342d4db49be267766b87492bd2e584..842c7f3650b94b1fb1f33b2c0a32665e6f4bf252 100644 (file)
@@ -6047,7 +6047,7 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                 * will suddenly eat the receive_queue.
                 *
                 *  Look at current nfs client by the way...
-                *  However, this function was corrent in any case. 8)
+                *  However, this function was correct in any case. 8)
                 */
                if (flags & MSG_PEEK) {
                        spin_lock_bh(&sk->sk_receive_queue.lock);
index 7ff31c60186ab0ae0ff42f7d739894b38f6db8dc..417d7a6c36cf4a5256d17f0b2eb03f8b25e61eca 100644 (file)
@@ -1344,9 +1344,25 @@ static void unix_destruct_scm(struct sk_buff *skb)
        sock_wfree(skb);
 }
 
+#define MAX_RECURSION_LEVEL 4
+
 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 {
        int i;
+       unsigned char max_level = 0;
+       int unix_sock_count = 0;
+
+       for (i = scm->fp->count - 1; i >= 0; i--) {
+               struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+
+               if (sk) {
+                       unix_sock_count++;
+                       max_level = max(max_level,
+                                       unix_sk(sk)->recursion_level);
+               }
+       }
+       if (unlikely(max_level > MAX_RECURSION_LEVEL))
+               return -ETOOMANYREFS;
 
        /*
         * Need to duplicate file references for the sake of garbage
@@ -1357,9 +1373,11 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
        if (!UNIXCB(skb).fp)
                return -ENOMEM;
 
-       for (i = scm->fp->count-1; i >= 0; i--)
-               unix_inflight(scm->fp->fp[i]);
-       return 0;
+       if (unix_sock_count) {
+               for (i = scm->fp->count - 1; i >= 0; i--)
+                       unix_inflight(scm->fp->fp[i]);
+       }
+       return max_level;
 }
 
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
@@ -1394,6 +1412,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        struct sk_buff *skb;
        long timeo;
        struct scm_cookie tmp_scm;
+       int max_level;
 
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
@@ -1432,8 +1451,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
                goto out;
 
        err = unix_scm_to_skb(siocb->scm, skb, true);
-       if (err)
+       if (err < 0)
                goto out_free;
+       max_level = err + 1;
        unix_get_secdata(siocb->scm, skb);
 
        skb_reset_transport_header(skb);
@@ -1515,6 +1535,8 @@ restart:
        if (sock_flag(other, SOCK_RCVTSTAMP))
                __net_timestamp(skb);
        skb_queue_tail(&other->sk_receive_queue, skb);
+       if (max_level > unix_sk(other)->recursion_level)
+               unix_sk(other)->recursion_level = max_level;
        unix_state_unlock(other);
        other->sk_data_ready(other, len);
        sock_put(other);
@@ -1545,6 +1567,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
        int sent = 0;
        struct scm_cookie tmp_scm;
        bool fds_sent = false;
+       int max_level;
 
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
@@ -1608,10 +1631,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
                /* Only send the fds in the first buffer */
                err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
-               if (err) {
+               if (err < 0) {
                        kfree_skb(skb);
                        goto out_err;
                }
+               max_level = err + 1;
                fds_sent = true;
 
                err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
@@ -1627,6 +1651,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                        goto pipe_err_free;
 
                skb_queue_tail(&other->sk_receive_queue, skb);
+               if (max_level > unix_sk(other)->recursion_level)
+                       unix_sk(other)->recursion_level = max_level;
                unix_state_unlock(other);
                other->sk_data_ready(other, size);
                sent += size;
@@ -1847,6 +1873,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                unix_state_lock(sk);
                skb = skb_dequeue(&sk->sk_receive_queue);
                if (skb == NULL) {
+                       unix_sk(sk)->recursion_level = 0;
                        if (copied >= target)
                                goto unlock;
 
index c8df6fda0b1fcf124b65812f600710a3b3f17069..f89f83bf828ee0e713ded75c84da22a1c1b2426e 100644 (file)
@@ -96,7 +96,7 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
 unsigned int unix_tot_inflight;
 
 
-static struct sock *unix_get_socket(struct file *filp)
+struct sock *unix_get_socket(struct file *filp)
 {
        struct sock *u_sock = NULL;
        struct inode *inode = filp->f_path.dentry->d_inode;
@@ -259,9 +259,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
 }
 
 static bool gc_in_progress = false;
+#define UNIX_INFLIGHT_TRIGGER_GC 16000
 
 void wait_for_unix_gc(void)
 {
+       /*
+        * If number of inflight sockets is insane,
+        * force a garbage collect right now.
+        */
+       if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
+               unix_gc();
        wait_event(unix_gc_wait, gc_in_progress == false);
 }
 
index 4c81f6abb65b80dd608267baa35fefacaa2eb12d..4cbc942f762a9308ed37016b9aaf5fd3db06ce56 100644 (file)
@@ -398,6 +398,7 @@ void __exit x25_link_free(void)
        list_for_each_safe(entry, tmp, &x25_neigh_list) {
                nb = list_entry(entry, struct x25_neigh, node);
                __x25_remove_neigh(nb);
+               dev_put(nb->dev);
        }
        write_unlock_bh(&x25_neigh_list_lock);
 }
index a2023ec52329ef66b95e14dc94f7086e5683e534..1e98bc0fe0a54bbca2a7fc1f94ab9344af0fbded 100644 (file)
@@ -19,7 +19,7 @@ struct hlist_head *xfrm_hash_alloc(unsigned int sz)
        if (sz <= PAGE_SIZE)
                n = kzalloc(sz, GFP_KERNEL);
        else if (hashdist)
-               n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+               n = vzalloc(sz);
        else
                n = (struct hlist_head *)
                        __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
index 044e77898512b43d772f43659388aee51db19012..6e50ccd8c5325d31b16c912d0687a752b852c4d4 100644 (file)
@@ -1433,7 +1433,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                }
 
                xdst->route = dst;
-               memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));
+               dst_copy_metrics(dst1, dst);
 
                if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
                        family = xfrm[i]->props.family;
@@ -2271,7 +2271,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
                if (pmtu > route_mtu_cached)
                        pmtu = route_mtu_cached;
 
-               dst->metrics[RTAX_MTU-1] = pmtu;
+               dst_metric_set(dst, RTAX_MTU, pmtu);
        } while ((dst = dst->next));
 }
 
@@ -2349,7 +2349,7 @@ static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
                mtu = xfrm_state_mtu(dst->xfrm, mtu);
                if (mtu > last->route_mtu_cached)
                        mtu = last->route_mtu_cached;
-               dst->metrics[RTAX_MTU-1] = mtu;
+               dst_metric_set(dst, RTAX_MTU, mtu);
 
                if (last == first)
                        break;