]> git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge tag 'linux-can-next-for-3.20-20150204' of git://git.kernel.org/pub/scm/linux...
authorDavid S. Miller <davem@davemloft.net>
Thu, 5 Feb 2015 08:29:14 +0000 (00:29 -0800)
committerDavid S. Miller <davem@davemloft.net>
Thu, 5 Feb 2015 08:29:14 +0000 (00:29 -0800)
Marc Kleine-Budde says:

====================
pull-request: can-next 2015-02-04

this is a pull request of 2 patches for net-next/master.

Nicholas Mc Guire contributes a patch for the janz-ican3 driver to fix
a mismatch in an assignment. Ahmed S. Darwish contributes a patch for
the kvaser_usb driver, to make the driver more robust during the
bus-off handling.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
142 files changed:
Documentation/DocBook/80211.tmpl
crypto/af_alg.c
crypto/algif_hash.c
crypto/algif_skcipher.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/infiniband/hw/mlx4/ah.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/misc/vmw_vmci/vmci_queue_pair.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_values.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet.h
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_resources.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/ti/cpsw-common.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ieee802154/cc2520.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/csiostor/csio_hw_chip.h
drivers/scsi/csiostor/csio_mb.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/vhost.c
fs/afs/rxrpc.c
include/crypto/if_alg.h
include/linux/ieee80211.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/driver.h
include/linux/mlx4/qp.h
include/linux/netdevice.h
include/linux/rhashtable.h
include/linux/skbuff.h
include/linux/socket.h
include/linux/uio.h
include/linux/vmw_vmci_api.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/mgmt.h
include/net/bonding.h
include/net/cfg80211.h
include/net/mac80211.h
include/net/ping.h
include/net/sock.h
include/net/tcp.h
include/net/udplite.h
include/uapi/linux/nl80211.h
include/uapi/linux/pkt_sched.h
lib/Makefile
lib/iovec.c [deleted file]
lib/rhashtable.c
net/bluetooth/bnep/core.c
net/bluetooth/hci_core.c
net/bluetooth/hci_debugfs.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bluetooth/smp.c
net/core/Makefile
net/core/dev.c
net/core/flow_dissector.c
net/core/iovec.c [deleted file]
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/ipv4/fou.c
net/ipv4/ip_output.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv6/ping.c
net/ipv6/raw.c
net/mac80211/Kconfig
net/mac80211/Makefile
net/mac80211/aes_ccm.c
net/mac80211/aes_ccm.h
net/mac80211/aes_cmac.c
net/mac80211/aes_cmac.h
net/mac80211/aes_gcm.c [new file with mode: 0644]
net/mac80211/aes_gcm.h [new file with mode: 0644]
net/mac80211/aes_gmac.c [new file with mode: 0644]
net/mac80211/aes_gmac.h [new file with mode: 0644]
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs_key.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/main.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/wpa.c
net/mac80211/wpa.h
net/netfilter/nft_hash.c
net/netlink/af_netlink.c
net/rfkill/rfkill-gpio.c
net/rxrpc/ar-output.c
net/sched/sch_fq.c
net/socket.c
net/tipc/discover.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/node.c
net/tipc/socket.c
net/vmw_vsock/vmci_transport.c
net/wireless/nl80211.c
net/wireless/util.c

index 49b8b8907f36e0b9797f47e5a7de0f00e0c90db4..aac9357d4866bda01fcf2a0e532a5068b5d4bfbf 100644 (file)
       <section id="ps-client">
         <title>support for powersaving clients</title>
 !Pinclude/net/mac80211.h AP support for powersaving clients
-      </section>
 !Finclude/net/mac80211.h ieee80211_get_buffered_bc
 !Finclude/net/mac80211.h ieee80211_beacon_get
 !Finclude/net/mac80211.h ieee80211_sta_eosp
 !Finclude/net/mac80211.h ieee80211_sta_ps_transition_ni
 !Finclude/net/mac80211.h ieee80211_sta_set_buffered
 !Finclude/net/mac80211.h ieee80211_sta_block_awake
+      </section>
       </chapter>
 
       <chapter id="multi-iface">
           <title>RX A-MPDU aggregation</title>
 !Pnet/mac80211/agg-rx.c RX A-MPDU aggregation
 !Cnet/mac80211/agg-rx.c
-        </sect1>
 !Finclude/net/mac80211.h ieee80211_ampdu_mlme_action
+        </sect1>
       </chapter>
 
       <chapter id="smps">
index 4665b79c729ac1d59d699907d3e5c75628231cdb..eb78fe8a60c8e175af9b5e976510997eb87777c9 100644 (file)
@@ -338,49 +338,31 @@ static const struct net_proto_family alg_family = {
        .owner  =       THIS_MODULE,
 };
 
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
-                  int write)
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
 {
-       unsigned long from = (unsigned long)addr;
-       unsigned long npages;
-       unsigned off;
-       int err;
-       int i;
-
-       err = -EFAULT;
-       if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
-               goto out;
-
-       off = from & ~PAGE_MASK;
-       npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (npages > ALG_MAX_PAGES)
-               npages = ALG_MAX_PAGES;
+       size_t off;
+       ssize_t n;
+       int npages, i;
 
-       err = get_user_pages_fast(from, npages, write, sgl->pages);
-       if (err < 0)
-               goto out;
+       n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
+       if (n < 0)
+               return n;
 
-       npages = err;
-       err = -EINVAL;
+       npages = PAGE_ALIGN(off + n);
        if (WARN_ON(npages == 0))
-               goto out;
-
-       err = 0;
+               return -EINVAL;
 
        sg_init_table(sgl->sg, npages);
 
-       for (i = 0; i < npages; i++) {
+       for (i = 0, len = n; i < npages; i++) {
                int plen = min_t(int, len, PAGE_SIZE - off);
 
                sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
 
                off = 0;
                len -= plen;
-               err += plen;
        }
-
-out:
-       return err;
+       return n;
 }
 EXPORT_SYMBOL_GPL(af_alg_make_sg);
 
index 01f56eb7816ee4320a9acd9632c3e4b5bc3233ee..01da360bdb5510b78eac0ee43630795c4a011d76 100644 (file)
@@ -41,8 +41,6 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct hash_ctx *ctx = ask->private;
-       unsigned long iovlen;
-       const struct iovec *iov;
        long copied = 0;
        int err;
 
@@ -58,37 +56,28 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
 
        ctx->more = 0;
 
-       for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
-            iovlen--, iov++) {
-               unsigned long seglen = iov->iov_len;
-               char __user *from = iov->iov_base;
+       while (iov_iter_count(&msg->msg_iter)) {
+               int len = iov_iter_count(&msg->msg_iter);
 
-               while (seglen) {
-                       int len = min_t(unsigned long, seglen, limit);
-                       int newlen;
+               if (len > limit)
+                       len = limit;
 
-                       newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
-                       if (newlen < 0) {
-                               err = copied ? 0 : newlen;
-                               goto unlock;
-                       }
-
-                       ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
-                                               newlen);
-
-                       err = af_alg_wait_for_completion(
-                               crypto_ahash_update(&ctx->req),
-                               &ctx->completion);
+               len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
+               if (len < 0) {
+                       err = copied ? 0 : len;
+                       goto unlock;
+               }
 
-                       af_alg_free_sg(&ctx->sgl);
+               ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
 
-                       if (err)
-                               goto unlock;
+               err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
+                                                &ctx->completion);
+               af_alg_free_sg(&ctx->sgl);
+               if (err)
+                       goto unlock;
 
-                       seglen -= newlen;
-                       from += newlen;
-                       copied += newlen;
-               }
+               copied += len;
+               iov_iter_advance(&msg->msg_iter, len);
        }
 
        err = 0;
index c12207c8dde9e6b6a5365783f299623e7a221914..37110fd68adfecb84a2b78663c937f6567106da1 100644 (file)
@@ -426,67 +426,59 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
                &ctx->req));
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
-       unsigned long iovlen;
-       const struct iovec *iov;
        int err = -EAGAIN;
        int used;
        long copied = 0;
 
        lock_sock(sk);
-       for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
-            iovlen--, iov++) {
-               unsigned long seglen = iov->iov_len;
-               char __user *from = iov->iov_base;
-
-               while (seglen) {
-                       sgl = list_first_entry(&ctx->tsgl,
-                                              struct skcipher_sg_list, list);
-                       sg = sgl->sg;
-
-                       while (!sg->length)
-                               sg++;
-
-                       if (!ctx->used) {
-                               err = skcipher_wait_for_data(sk, flags);
-                               if (err)
-                                       goto unlock;
-                       }
+       while (iov_iter_count(&msg->msg_iter)) {
+               sgl = list_first_entry(&ctx->tsgl,
+                                      struct skcipher_sg_list, list);
+               sg = sgl->sg;
 
-                       used = min_t(unsigned long, ctx->used, seglen);
+               while (!sg->length)
+                       sg++;
 
-                       used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
-                       err = used;
-                       if (err < 0)
+               used = ctx->used;
+               if (!used) {
+                       err = skcipher_wait_for_data(sk, flags);
+                       if (err)
                                goto unlock;
+               }
+
+               used = min_t(unsigned long, used, iov_iter_count(&msg->msg_iter));
+
+               used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
+               err = used;
+               if (err < 0)
+                       goto unlock;
 
-                       if (ctx->more || used < ctx->used)
-                               used -= used % bs;
+               if (ctx->more || used < ctx->used)
+                       used -= used % bs;
 
-                       err = -EINVAL;
-                       if (!used)
-                               goto free;
+               err = -EINVAL;
+               if (!used)
+                       goto free;
 
-                       ablkcipher_request_set_crypt(&ctx->req, sg,
-                                                    ctx->rsgl.sg, used,
-                                                    ctx->iv);
+               ablkcipher_request_set_crypt(&ctx->req, sg,
+                                            ctx->rsgl.sg, used,
+                                            ctx->iv);
 
-                       err = af_alg_wait_for_completion(
+               err = af_alg_wait_for_completion(
                                ctx->enc ?
                                        crypto_ablkcipher_encrypt(&ctx->req) :
                                        crypto_ablkcipher_decrypt(&ctx->req),
                                &ctx->completion);
 
 free:
-                       af_alg_free_sg(&ctx->rsgl);
+               af_alg_free_sg(&ctx->rsgl);
 
-                       if (err)
-                               goto unlock;
+               if (err)
+                       goto unlock;
 
-                       copied += used;
-                       from += used;
-                       seglen -= used;
-                       skcipher_pull_sgl(sk, used);
-               }
+               copied += used;
+               skcipher_pull_sgl(sk, used);
+               iov_iter_advance(&msg->msg_iter, used);
        }
 
        err = 0;
index 1ee27ac18de052e660fcafc148b6dbfd76a2499f..de4c8499cbac958f0100f0004e38884839281729 100644 (file)
@@ -108,6 +108,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x13d3, 0x3393) },
        { USB_DEVICE(0x13d3, 0x3402) },
        { USB_DEVICE(0x13d3, 0x3408) },
+       { USB_DEVICE(0x13d3, 0x3423) },
        { USB_DEVICE(0x13d3, 0x3432) },
 
        /* Atheros AR5BBU12 with sflash firmware */
@@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU22 with sflash firmware */
@@ -174,6 +176,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
 #define USB_REQ_DFU_DNLOAD     1
 #define BULK_SIZE              4096
 #define FW_HDR_SIZE            20
+#define TIMEGAP_USEC_MIN       50
+#define TIMEGAP_USEC_MAX       100
 
 static int ath3k_load_firmware(struct usb_device *udev,
                                const struct firmware *firmware)
@@ -205,6 +209,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
        pipe = usb_sndbulkpipe(udev, 0x02);
 
        while (count) {
+               /* workaround the compatibility issue with xHCI controller*/
+               usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
                size = min_t(uint, count, BULK_SIZE);
                memcpy(send_buf, firmware->data + sent, size);
 
@@ -302,6 +309,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
        pipe = usb_sndbulkpipe(udev, 0x02);
 
        while (count) {
+               /* workaround the compatibility issue with xHCI controller*/
+               usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
                size = min_t(uint, count, BULK_SIZE);
                memcpy(send_buf, firmware->data + sent, size);
 
index 4a6495ab97268b6d3bd5a6895fb504a9926f3e89..b876888811432a9bad46ab73a32ca40b04ed2ce4 100644 (file)
@@ -28,7 +28,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-#define VERSION "0.6"
+#define VERSION "0.7"
 
 static bool disable_scofix;
 static bool force_scofix;
@@ -50,11 +50,16 @@ static struct usb_driver btusb_driver;
 #define BTUSB_BCM_PATCHRAM     0x400
 #define BTUSB_MARVELL          0x800
 #define BTUSB_SWAVE            0x1000
+#define BTUSB_INTEL_NEW                0x2000
+#define BTUSB_AMP              0x4000
 
 static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
        { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
 
+       /* Generic Bluetooth AMP device */
+       { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
+
        /* Apple-specific (Broadcom) devices */
        { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
 
@@ -110,16 +115,24 @@ static const struct usb_device_id btusb_table[] = {
        { USB_DEVICE(0x13d3, 0x3404),
          .driver_info = BTUSB_BCM_PATCHRAM },
 
+       /* Broadcom BCM20702B0 (Dynex/Insignia) */
+       { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
+
        /* Foxconn - Hon Hai */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
          .driver_info = BTUSB_BCM_PATCHRAM },
 
+       /* Lite-On Technology - Broadcom based */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x04ca, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_PATCHRAM },
+
        /* Broadcom devices with vendor specific id */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
          .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* ASUSTek Computer - Broadcom based */
-       { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* Belkin F8065bf - Broadcom based */
        { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
@@ -189,6 +202,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU12 with sflash firmware */
@@ -253,13 +267,18 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x16d3, 0x0002),
          .driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC },
 
-       /* Intel Bluetooth device */
+       /* Marvell Bluetooth devices */
+       { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
+       { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+
+       /* Intel Bluetooth devices */
        { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
        { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
+       { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
 
-       /* Marvell device */
-       { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
-       { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+       /* Other Intel Bluetooth devices */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
+         .driver_info = BTUSB_IGNORE },
 
        { }     /* Terminating entry */
 };
@@ -271,6 +290,11 @@ static const struct usb_device_id blacklist_table[] = {
 #define BTUSB_ISOC_RUNNING     2
 #define BTUSB_SUSPENDING       3
 #define BTUSB_DID_ISO_RESUME   4
+#define BTUSB_BOOTLOADER       5
+#define BTUSB_DOWNLOADING      6
+#define BTUSB_FIRMWARE_LOADED  7
+#define BTUSB_FIRMWARE_FAILED  8
+#define BTUSB_BOOTING          9
 
 struct btusb_data {
        struct hci_dev       *hdev;
@@ -304,6 +328,7 @@ struct btusb_data {
        struct usb_endpoint_descriptor *isoc_rx_ep;
 
        __u8 cmdreq_type;
+       __u8 cmdreq;
 
        unsigned int sco_num;
        int isoc_altsetting;
@@ -313,6 +338,16 @@ struct btusb_data {
        int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
 };
 
+static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout,
+                                    unsigned mode)
+{
+       might_sleep();
+       if (!test_bit(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout,
+                                              mode, timeout);
+}
+
 static inline void btusb_free_frags(struct btusb_data *data)
 {
        unsigned long flags;
@@ -957,7 +992,7 @@ static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb)
        }
 
        dr->bRequestType = data->cmdreq_type;
-       dr->bRequest     = 0;
+       dr->bRequest     = data->cmdreq;
        dr->wIndex       = 0;
        dr->wValue       = 0;
        dr->wLength      = __cpu_to_le16(skb->len);
@@ -1295,6 +1330,26 @@ struct intel_version {
        u8 fw_patch_num;
 } __packed;
 
+struct intel_boot_params {
+       __u8     status;
+       __u8     otp_format;
+       __u8     otp_content;
+       __u8     otp_patch;
+       __le16   dev_revid;
+       __u8     secure_boot;
+       __u8     key_from_hdr;
+       __u8     key_type;
+       __u8     otp_lock;
+       __u8     api_lock;
+       __u8     debug_lock;
+       bdaddr_t otp_bdaddr;
+       __u8     min_fw_build_nn;
+       __u8     min_fw_build_cw;
+       __u8     min_fw_build_yy;
+       __u8     limited_cce;
+       __u8     unlocked_state;
+} __packed;
+
 static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
                                                       struct intel_version *ver)
 {
@@ -1703,6 +1758,562 @@ exit_mfg_deactivate:
        return 0;
 }
 
+static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+       struct sk_buff *skb;
+       struct hci_event_hdr *hdr;
+       struct hci_ev_cmd_complete *evt;
+
+       skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+       if (!skb)
+               return -ENOMEM;
+
+       hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+       hdr->evt = HCI_EV_CMD_COMPLETE;
+       hdr->plen = sizeof(*evt) + 1;
+
+       evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+       evt->ncmd = 0x01;
+       evt->opcode = cpu_to_le16(opcode);
+
+       *skb_put(skb, 1) = 0x00;
+
+       bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+       return hci_recv_frame(hdev, skb);
+}
+
+static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer,
+                                int count)
+{
+       /* When the device is in bootloader mode, then it can send
+        * events via the bulk endpoint. These events are treated the
+        * same way as the ones received from the interrupt endpoint.
+        */
+       if (test_bit(BTUSB_BOOTLOADER, &data->flags))
+               return btusb_recv_intr(data, buffer, count);
+
+       return btusb_recv_bulk(data, buffer, count);
+}
+
+static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct btusb_data *data = hci_get_drvdata(hdev);
+
+       if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+               struct hci_event_hdr *hdr = (void *)skb->data;
+
+               /* When the firmware loading completes the device sends
+                * out a vendor specific event indicating the result of
+                * the firmware loading.
+                */
+               if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
+                   skb->data[2] == 0x06) {
+                       if (skb->data[3] != 0x00)
+                               test_bit(BTUSB_FIRMWARE_FAILED, &data->flags);
+
+                       if (test_and_clear_bit(BTUSB_DOWNLOADING,
+                                              &data->flags) &&
+                           test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) {
+                               smp_mb__after_atomic();
+                               wake_up_bit(&data->flags, BTUSB_DOWNLOADING);
+                       }
+               }
+
+               /* When switching to the operational firmware the device
+                * sends a vendor specific event indicating that the bootup
+                * completed.
+                */
+               if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
+                   skb->data[2] == 0x02) {
+                       if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) {
+                               smp_mb__after_atomic();
+                               wake_up_bit(&data->flags, BTUSB_BOOTING);
+                       }
+               }
+       }
+
+       return hci_recv_frame(hdev, skb);
+}
+
+static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct btusb_data *data = hci_get_drvdata(hdev);
+       struct urb *urb;
+
+       BT_DBG("%s", hdev->name);
+
+       if (!test_bit(HCI_RUNNING, &hdev->flags))
+               return -EBUSY;
+
+       switch (bt_cb(skb)->pkt_type) {
+       case HCI_COMMAND_PKT:
+               if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       struct hci_command_hdr *cmd = (void *)skb->data;
+                       __u16 opcode = le16_to_cpu(cmd->opcode);
+
+                       /* When in bootloader mode and the command 0xfc09
+                        * is received, it needs to be send down the
+                        * bulk endpoint. So allocate a bulk URB instead.
+                        */
+                       if (opcode == 0xfc09)
+                               urb = alloc_bulk_urb(hdev, skb);
+                       else
+                               urb = alloc_ctrl_urb(hdev, skb);
+
+                       /* When the 0xfc01 command is issued to boot into
+                        * the operational firmware, it will actually not
+                        * send a command complete event. To keep the flow
+                        * control working inject that event here.
+                        */
+                       if (opcode == 0xfc01)
+                               inject_cmd_complete(hdev, opcode);
+               } else {
+                       urb = alloc_ctrl_urb(hdev, skb);
+               }
+               if (IS_ERR(urb))
+                       return PTR_ERR(urb);
+
+               hdev->stat.cmd_tx++;
+               return submit_or_queue_tx_urb(hdev, urb);
+
+       case HCI_ACLDATA_PKT:
+               urb = alloc_bulk_urb(hdev, skb);
+               if (IS_ERR(urb))
+                       return PTR_ERR(urb);
+
+               hdev->stat.acl_tx++;
+               return submit_or_queue_tx_urb(hdev, urb);
+
+       case HCI_SCODATA_PKT:
+               if (hci_conn_num(hdev, SCO_LINK) < 1)
+                       return -ENODEV;
+
+               urb = alloc_isoc_urb(hdev, skb);
+               if (IS_ERR(urb))
+                       return PTR_ERR(urb);
+
+               hdev->stat.sco_tx++;
+               return submit_tx_urb(hdev, urb);
+       }
+
+       return -EILSEQ;
+}
+
+static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+                                  u32 plen, const void *param)
+{
+       while (plen > 0) {
+               struct sk_buff *skb;
+               u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
+
+               cmd_param[0] = fragment_type;
+               memcpy(cmd_param + 1, param, fragment_len);
+
+               skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+                                    cmd_param, HCI_INIT_TIMEOUT);
+               if (IS_ERR(skb))
+                       return PTR_ERR(skb);
+
+               kfree_skb(skb);
+
+               plen -= fragment_len;
+               param += fragment_len;
+       }
+
+       return 0;
+}
+
+static void btusb_intel_version_info(struct hci_dev *hdev,
+                                    struct intel_version *ver)
+{
+       const char *variant;
+
+       switch (ver->fw_variant) {
+       case 0x06:
+               variant = "Bootloader";
+               break;
+       case 0x23:
+               variant = "Firmware";
+               break;
+       default:
+               return;
+       }
+
+       BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
+               variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+               ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+}
+
+static int btusb_setup_intel_new(struct hci_dev *hdev)
+{
+       static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+                                         0x00, 0x08, 0x04, 0x00 };
+       struct btusb_data *data = hci_get_drvdata(hdev);
+       struct sk_buff *skb;
+       struct intel_version *ver;
+       struct intel_boot_params *params;
+       const struct firmware *fw;
+       const u8 *fw_ptr;
+       char fwname[64];
+       ktime_t calltime, delta, rettime;
+       unsigned long long duration;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       calltime = ktime_get();
+
+       /* Read the Intel version information to determine if the device
+        * is in bootloader mode or if it already has operational firmware
+        * loaded.
+        */
+       skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reading Intel version information failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*ver)) {
+               BT_ERR("%s: Intel version event size mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EILSEQ;
+       }
+
+       ver = (struct intel_version *)skb->data;
+       if (ver->status) {
+               BT_ERR("%s: Intel version command failure (%02x)",
+                      hdev->name, ver->status);
+               err = -bt_to_errno(ver->status);
+               kfree_skb(skb);
+               return err;
+       }
+
+       /* The hardware platform number has a fixed value of 0x37 and
+        * for now only accept this single value.
+        */
+       if (ver->hw_platform != 0x37) {
+               BT_ERR("%s: Unsupported Intel hardware platform (%u)",
+                      hdev->name, ver->hw_platform);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+        * supported by this firmware loading method. This check has been
+        * put in place to ensure correct forward compatibility options
+        * when newer hardware variants come along.
+        */
+       if (ver->hw_variant != 0x0b) {
+               BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+                      hdev->name, ver->hw_variant);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       btusb_intel_version_info(hdev, ver);
+
+       /* The firmware variant determines if the device is in bootloader
+        * mode or is running operational firmware. The value 0x06 identifies
+        * the bootloader and the value 0x23 identifies the operational
+        * firmware.
+        *
+        * When the operational firmware is already present, then only
+        * the check for valid Bluetooth device address is needed. This
+        * determines if the device will be added as configured or
+        * unconfigured controller.
+        *
+        * It is not possible to use the Secure Boot Parameters in this
+        * case since that command is only available in bootloader mode.
+        */
+       if (ver->fw_variant == 0x23) {
+               kfree_skb(skb);
+               clear_bit(BTUSB_BOOTLOADER, &data->flags);
+               btusb_check_bdaddr_intel(hdev);
+               return 0;
+       }
+
+       /* If the device is not in bootloader mode, then the only possible
+        * choice is to return an error and abort the device initialization.
+        */
+       if (ver->fw_variant != 0x06) {
+               BT_ERR("%s: Unsupported Intel firmware variant (%u)",
+                      hdev->name, ver->fw_variant);
+               kfree_skb(skb);
+               return -ENODEV;
+       }
+
+       kfree_skb(skb);
+
+       /* Read the secure boot parameters to identify the operating
+        * details of the bootloader.
+        */
+       skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*params)) {
+               BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EILSEQ;
+       }
+
+       params = (struct intel_boot_params *)skb->data;
+       if (params->status) {
+               BT_ERR("%s: Intel boot parameters command failure (%02x)",
+                      hdev->name, params->status);
+               err = -bt_to_errno(params->status);
+               kfree_skb(skb);
+               return err;
+       }
+
+       BT_INFO("%s: Device revision is %u", hdev->name,
+               le16_to_cpu(params->dev_revid));
+
+       BT_INFO("%s: Secure boot is %s", hdev->name,
+               params->secure_boot ? "enabled" : "disabled");
+
+       BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+               params->min_fw_build_nn, params->min_fw_build_cw,
+               2000 + params->min_fw_build_yy);
+
+       /* It is required that every single firmware fragment is acknowledged
+        * with a command complete event. If the boot parameters indicate
+        * that this bootloader does not send them, then abort the setup.
+        */
+       if (params->limited_cce != 0x00) {
+               BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
+                      hdev->name, params->limited_cce);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       /* If the OTP has no valid Bluetooth device address, then there will
+        * also be no valid address for the operational firmware.
+        */
+       if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+               BT_INFO("%s: No device address configured", hdev->name);
+               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+       }
+
+       /* With this Intel bootloader only the hardware variant and device
+        * revision information are used to select the right firmware.
+        *
+        * Currently this bootloader support is limited to hardware variant
+        * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+        */
+       snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
+                le16_to_cpu(params->dev_revid));
+
+       err = request_firmware(&fw, fwname, &hdev->dev);
+       if (err < 0) {
+               BT_ERR("%s: Failed to load Intel firmware file (%d)",
+                      hdev->name, err);
+               kfree_skb(skb);
+               return err;
+       }
+
+       BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+       kfree_skb(skb);
+
+       if (fw->size < 644) {
+               BT_ERR("%s: Invalid size of firmware file (%zu)",
+                      hdev->name, fw->size);
+               err = -EBADF;
+               goto done;
+       }
+
+       set_bit(BTUSB_DOWNLOADING, &data->flags);
+
+       /* Start the firmware download transaction with the Init fragment
+        * represented by the 128 bytes of CSS header.
+        */
+       err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware header (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       /* Send the 256 bytes of public key information from the firmware
+        * as the PKey fragment.
+        */
+       err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware public key (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       /* Send the 256 bytes of signature information from the firmware
+        * as the Sign fragment.
+        */
+       err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware signature (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       fw_ptr = fw->data + 644;
+
+       while (fw_ptr - fw->data < fw->size) {
+               struct hci_command_hdr *cmd = (void *)fw_ptr;
+               u8 cmd_len;
+
+               cmd_len = sizeof(*cmd) + cmd->plen;
+
+               /* Send each command from the firmware data buffer as
+                * a single Data fragment.
+                */
+               err = btusb_intel_secure_send(hdev, 0x01, cmd_len, fw_ptr);
+               if (err < 0) {
+                       BT_ERR("%s: Failed to send firmware data (%d)",
+                              hdev->name, err);
+                       goto done;
+               }
+
+               fw_ptr += cmd_len;
+       }
+
+       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+
+       BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+       /* Before switching the device into operational mode and with that
+        * booting the loaded firmware, wait for the bootloader notification
+        * that all fragments have been successfully received.
+        *
+        * When the event processing receives the notification, then the
+        * BTUSB_DOWNLOADING flag will be cleared.
+        *
+        * The firmware loading should not take longer than 5 seconds
+        * and thus just timeout if that happens and fail the setup
+        * of this device.
+        */
+       err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
+                                       msecs_to_jiffies(5000),
+                                       TASK_INTERRUPTIBLE);
+       if (err == 1) {
+               BT_ERR("%s: Firmware loading interrupted", hdev->name);
+               err = -EINTR;
+               goto done;
+       }
+
+       if (err) {
+               BT_ERR("%s: Firmware loading timeout", hdev->name);
+               err = -ETIMEDOUT;
+               goto done;
+       }
+
+       if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
+               BT_ERR("%s: Firmware loading failed", hdev->name);
+               err = -ENOEXEC;
+               goto done;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+
+done:
+       release_firmware(fw);
+
+       if (err < 0)
+               return err;
+
+       calltime = ktime_get();
+
+       set_bit(BTUSB_BOOTING, &data->flags);
+
+       skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       kfree_skb(skb);
+
+       /* The bootloader will not indicate when the device is ready. This
+        * is done by the operational firmware sending bootup notification.
+        *
+        * Booting into operational firmware should not take longer than
+        * 1 second. However if that happens, then just fail the setup
+        * since something went wrong.
+        */
+       BT_INFO("%s: Waiting for device to boot", hdev->name);
+
+       err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
+                                       msecs_to_jiffies(1000),
+                                       TASK_INTERRUPTIBLE);
+
+       if (err == 1) {
+               BT_ERR("%s: Device boot interrupted", hdev->name);
+               return -EINTR;
+       }
+
+       if (err) {
+               BT_ERR("%s: Device boot timeout", hdev->name);
+               return -ETIMEDOUT;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+
+       clear_bit(BTUSB_BOOTLOADER, &data->flags);
+
+       return 0;
+}
+
+/* Vendor hook called by the HCI core when the controller reports a
+ * Hardware Error event. Resets the controller and then retrieves the
+ * Intel vendor-specific exception information (vendor opcode 0xfc22)
+ * so the failure reason ends up in the kernel log. Best effort only:
+ * every failure along the way is logged and the function returns.
+ */
+static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
+{
+       struct sk_buff *skb;
+       u8 type = 0x00;
+
+       BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+
+       /* Reset the controller first so it is in a sane state before the
+        * exception info is read.
+        */
+       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reset after hardware error failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return;
+       }
+       kfree_skb(skb);
+
+       /* Read Intel exception info; "type" selects which record to fetch. */
+       skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return;
+       }
+
+       /* Expected response: 1 status byte followed by 12 bytes of info. */
+       if (skb->len != 13) {
+               BT_ERR("%s: Exception info size mismatch", hdev->name);
+               kfree_skb(skb);
+               return;
+       }
+
+       /* First byte is the command status; 0x00 means success. */
+       if (skb->data[0] != 0x00) {
+               BT_ERR("%s: Exception info command failure (%02x)",
+                      hdev->name, skb->data[0]);
+               kfree_skb(skb);
+               return;
+       }
+
+       /* Remaining payload is logged as a text string. */
+       BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+
+       kfree_skb(skb);
+}
+
 static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 {
        struct sk_buff *skb;
@@ -2033,7 +2644,13 @@ static int btusb_probe(struct usb_interface *intf,
        if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
                return -ENODEV;
 
-       data->cmdreq_type = USB_TYPE_CLASS;
+       if (id->driver_info & BTUSB_AMP) {
+               data->cmdreq_type = USB_TYPE_CLASS | 0x01;
+               data->cmdreq = 0x2b;
+       } else {
+               data->cmdreq_type = USB_TYPE_CLASS;
+               data->cmdreq = 0x00;
+       }
 
        data->udev = interface_to_usbdev(intf);
        data->intf = intf;
@@ -2049,8 +2666,14 @@ static int btusb_probe(struct usb_interface *intf,
        init_usb_anchor(&data->isoc_anchor);
        spin_lock_init(&data->rxlock);
 
-       data->recv_event = hci_recv_frame;
-       data->recv_bulk = btusb_recv_bulk;
+       if (id->driver_info & BTUSB_INTEL_NEW) {
+               data->recv_event = btusb_recv_event_intel;
+               data->recv_bulk = btusb_recv_bulk_intel;
+               set_bit(BTUSB_BOOTLOADER, &data->flags);
+       } else {
+               data->recv_event = hci_recv_frame;
+               data->recv_bulk = btusb_recv_bulk;
+       }
 
        hdev = hci_alloc_dev();
        if (!hdev)
@@ -2059,6 +2682,11 @@ static int btusb_probe(struct usb_interface *intf,
        hdev->bus = HCI_USB;
        hci_set_drvdata(hdev, data);
 
+       if (id->driver_info & BTUSB_AMP)
+               hdev->dev_type = HCI_AMP;
+       else
+               hdev->dev_type = HCI_BREDR;
+
        data->hdev = hdev;
 
        SET_HCIDEV_DEV(hdev, &intf->dev);
@@ -2081,6 +2709,15 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_INTEL) {
                hdev->setup = btusb_setup_intel;
                hdev->set_bdaddr = btusb_set_bdaddr_intel;
+               set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+       }
+
+       if (id->driver_info & BTUSB_INTEL_NEW) {
+               hdev->send = btusb_send_frame_intel;
+               hdev->setup = btusb_setup_intel_new;
+               hdev->hw_error = btusb_hw_error_intel;
+               hdev->set_bdaddr = btusb_set_bdaddr_intel;
+               set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
        }
 
        if (id->driver_info & BTUSB_MARVELL)
@@ -2094,11 +2731,18 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_INTEL_BOOT)
                set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
 
-       if (id->driver_info & BTUSB_ATH3012)
+       if (id->driver_info & BTUSB_ATH3012) {
                hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+               set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+       }
 
-       /* Interface numbers are hardcoded in the specification */
-       data->isoc = usb_ifnum_to_if(data->udev, 1);
+       if (id->driver_info & BTUSB_AMP) {
+               /* AMP controllers do not support SCO packets */
+               data->isoc = NULL;
+       } else {
+               /* Interface numbers are hardcoded in the specification */
+               data->isoc = usb_ifnum_to_if(data->udev, 1);
+       }
 
        if (!reset)
                set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
@@ -2192,7 +2836,6 @@ static void btusb_disconnect(struct usb_interface *intf)
        else if (data->isoc)
                usb_driver_release_interface(&btusb_driver, data->isoc);
 
-       btusb_free_frags(data);
        hci_free_dev(hdev);
 }
 
index 2d8c3397774f6ba84c207bd35a64a203f2916620..f50a546224adf09b91ec7f4b1d75b2bbfde5c880 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/inet.h>
 #include <linux/string.h>
+#include <linux/mlx4/driver.h>
 
 #include "mlx4_ib.h"
 
index 9db258f7c804c548c3b481d679830aee857b02de..2ed5b996b2f43611584c1d7328bd8b839a0e8f9b 100644 (file)
@@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        enum ib_mtu tmp;
        struct mlx4_cmd_mailbox *mailbox;
        int err = 0;
+       int is_bonded = mlx4_is_bonded(mdev->dev);
 
        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
@@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        props->state            = IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
        props->active_mtu       = IB_MTU_256;
+       if (is_bonded)
+               rtnl_lock(); /* required to get upper dev */
        spin_lock_bh(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
+       if (ndev && is_bonded)
+               ndev = netdev_master_upper_dev_get(ndev);
        if (!ndev)
                goto out_unlock;
 
@@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        props->phys_state       = state_to_phys_state(props->state);
 out_unlock:
        spin_unlock_bh(&iboe->lock);
+       if (is_bonded)
+               rtnl_unlock();
 out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return err;
@@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 
 struct mlx4_ib_steering {
        struct list_head list;
-       u64 reg_id;
+       struct mlx4_flow_reg_id reg_id;
        union ib_gid gid;
 };
 
@@ -1135,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                                    struct ib_flow_attr *flow_attr,
                                    int domain)
 {
-       int err = 0, i = 0;
+       int err = 0, i = 0, j = 0;
        struct mlx4_ib_flow *mflow;
        enum mlx4_net_trans_promisc_mode type[2];
+       struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+       int is_bonded = mlx4_is_bonded(dev);
 
        memset(type, 0, sizeof(type));
 
@@ -1172,26 +1181,55 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 
        while (i < ARRAY_SIZE(type) && type[i]) {
                err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
-                                           &mflow->reg_id[i]);
+                                           &mflow->reg_id[i].id);
                if (err)
                        goto err_create_flow;
                i++;
+               if (is_bonded) {
+                       flow_attr->port = 2;
+                       err = __mlx4_ib_create_flow(qp, flow_attr,
+                                                   domain, type[j],
+                                                   &mflow->reg_id[j].mirror);
+                       flow_attr->port = 1;
+                       if (err)
+                               goto err_create_flow;
+                       j++;
+               }
+
        }
 
        if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
-               err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+               err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+                                              &mflow->reg_id[i].id);
                if (err)
                        goto err_create_flow;
                i++;
+               if (is_bonded) {
+                       flow_attr->port = 2;
+                       err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+                                                      &mflow->reg_id[j].mirror);
+                       flow_attr->port = 1;
+                       if (err)
+                               goto err_create_flow;
+                       j++;
+               }
+               /* function to create mirror rule */
        }
 
        return &mflow->ibflow;
 
 err_create_flow:
        while (i) {
-               (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+               (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+                                            mflow->reg_id[i].id);
                i--;
        }
+
+       while (j) {
+               (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+                                            mflow->reg_id[j].mirror);
+               j--;
+       }
 err_free:
        kfree(mflow);
        return ERR_PTR(err);
@@ -1204,10 +1242,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
        struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
        struct mlx4_ib_flow *mflow = to_mflow(flow_id);
 
-       while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
-               err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+       while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
+               err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
                if (err)
                        ret = err;
+               if (mflow->reg_id[i].mirror) {
+                       err = __mlx4_ib_destroy_flow(mdev->dev,
+                                                    mflow->reg_id[i].mirror);
+                       if (err)
+                               ret = err;
+               }
                i++;
        }
 
@@ -1219,11 +1263,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+       struct mlx4_dev *dev = mdev->dev;
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-       u64 reg_id;
        struct mlx4_ib_steering *ib_steering = NULL;
        enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
                MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       struct mlx4_flow_reg_id reg_id;
 
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1235,10 +1280,20 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
                                    !!(mqp->flags &
                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-                                   prot, &reg_id);
+                                   prot, &reg_id.id);
        if (err)
                goto err_malloc;
 
+       reg_id.mirror = 0;
+       if (mlx4_is_bonded(dev)) {
+               err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 2,
+                                           !!(mqp->flags &
+                                           MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+                                           prot, &reg_id.mirror);
+               if (err)
+                       goto err_add;
+       }
+
        err = add_gid_entry(ibqp, gid);
        if (err)
                goto err_add;
@@ -1254,7 +1309,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 err_add:
        mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-                             prot, reg_id);
+                             prot, reg_id.id);
+       if (reg_id.mirror)
+               mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+                                     prot, reg_id.mirror);
 err_malloc:
        kfree(ib_steering);
 
@@ -1281,10 +1339,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+       struct mlx4_dev *dev = mdev->dev;
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;
-       u64 reg_id = 0;
+       struct mlx4_flow_reg_id reg_id = {0, 0};
+
        enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
                MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
@@ -1309,10 +1369,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        }
 
        err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-                                   prot, reg_id);
+                                   prot, reg_id.id);
        if (err)
                return err;
 
+       if (mlx4_is_bonded(dev)) {
+               err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+                                           prot, reg_id.mirror);
+               if (err)
+                       return err;
+       }
+
        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
@@ -1440,6 +1507,7 @@ static void update_gids_task(struct work_struct *work)
        union ib_gid *gids;
        int err;
        struct mlx4_dev *dev = gw->dev->dev;
+       int is_bonded = mlx4_is_bonded(dev);
 
        if (!gw->dev->ib_active)
                return;
@@ -1459,7 +1527,10 @@ static void update_gids_task(struct work_struct *work)
        if (err)
                pr_warn("set port command failed\n");
        else
-               mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+               if ((gw->port == 1) || !is_bonded)
+                       mlx4_ib_dispatch_event(gw->dev,
+                                              is_bonded ? 1 : gw->port,
+                                              IB_EVENT_GID_CHANGE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        kfree(gw);
@@ -1875,7 +1946,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                                 * don't want the bond IP based gids in the table since
                                 * flows that select port by gid may get the down port.
                                */
-                               if (port_state == IB_PORT_DOWN) {
+                               if (port_state == IB_PORT_DOWN &&
+                                   !mlx4_is_bonded(ibdev->dev)) {
                                        reset_gid_table(ibdev, port);
                                        mlx4_ib_set_default_gid(ibdev,
                                                                curr_netdev,
@@ -2047,6 +2119,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        int err;
        struct mlx4_ib_iboe *iboe;
        int ib_num_ports = 0;
+       int num_req_counters;
 
        pr_info_once("%s", mlx4_ib_version);
 
@@ -2080,13 +2153,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
        ibdev->dev = dev;
+       ibdev->bond_next_port   = 0;
 
        strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
        ibdev->ib_dev.owner             = THIS_MODULE;
        ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
        ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
        ibdev->num_ports                = num_ports;
-       ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
+       ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
+                                               1 : ibdev->num_ports;
        ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
        ibdev->ib_dev.dma_device        = &dev->persist->pdev->dev;
 
@@ -2207,7 +2282,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        if (init_node_data(ibdev))
                goto err_map;
 
-       for (i = 0; i < ibdev->num_ports; ++i) {
+       num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
+       for (i = 0; i < num_req_counters; ++i) {
                mutex_init(&ibdev->qp1_proxy_lock[i]);
                if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
                                                IB_LINK_LAYER_ETHERNET) {
@@ -2218,6 +2294,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        ibdev->counters[i] = -1;
                }
        }
+       if (mlx4_is_bonded(dev))
+               for (i = 1; i < ibdev->num_ports ; ++i)
+                       ibdev->counters[i] = ibdev->counters[0];
+
 
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                ib_num_ports++;
@@ -2538,6 +2618,38 @@ out:
        return;
 }
 
+/* Deferred-work handler that folds the link state of all slave ports of
+ * a bonded device into a single logical IB port state and dispatches the
+ * matching IB_EVENT_PORT_ACTIVE / IB_EVENT_PORT_ERR event on port 1.
+ *
+ * The logical port is ACTIVE if at least one slave is running with
+ * carrier, DOWN otherwise. Owns and frees the ib_event_work allocated
+ * by the caller.
+ */
+static void handle_bonded_port_state_event(struct work_struct *work)
+{
+       struct ib_event_work *ew =
+               container_of(work, struct ib_event_work, work);
+       struct mlx4_ib_dev *ibdev = ew->ib_dev;
+       enum ib_port_state bonded_port_state = IB_PORT_NOP;
+       int i;
+       struct ib_event ibev;
+
+       kfree(ew);
+       spin_lock_bh(&ibdev->iboe.lock);
+       for (i = 0; i < MLX4_MAX_PORTS; ++i) {
+               struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+               enum ib_port_state curr_port_state;
+
+               /* A slot may hold no net device (see the !ndev checks on
+                * other iboe.netdevs users); skip it instead of passing
+                * NULL to netif_running()/netif_carrier_ok().
+                */
+               if (!curr_netdev)
+                       continue;
+
+               curr_port_state =
+                       (netif_running(curr_netdev) &&
+                        netif_carrier_ok(curr_netdev)) ?
+                       IB_PORT_ACTIVE : IB_PORT_DOWN;
+
+               /* Any ACTIVE slave makes the bonded port ACTIVE. */
+               bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
+                       curr_port_state : IB_PORT_ACTIVE;
+       }
+       spin_unlock_bh(&ibdev->iboe.lock);
+
+       /* Bonded configurations expose a single IB port, hence port 1. */
+       ibev.device = &ibdev->ib_dev;
+       ibev.element.port_num = 1;
+       ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
+               IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+       ib_dispatch_event(&ibev);
+}
+
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
                          enum mlx4_dev_event event, unsigned long param)
 {
@@ -2547,6 +2659,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
        struct ib_event_work *ew;
        int p = 0;
 
+       if (mlx4_is_bonded(dev) &&
+           ((event == MLX4_DEV_EVENT_PORT_UP) ||
+           (event == MLX4_DEV_EVENT_PORT_DOWN))) {
+               ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+               if (!ew)
+                       return;
+               INIT_WORK(&ew->work, handle_bonded_port_state_event);
+               ew->ib_dev = ibdev;
+               queue_work(wq, &ew->work);
+               return;
+       }
+
        if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
                eqe = (struct mlx4_eqe *)param;
        else
@@ -2607,7 +2731,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
        }
 
        ibev.device           = ibdev_ptr;
-       ibev.element.port_num = (u8) p;
+       ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
 
        ib_dispatch_event(&ibev);
 }
@@ -2616,7 +2740,8 @@ static struct mlx4_interface mlx4_ib_interface = {
        .add            = mlx4_ib_add,
        .remove         = mlx4_ib_remove,
        .event          = mlx4_ib_event,
-       .protocol       = MLX4_PROT_IB_IPV6
+       .protocol       = MLX4_PROT_IB_IPV6,
+       .flags          = MLX4_INTFF_BONDING
 };
 
 static int __init mlx4_ib_init(void)
index 6eb743f65f6f5633eb126ae5fe7ac768299fe77d..721540c9163d540b98717fc8f4c13cb2302d5752 100644 (file)
@@ -134,10 +134,17 @@ struct mlx4_ib_fmr {
        struct mlx4_fmr         mfmr;
 };
 
+/* Number of firmware registration IDs a single DMFS verbs rule may need
+ * (translating a sniffer rule to the FW API requires two).
+ */
+#define MAX_REGS_PER_FLOW 2
+
+/* Pair of firmware flow registration handles: "id" for the rule on the
+ * primary port and "mirror" for the duplicate rule installed on the
+ * second port when the device is bonded (0 when unused).
+ */
+struct mlx4_flow_reg_id {
+       u64 id;
+       u64 mirror;
+};
+
 struct mlx4_ib_flow {
        struct ib_flow ibflow;
        /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
-       u64 reg_id[2];
+       struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
 };
 
 struct mlx4_ib_wq {
@@ -527,6 +534,7 @@ struct mlx4_ib_dev {
        struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
        /* lock when destroying qp1_proxy and getting netdev events */
        struct mutex            qp1_proxy_lock[MLX4_MAX_PORTS];
+       u8                      bond_next_port;
 };
 
 struct ib_event_work {
@@ -622,6 +630,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
        return container_of(ibah, struct mlx4_ib_ah, ibah);
 }
 
+/* Round-robin port selector for bonded devices: advances the cursor and
+ * returns the next port as a 1-based port number (1..num_ports).
+ *
+ * NOTE(review): the read-modify-write of dev->bond_next_port is not
+ * atomic and no lock is visible here — presumably concurrent callers
+ * can race; confirm the callers serialize this.
+ */
+static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
+{
+       dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
+
+       return dev->bond_next_port + 1;
+}
+
 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
 
index cf000b7ad64f9b11698ee8dbfa2aec9883e886db..792f9dc86adac3a098f167c69f29455945eb315b 100644 (file)
@@ -40,6 +40,7 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_mad.h>
 
+#include <linux/mlx4/driver.h>
 #include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
@@ -93,17 +94,6 @@ enum {
 #ifndef ETH_ALEN
 #define ETH_ALEN        6
 #endif
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
-       u64 mac = 0;
-       int i;
-
-       for (i = 0; i < ETH_ALEN; i++) {
-               mac <<= 8;
-               mac |= addr[i];
-       }
-       return mac;
-}
 
 static const __be32 mlx4_ib_opcode[] = {
        [IB_WR_SEND]                            = cpu_to_be32(MLX4_OPCODE_SEND),
@@ -1915,6 +1905,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto out;
        }
 
+       if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
+               if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+                       if ((ibqp->qp_type == IB_QPT_RC) ||
+                           (ibqp->qp_type == IB_QPT_UD) ||
+                           (ibqp->qp_type == IB_QPT_UC) ||
+                           (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+                           (ibqp->qp_type == IB_QPT_XRC_INI)) {
+                               attr->port_num = mlx4_ib_bond_next_port(dev);
+                       }
+               } else {
+                       /* no sense in changing port_num
+                        * when ports are bonded */
+                       attr_mask &= ~IB_QP_PORT;
+               }
+       }
+
        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
                pr_debug("qpn 0x%x: invalid port number (%d) specified "
@@ -1965,6 +1971,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
        err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
+       if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
+               attr->port_num = 1;
+
 out:
        mutex_unlock(&qp->mutex);
        return err;
index 7aaaf51e1596c5dc399cfa096040822275a05bae..35f19a6838222e411e3ab4e427488915a9a841f8 100644 (file)
@@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                        to_copy = size - bytes_copied;
 
                if (is_iovec) {
-                       struct iovec *iov = (struct iovec *)src;
+                       struct msghdr *msg = (struct msghdr *)src;
                        int err;
 
                        /* The iovec will track bytes_copied internally. */
-                       err = memcpy_fromiovec((u8 *)va + page_offset,
-                                              iov, to_copy);
+                       err = memcpy_from_msg((u8 *)va + page_offset,
+                                             msg, to_copy);
                        if (err != 0) {
                                if (kernel_if->host)
                                        kunmap(kernel_if->u.h.page[page_index]);
@@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest,
  */
 static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
                                  u64 queue_offset,
-                                 const void *src,
+                                 const void *msg,
                                  size_t src_offset, size_t size)
 {
 
@@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
         * We ignore src_offset because src is really a struct iovec * and will
         * maintain offset internally.
         */
-       return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+       return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
 }
 
 /*
@@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek);
  * of bytes enqueued or < 0 on error.
  */
 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
-                         void *iov,
+                         struct msghdr *msg,
                          size_t iov_size,
                          int buf_type)
 {
        ssize_t result;
 
-       if (!qpair || !iov)
+       if (!qpair)
                return VMCI_ERROR_INVALID_ARGS;
 
        qp_lock(qpair);
@@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
                result = qp_enqueue_locked(qpair->produce_q,
                                           qpair->consume_q,
                                           qpair->produce_q_size,
-                                          iov, iov_size,
+                                          msg, iov_size,
                                           qp_memcpy_to_queue_iov);
 
                if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
index c9e519cb921412ac1e73d67a3a95be96c5c1c2ec..679ef00d6b16a102dd9681121b6ae91cc3714a67 100644 (file)
@@ -790,7 +790,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                        }
 
                        new_active->delay = 0;
-                       new_active->link = BOND_LINK_UP;
+                       bond_set_slave_link_state(new_active, BOND_LINK_UP);
 
                        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
@@ -1181,6 +1181,62 @@ static void bond_free_slave(struct slave *slave)
        kfree(slave);
 }
 
+static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
+{
+       info->bond_mode = BOND_MODE(bond);
+       info->miimon = bond->params.miimon;
+       info->num_slaves = bond->slave_cnt;
+}
+
+static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
+{
+       strcpy(info->slave_name, slave->dev->name);
+       info->link = slave->link;
+       info->state = bond_slave_state(slave);
+       info->link_failure_count = slave->link_failure_count;
+}
+
+static void bond_netdev_notify(struct slave *slave, struct net_device *dev)
+{
+       struct bonding *bond = slave->bond;
+       struct netdev_bonding_info bonding_info;
+
+       rtnl_lock();
+       /* make sure that slave is still valid */
+       if (dev->priv_flags & IFF_BONDING) {
+               bond_fill_ifslave(slave, &bonding_info.slave);
+               bond_fill_ifbond(bond, &bonding_info.master);
+               netdev_bonding_info_change(slave->dev, &bonding_info);
+       }
+       rtnl_unlock();
+}
+
+static void bond_netdev_notify_work(struct work_struct *_work)
+{
+       struct netdev_notify_work *w =
+               container_of(_work, struct netdev_notify_work, work.work);
+
+       bond_netdev_notify(w->slave, w->dev);
+       dev_put(w->dev);
+}
+
+void bond_queue_slave_event(struct slave *slave)
+{
+       struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
+
+       if (!nnw)
+               return;
+
+       INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
+       nnw->slave = slave;
+       nnw->dev = slave->dev;
+
+       if (queue_delayed_work(slave->bond->wq, &nnw->work, 0))
+               dev_hold(slave->dev);
+       else
+               kfree(nnw);
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1444,19 +1500,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        if (bond->params.miimon) {
                if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
                        if (bond->params.updelay) {
-                               new_slave->link = BOND_LINK_BACK;
+                               bond_set_slave_link_state(new_slave,
+                                                         BOND_LINK_BACK);
                                new_slave->delay = bond->params.updelay;
                        } else {
-                               new_slave->link = BOND_LINK_UP;
+                               bond_set_slave_link_state(new_slave,
+                                                         BOND_LINK_UP);
                        }
                } else {
-                       new_slave->link = BOND_LINK_DOWN;
+                       bond_set_slave_link_state(new_slave, BOND_LINK_DOWN);
                }
        } else if (bond->params.arp_interval) {
-               new_slave->link = (netif_carrier_ok(slave_dev) ?
-                       BOND_LINK_UP : BOND_LINK_DOWN);
+               bond_set_slave_link_state(new_slave,
+                                         (netif_carrier_ok(slave_dev) ?
+                                         BOND_LINK_UP : BOND_LINK_DOWN));
        } else {
-               new_slave->link = BOND_LINK_UP;
+               bond_set_slave_link_state(new_slave, BOND_LINK_UP);
        }
 
        if (new_slave->link != BOND_LINK_DOWN)
@@ -1572,6 +1631,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                    new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
 
        /* enslave is successful */
+       bond_queue_slave_event(new_slave);
        return 0;
 
 /* Undo stages on error */
@@ -1821,11 +1881,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-
-       info->bond_mode = BOND_MODE(bond);
-       info->miimon = bond->params.miimon;
-       info->num_slaves = bond->slave_cnt;
-
+       bond_fill_ifbond(bond, info);
        return 0;
 }
 
@@ -1839,10 +1895,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
        bond_for_each_slave(bond, slave, iter) {
                if (i++ == (int)info->slave_id) {
                        res = 0;
-                       strcpy(info->slave_name, slave->dev->name);
-                       info->link = slave->link;
-                       info->state = bond_slave_state(slave);
-                       info->link_failure_count = slave->link_failure_count;
+                       bond_fill_ifslave(slave, info);
                        break;
                }
        }
@@ -1872,7 +1925,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        if (link_state)
                                continue;
 
-                       slave->link = BOND_LINK_FAIL;
+                       bond_set_slave_link_state(slave, BOND_LINK_FAIL);
                        slave->delay = bond->params.downdelay;
                        if (slave->delay) {
                                netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -1887,7 +1940,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                case BOND_LINK_FAIL:
                        if (link_state) {
                                /* recovered before downdelay expired */
-                               slave->link = BOND_LINK_UP;
+                               bond_set_slave_link_state(slave, BOND_LINK_UP);
                                slave->last_link_up = jiffies;
                                netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
                                            (bond->params.downdelay - slave->delay) *
@@ -1909,7 +1962,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        if (!link_state)
                                continue;
 
-                       slave->link = BOND_LINK_BACK;
+                       bond_set_slave_link_state(slave, BOND_LINK_BACK);
                        slave->delay = bond->params.updelay;
 
                        if (slave->delay) {
@@ -1922,7 +1975,8 @@ static int bond_miimon_inspect(struct bonding *bond)
                        /*FALLTHRU*/
                case BOND_LINK_BACK:
                        if (!link_state) {
-                               slave->link = BOND_LINK_DOWN;
+                               bond_set_slave_link_state(slave,
+                                                         BOND_LINK_DOWN);
                                netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
                                            (bond->params.updelay - slave->delay) *
                                            bond->params.miimon,
@@ -1960,7 +2014,7 @@ static void bond_miimon_commit(struct bonding *bond)
                        continue;
 
                case BOND_LINK_UP:
-                       slave->link = BOND_LINK_UP;
+                       bond_set_slave_link_state(slave, BOND_LINK_UP);
                        slave->last_link_up = jiffies;
 
                        primary = rtnl_dereference(bond->primary_slave);
@@ -2000,7 +2054,7 @@ static void bond_miimon_commit(struct bonding *bond)
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
 
-                       slave->link = BOND_LINK_DOWN;
+                       bond_set_slave_link_state(slave, BOND_LINK_DOWN);
 
                        if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
                            BOND_MODE(bond) == BOND_MODE_8023AD)
@@ -2583,7 +2637,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
                                struct slave *current_arp_slave;
 
                                current_arp_slave = rtnl_dereference(bond->current_arp_slave);
-                               slave->link = BOND_LINK_UP;
+                               bond_set_slave_link_state(slave, BOND_LINK_UP);
                                if (current_arp_slave) {
                                        bond_set_slave_inactive_flags(
                                                current_arp_slave,
@@ -2606,7 +2660,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
 
-                       slave->link = BOND_LINK_DOWN;
+                       bond_set_slave_link_state(slave, BOND_LINK_DOWN);
                        bond_set_slave_inactive_flags(slave,
                                                      BOND_SLAVE_NOTIFY_NOW);
 
@@ -2685,7 +2739,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
                 * up when it is actually down
                 */
                if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
-                       slave->link = BOND_LINK_DOWN;
+                       bond_set_slave_link_state(slave, BOND_LINK_DOWN);
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
 
@@ -2705,7 +2759,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
        if (!new_slave)
                goto check_state;
 
-       new_slave->link = BOND_LINK_BACK;
+       bond_set_slave_link_state(new_slave, BOND_LINK_BACK);
        bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
        bond_arp_send_all(bond, new_slave);
        new_slave->last_link_up = jiffies;
index fb6980a099819768535a10e9f7b78b939e895824..55019c93387da5d08e20355813f6c7810804e247 100644 (file)
@@ -476,6 +476,22 @@ struct sge_rspq {                   /* state for an SGE response queue */
        struct adapter *adap;
        struct net_device *netdev;  /* associated net device */
        rspq_handler_t handler;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define CXGB_POLL_STATE_IDLE           0
+#define CXGB_POLL_STATE_NAPI           BIT(0) /* NAPI owns this poll */
+#define CXGB_POLL_STATE_POLL           BIT(1) /* poll owns this poll */
+#define CXGB_POLL_STATE_NAPI_YIELD     BIT(2) /* NAPI yielded this poll */
+#define CXGB_POLL_STATE_POLL_YIELD     BIT(3) /* poll yielded this poll */
+#define CXGB_POLL_YIELD                        (CXGB_POLL_STATE_NAPI_YIELD |   \
+                                        CXGB_POLL_STATE_POLL_YIELD)
+#define CXGB_POLL_LOCKED               (CXGB_POLL_STATE_NAPI |         \
+                                        CXGB_POLL_STATE_POLL)
+#define CXGB_POLL_USER_PEND            (CXGB_POLL_STATE_POLL |         \
+                                        CXGB_POLL_STATE_POLL_YIELD)
+       unsigned int bpoll_state;
+       spinlock_t bpoll_lock;          /* lock for busy poll */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 };
 
 struct sge_eth_stats {              /* Ethernet queue statistics */
@@ -880,6 +896,102 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
        return netdev2pinfo(dev)->adapter;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
+{
+       spin_lock_init(&q->bpoll_lock);
+       q->bpoll_state = CXGB_POLL_STATE_IDLE;
+}
+
+static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
+{
+       bool rc = true;
+
+       spin_lock(&q->bpoll_lock);
+       if (q->bpoll_state & CXGB_POLL_LOCKED) {
+               q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
+               rc = false;
+       } else {
+               q->bpoll_state = CXGB_POLL_STATE_NAPI;
+       }
+       spin_unlock(&q->bpoll_lock);
+       return rc;
+}
+
+static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
+{
+       bool rc = false;
+
+       spin_lock(&q->bpoll_lock);
+       if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
+               rc = true;
+       q->bpoll_state = CXGB_POLL_STATE_IDLE;
+       spin_unlock(&q->bpoll_lock);
+       return rc;
+}
+
+static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
+{
+       bool rc = true;
+
+       spin_lock_bh(&q->bpoll_lock);
+       if (q->bpoll_state & CXGB_POLL_LOCKED) {
+               q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
+               rc = false;
+       } else {
+               q->bpoll_state |= CXGB_POLL_STATE_POLL;
+       }
+       spin_unlock_bh(&q->bpoll_lock);
+       return rc;
+}
+
+static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
+{
+       bool rc = false;
+
+       spin_lock_bh(&q->bpoll_lock);
+       if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
+               rc = true;
+       q->bpoll_state = CXGB_POLL_STATE_IDLE;
+       spin_unlock_bh(&q->bpoll_lock);
+       return rc;
+}
+
+static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
+{
+       return q->bpoll_state & CXGB_POLL_USER_PEND;
+}
+#else
+static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
+{
+}
+
+static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
+{
+       return true;
+}
+
+static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
+{
+       return false;
+}
+
+static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
+{
+       return false;
+}
+
+static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
+{
+       return false;
+}
+
+static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
+{
+       return false;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 void t4_os_portmod_changed(const struct adapter *adap, int port_id);
 void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
 
@@ -908,6 +1020,7 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
 int t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
+int cxgb_busy_poll(struct napi_struct *napi);
 extern int dbfifo_int_thresh;
 
 #define for_each_port(adapter, iter) \
index 5bf490a781aaad9de63b772d5202be5c6fe4bdba..5db5b4f7b94d7ff29f86ba478cbdc7f75bf2e0e5 100644 (file)
@@ -923,8 +923,14 @@ static void quiesce_rx(struct adapter *adap)
        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];
 
-               if (q && q->handler)
+               if (q && q->handler) {
                        napi_disable(&q->napi);
+                       local_bh_disable();
+                       while (!cxgb_poll_lock_napi(q))
+                               mdelay(1);
+                       local_bh_enable();
+               }
+
        }
 }
 
@@ -940,8 +946,10 @@ static void enable_rx(struct adapter *adap)
 
                if (!q)
                        continue;
-               if (q->handler)
+               if (q->handler) {
+                       cxgb_busy_poll_init_lock(q);
                        napi_enable(&q->napi);
+               }
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                             SEINTARM_V(q->intr_params) |
@@ -4563,6 +4571,10 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller  = cxgb_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll        = cxgb_busy_poll,
+#endif
+
 };
 
 void t4_fatal_err(struct adapter *adap)
@@ -5130,8 +5142,7 @@ static int adap_init0(struct adapter *adap)
                                 state, &reset);
 
                /* Cleaning up */
-               if (fw != NULL)
-                       release_firmware(fw);
+               release_firmware(fw);
                t4_free_mem(card_fw);
 
                if (ret < 0)
index 619156112b21a2a0ac4ceff37cb77a0bbf7da0b3..b4b9f6048fe730dc1287a648a5e61c7f2d92dd7d 100644 (file)
@@ -43,6 +43,9 @@
 #include <linux/export.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 #include "cxgb4.h"
 #include "t4_regs.h"
 #include "t4_values.h"
@@ -1720,6 +1723,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
        skb->truesize += skb->data_len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxq->rspq.idx);
+       skb_mark_napi_id(skb, &rxq->rspq.napi);
        if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
                             PKT_HASH_TYPE_L3);
@@ -1763,6 +1767,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
        csum_ok = pkt->csum_calc && !pkt->err_vec &&
                  (q->netdev->features & NETIF_F_RXCSUM);
        if ((pkt->l2info & htonl(RXF_TCP_F)) &&
+           !(cxgb_poll_busy_polling(q)) &&
            (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
                do_gro(rxq, si, pkt);
                return 0;
@@ -1801,6 +1806,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
                rxq->stats.vlan_ex++;
        }
+       skb_mark_napi_id(skb, &q->napi);
        netif_receive_skb(skb);
        return 0;
 }
@@ -1963,6 +1969,38 @@ static int process_responses(struct sge_rspq *q, int budget)
        return budget - budget_left;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+int cxgb_busy_poll(struct napi_struct *napi)
+{
+       struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+       unsigned int params, work_done;
+       u32 val;
+
+       if (!cxgb_poll_lock_poll(q))
+               return LL_FLUSH_BUSY;
+
+       work_done = process_responses(q, 4);
+       params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+       q->next_intr_params = params;
+       val = CIDXINC_V(work_done) | SEINTARM_V(params);
+
+       /* If we don't have access to the new User GTS (T5+), use the old
+        * doorbell mechanism; otherwise use the new BAR2 mechanism.
+        */
+       if (unlikely(!q->bar2_addr))
+               t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+                            val | INGRESSQID_V((u32)q->cntxt_id));
+       else {
+               writel(val | INGRESSQID_V(q->bar2_qid),
+                      q->bar2_addr + SGE_UDB_GTS);
+               wmb();
+       }
+
+       cxgb_poll_unlock_poll(q);
+       return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  *     napi_rx_handler - the NAPI handler for Rx processing
  *     @napi: the napi instance
@@ -1978,9 +2016,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
 {
        unsigned int params;
        struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
-       int work_done = process_responses(q, budget);
+       int work_done;
        u32 val;
 
+       if (!cxgb_poll_lock_napi(q))
+               return budget;
+
+       work_done = process_responses(q, budget);
        if (likely(work_done < budget)) {
                int timer_index;
 
@@ -2018,6 +2060,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
                       q->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
+       cxgb_poll_unlock_napi(q);
        return work_done;
 }
 
@@ -2341,6 +2384,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                goto err;
 
        netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+       napi_hash_add(&iq->napi);
        iq->cur_desc = iq->desc;
        iq->cidx = 0;
        iq->gen = 1;
@@ -2598,6 +2642,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
                   rq->cntxt_id, fl_id, 0xffff);
        dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
                          rq->desc, rq->phys_addr);
+       napi_hash_del(&rq->napi);
        netif_napi_del(&rq->napi);
        rq->netdev = NULL;
        rq->cntxt_id = rq->abs_id = 0;
index a40484432ebf47fadb0f7a8588b1f23562fe4748..997ec87470c765a43efcdc46b6309bd30c4b4fd1 100644 (file)
@@ -59,6 +59,7 @@
 
 /* GTS register */
 #define SGE_TIMERREGS                  6
+#define TIMERREG_COUNTER0_X            0
 
 /* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
  * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
index 58cabee00abf9f35294f0b39294bdd9e70f657a8..9bb6220663b21a505f2332f028ce3a1e13b77f86 100644 (file)
@@ -2596,12 +2596,9 @@ static void fec_enet_free_queue(struct net_device *ndev)
                }
 
        for (i = 0; i < fep->num_rx_queues; i++)
-               if (fep->rx_queue[i])
-                       kfree(fep->rx_queue[i]);
-
+               kfree(fep->rx_queue[i]);
        for (i = 0; i < fep->num_tx_queues; i++)
-               if (fep->tx_queue[i])
-                       kfree(fep->tx_queue[i]);
+               kfree(fep->tx_queue[i]);
 }
 
 static int fec_enet_alloc_queue(struct net_device *ndev)
index 9e2bcb8079236d4e579de07fc43811bfc2801897..a17628769a1f0de4c749ac59138d300b8e02e07f 100644 (file)
@@ -278,14 +278,20 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
                        fep->stats.collisions++;
 
                /* unmap */
-               dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-                               skb->len, DMA_TO_DEVICE);
+               if (fep->mapped_as_page[dirtyidx])
+                       dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
+                                      CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+                                        CBDR_DATLEN(bdp), DMA_TO_DEVICE);
 
                /*
                 * Free the sk buffer associated with this last transmit.
                 */
-               dev_kfree_skb(skb);
-               fep->tx_skbuff[dirtyidx] = NULL;
+               if (skb) {
+                       dev_kfree_skb(skb);
+                       fep->tx_skbuff[dirtyidx] = NULL;
+               }
 
                /*
                 * Update pointer to next buffer descriptor to be transmitted.
@@ -299,7 +305,7 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
                 * Since we have freed up a buffer, the ring is no longer
                 * full.
                 */
-               if (!fep->tx_free++)
+               if (++fep->tx_free >= MAX_SKB_FRAGS)
                        do_wake = 1;
                has_tx_work = 1;
        }
@@ -509,6 +515,9 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        cbd_t __iomem *bdp;
        int curidx;
        u16 sc;
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+       skb_frag_t *frag;
+       int len;
 
 #ifdef CONFIG_FS_ENET_MPC5121_FEC
        if (((unsigned long)skb->data) & 0x3) {
@@ -530,7 +539,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        bdp = fep->cur_tx;
 
-       if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+       if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
                netif_stop_queue(dev);
                spin_unlock(&fep->tx_lock);
 
@@ -543,35 +552,42 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        curidx = bdp - fep->tx_bd_base;
-       /*
-        * Clear all of the status flags.
-        */
-       CBDC_SC(bdp, BD_ENET_TX_STATS);
-
-       /*
-        * Save skb pointer.
-        */
-       fep->tx_skbuff[curidx] = skb;
-
-       fep->stats.tx_bytes += skb->len;
 
+       len = skb->len;
+       fep->stats.tx_bytes += len;
+       if (nr_frags)
+               len -= skb->data_len;
+       fep->tx_free -= nr_frags + 1;
        /*
         * Push the data cache so the CPM does not get stale memory data.
         */
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
-                               skb->data, skb->len, DMA_TO_DEVICE));
-       CBDW_DATLEN(bdp, skb->len);
+                               skb->data, len, DMA_TO_DEVICE));
+       CBDW_DATLEN(bdp, len);
+
+       fep->mapped_as_page[curidx] = 0;
+       frag = skb_shinfo(skb)->frags;
+       while (nr_frags) {
+               CBDC_SC(bdp,
+                       BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+               CBDS_SC(bdp, BD_ENET_TX_READY);
+
+               if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+                       bdp++, curidx++;
+               else
+                       bdp = fep->tx_bd_base, curidx = 0;
 
-       /*
-        * If this was the last BD in the ring, start at the beginning again.
-        */
-       if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
-               fep->cur_tx++;
-       else
-               fep->cur_tx = fep->tx_bd_base;
+               len = skb_frag_size(frag);
+               CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
+                                                  DMA_TO_DEVICE));
+               CBDW_DATLEN(bdp, len);
 
-       if (!--fep->tx_free)
-               netif_stop_queue(dev);
+               fep->tx_skbuff[curidx] = NULL;
+               fep->mapped_as_page[curidx] = 1;
+
+               frag++;
+               nr_frags--;
+       }
 
        /* Trigger transmission start */
        sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
@@ -582,8 +598,22 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * yay for hw reuse :) */
        if (skb->len <= 60)
                sc |= BD_ENET_TX_PAD;
+       CBDC_SC(bdp, BD_ENET_TX_STATS);
        CBDS_SC(bdp, sc);
 
+       /* Save skb pointer. */
+       fep->tx_skbuff[curidx] = skb;
+
+       /* If this was the last BD in the ring, start at the beginning again. */
+       if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+               bdp++;
+       else
+               bdp = fep->tx_bd_base;
+       fep->cur_tx = bdp;
+
+       if (fep->tx_free < MAX_SKB_FRAGS)
+               netif_stop_queue(dev);
+
        skb_tx_timestamp(skb);
 
        (*fep->ops->tx_kickstart)(dev);
@@ -917,7 +947,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
        }
 
        fpi->rx_ring = 32;
-       fpi->tx_ring = 32;
+       fpi->tx_ring = 64;
        fpi->rx_copybreak = 240;
        fpi->napi_weight = 17;
        fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -955,7 +985,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        privsize = sizeof(*fep) +
                   sizeof(struct sk_buff **) *
-                  (fpi->rx_ring + fpi->tx_ring);
+                    (fpi->rx_ring + fpi->tx_ring) +
+                  sizeof(char) * fpi->tx_ring;
 
        ndev = alloc_etherdev(privsize);
        if (!ndev) {
@@ -978,6 +1009,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        fep->rx_skbuff = (struct sk_buff **)&fep[1];
        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+       fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
+                                      fpi->tx_ring);
 
        spin_lock_init(&fep->lock);
        spin_lock_init(&fep->tx_lock);
@@ -1007,6 +1040,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        netif_carrier_off(ndev);
 
+       ndev->features |= NETIF_F_SG;
+
        ret = register_netdev(ndev);
        if (ret)
                goto out_free_bd;
index 3a4b49e0e717b980e09c54d4cf98f42b56b56e53..f184d8f952e21d269e33179ea1abb1e36b16dda2 100644 (file)
@@ -134,6 +134,7 @@ struct fs_enet_private {
        void __iomem *ring_base;
        struct sk_buff **rx_skbuff;
        struct sk_buff **tx_skbuff;
+       char *mapped_as_page;
        cbd_t __iomem *rx_bd_base;      /* Address of Rx and Tx buffers.    */
        cbd_t __iomem *tx_bd_base;
        cbd_t __iomem *dirty_tx;        /* ring entries to be free()ed.     */
index 9388a83818f2f408446654adef22cb7020d567ab..162762d1a12cb1ffcb34a2325150278650168c29 100644 (file)
@@ -2367,7 +2367,7 @@ static int emac_wait_deps(struct emac_instance *dev)
        err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
        for (i = 0; i < EMAC_DEP_COUNT; i++) {
                of_node_put(deps[i].node);
-               if (err && deps[i].ofdev)
+               if (err)
                        of_dev_put(deps[i].ofdev);
        }
        if (err == 0) {
index 154effbfd8bef5b49f6ab06a381e1fa4db289c2c..a681d7c0bb9f066f8d48ed068f68f0001dad8d0f 100644 (file)
@@ -1583,6 +1583,15 @@ static struct mlx4_cmd_info cmd_info[] = {
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper
        },
+       {
+               .opcode = MLX4_CMD_VIRT_PORT_MAP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CMD_EPERM_wrapper
+       },
 };
 
 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
index c643d2bbb7b9251dd2d5da7a85d0ad1d074e7af6..58d5a07d0ff4da6397118fb4b3b051ac936ed95b 100644 (file)
@@ -214,6 +214,8 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
        iounmap(mdev->uar_map);
        mlx4_uar_free(dev, &mdev->priv_uar);
        mlx4_pd_free(dev, mdev->priv_pdn);
+       if (mdev->nb.notifier_call)
+               unregister_netdevice_notifier(&mdev->nb);
        kfree(mdev);
 }
 
@@ -298,6 +300,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
                if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
                        mdev->pndev[i] = NULL;
        }
+       /* register notifier */
+       mdev->nb.notifier_call = mlx4_en_netdev_event;
+       if (register_netdevice_notifier(&mdev->nb)) {
+               mdev->nb.notifier_call = NULL;
+               mlx4_err(mdev, "Failed to create notifier\n");
+       }
 
        return mdev;
 
index e075ff1f4e80a248f5480bd9e3f105f0ac6468be..2a210c4efb895728ec6ad12eaef9ec8f9ff7fd08 100644 (file)
@@ -2062,6 +2062,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        /* Detach the netdev so tasks would not attempt to access it */
        mutex_lock(&mdev->state_lock);
        mdev->pndev[priv->port] = NULL;
+       mdev->upper[priv->port] = NULL;
        mutex_unlock(&mdev->state_lock);
 
        mlx4_en_free_resources(priv);
@@ -2201,6 +2202,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
                        return ret;
        }
 
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
+               en_info(priv, "Turn %s TX vlan strip offload\n",
+                       (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
+
        if (features & NETIF_F_LOOPBACK)
                priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
        else
@@ -2441,6 +2446,180 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 #endif
 };
 
+struct mlx4_en_bond {
+       struct work_struct work;
+       struct mlx4_en_priv *priv;
+       int is_bonded;
+       struct mlx4_port_map port_map;
+};
+
+static void mlx4_en_bond_work(struct work_struct *work)
+{
+       struct mlx4_en_bond *bond = container_of(work,
+                                                    struct mlx4_en_bond,
+                                                    work);
+       int err = 0;
+       struct mlx4_dev *dev = bond->priv->mdev->dev;
+
+       if (bond->is_bonded) {
+               if (!mlx4_is_bonded(dev)) {
+                       err = mlx4_bond(dev);
+                       if (err)
+                               en_err(bond->priv, "Fail to bond device\n");
+               }
+               if (!err) {
+                       err = mlx4_port_map_set(dev, &bond->port_map);
+                       if (err)
+                               en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
+                                      bond->port_map.port1,
+                                      bond->port_map.port2,
+                                      err);
+               }
+       } else if (mlx4_is_bonded(dev)) {
+               err = mlx4_unbond(dev);
+               if (err)
+                       en_err(bond->priv, "Fail to unbond device\n");
+       }
+       dev_put(bond->priv->dev);
+       kfree(bond);
+}
+
+static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
+                                  u8 v2p_p1, u8 v2p_p2)
+{
+       struct mlx4_en_bond *bond = NULL;
+
+       bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
+       if (!bond)
+               return -ENOMEM;
+
+       INIT_WORK(&bond->work, mlx4_en_bond_work);
+       bond->priv = priv;
+       bond->is_bonded = is_bonded;
+       bond->port_map.port1 = v2p_p1;
+       bond->port_map.port2 = v2p_p2;
+       dev_hold(priv->dev);
+       queue_work(priv->mdev->workqueue, &bond->work);
+       return 0;
+}
+
+int mlx4_en_netdev_event(struct notifier_block *this,
+                        unsigned long event, void *ptr)
+{
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       u8 port = 0;
+       struct mlx4_en_dev *mdev;
+       struct mlx4_dev *dev;
+       int i, num_eth_ports = 0;
+       bool do_bond = true;
+       struct mlx4_en_priv *priv;
+       u8 v2p_port1 = 0;
+       u8 v2p_port2 = 0;
+
+       if (!net_eq(dev_net(ndev), &init_net))
+               return NOTIFY_DONE;
+
+       mdev = container_of(this, struct mlx4_en_dev, nb);
+       dev = mdev->dev;
+
+       /* Go into this mode only when two network devices set on two ports
+        * of the same mlx4 device are slaves of the same bonding master
+        */
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+               ++num_eth_ports;
+               if (!port && (mdev->pndev[i] == ndev))
+                       port = i;
+               mdev->upper[i] = mdev->pndev[i] ?
+                       netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
+               /* condition not met: network device is a slave */
+               if (!mdev->upper[i])
+                       do_bond = false;
+               if (num_eth_ports < 2)
+                       continue;
+               /* condition not met: same master */
+               if (mdev->upper[i] != mdev->upper[i-1])
+                       do_bond = false;
+       }
+       /* condition not met: 2 slaves */
+       do_bond = (num_eth_ports ==  2) ? do_bond : false;
+
+       /* handle only events that come with enough info */
+       if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
+               return NOTIFY_DONE;
+
+       priv = netdev_priv(ndev);
+       if (do_bond) {
+               struct netdev_notifier_bonding_info *notifier_info = ptr;
+               struct netdev_bonding_info *bonding_info =
+                       &notifier_info->bonding_info;
+
+               /* required mode 1, 2 or 4 */
+               if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
+                   (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
+                   (bonding_info->master.bond_mode != BOND_MODE_8023AD))
+                       do_bond = false;
+
+               /* require exactly 2 slaves */
+               if (bonding_info->master.num_slaves != 2)
+                       do_bond = false;
+
+               /* calc v2p */
+               if (do_bond) {
+                       if (bonding_info->master.bond_mode ==
+                           BOND_MODE_ACTIVEBACKUP) {
+                               /* in active-backup mode virtual ports are
+                                * mapped to the physical port of the active
+                                * slave */
+                               if (bonding_info->slave.state ==
+                                   BOND_STATE_BACKUP) {
+                                       if (port == 1) {
+                                               v2p_port1 = 2;
+                                               v2p_port2 = 2;
+                                       } else {
+                                               v2p_port1 = 1;
+                                               v2p_port2 = 1;
+                                       }
+                               } else { /* BOND_STATE_ACTIVE */
+                                       if (port == 1) {
+                                               v2p_port1 = 1;
+                                               v2p_port2 = 1;
+                                       } else {
+                                               v2p_port1 = 2;
+                                               v2p_port2 = 2;
+                                       }
+                               }
+                       } else { /* Active-Active */
+                               /* in active-active mode a virtual port is
+                                * mapped to the native physical port if and only
+                                * if the physical port is up */
+                               __s8 link = bonding_info->slave.link;
+
+                               if (port == 1)
+                                       v2p_port2 = 2;
+                               else
+                                       v2p_port1 = 1;
+                               if ((link == BOND_LINK_UP) ||
+                                   (link == BOND_LINK_FAIL)) {
+                                       if (port == 1)
+                                               v2p_port1 = 1;
+                                       else
+                                               v2p_port2 = 2;
+                               } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
+                                       if (port == 1)
+                                               v2p_port1 = 2;
+                                       else
+                                               v2p_port2 = 1;
+                               }
+                       }
+               }
+       }
+
+       mlx4_en_queue_bond_work(priv, do_bond,
+                               v2p_port1, v2p_port2);
+
+       return NOTIFY_DONE;
+}
+
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof)
 {
@@ -2623,6 +2802,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        }
 
        mdev->pndev[port] = dev;
+       mdev->upper[port] = NULL;
 
        netif_carrier_off(dev);
        mlx4_en_set_default_moderation(priv);
index f1a5500ff72de1ee07d7d8bfd2dc91e02e2d6388..34f2fdf4fe5d214154714d3e9a672e47e23afdae 100644 (file)
@@ -50,10 +50,14 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
        context->mtu_msgmax = 0xff;
        if (!is_tx && !rss)
                context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
-       if (is_tx)
+       if (is_tx) {
                context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
-       else
+               if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
+                       context->params2 |= MLX4_QP_BIT_FPP;
+
+       } else {
                context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
+       }
        context->usr_page = cpu_to_be32(mdev->priv_uar.index);
        context->local_qpn = cpu_to_be32(qpn);
        context->pri_path.ackto = 1 & 0x07;
index 2ba5d368edce34e229ffdbe30d7e70aab58bbaed..698d60de1255269c11363c0196fd16800d5c4f13 100644 (file)
@@ -162,6 +162,10 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
                if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
                                     frag_info, GFP_KERNEL | __GFP_COLD))
                        goto out;
+
+               en_dbg(DRV, priv, "  frag %d allocator: - size:%d frags:%d\n",
+                      i, ring->page_alloc[i].page_size,
+                      atomic_read(&ring->page_alloc[i].page->_count));
        }
        return 0;
 
@@ -1059,8 +1063,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
                        (eff_mtu > buf_size + frag_sizes[i]) ?
                                frag_sizes[i] : eff_mtu - buf_size;
                priv->frag_info[i].frag_prefix_size = buf_size;
-               priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
-                                                      SMP_CACHE_BYTES);
+               priv->frag_info[i].frag_stride =
+                               ALIGN(priv->frag_info[i].frag_size,
+                                     SMP_CACHE_BYTES);
                buf_size += priv->frag_info[i].frag_size;
                i++;
        }
index dbabfae3a3deb43cc0e0b1ff6f4c1ac90c23b4c6..5a21e5dc94cbae7f8c35d989aba039afcb5c4f77 100644 (file)
@@ -142,7 +142,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [17] = "Asymmetric EQs support",
                [18] = "More than 80 VFs support",
                [19] = "Performance optimized for limited rule configuration flow steering support",
-               [20] = "Recoverable error events support"
+               [20] = "Recoverable error events support",
+               [21] = "Port Remap support"
        };
        int i;
 
@@ -863,6 +864,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
        MLX4_GET(dev_cap->bmme_flags, outbox,
                 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+       if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
        if (field & 0x20)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
@@ -1120,9 +1123,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        field &= 0x7f;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
 
-       /* For guests, disable mw type 2 */
+       /* For guests, disable mw type 2 and port remap */
        MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
        bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
+       bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
        MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
 
        /* turn off device-managed steering capability if not enabled */
@@ -2100,13 +2104,16 @@ struct mlx4_config_dev {
        __be32  rsvd1[3];
        __be16  vxlan_udp_dport;
        __be16  rsvd2;
-       __be32  rsvd3[27];
-       __be16  rsvd4;
-       u8      rsvd5;
+       __be32  rsvd3;
+       __be32  roce_flags;
+       __be32  rsvd4[25];
+       __be16  rsvd5;
+       u8      rsvd6;
        u8      rx_checksum_val;
 };
 
 #define MLX4_VXLAN_UDP_DPORT (1 << 0)
+#define MLX4_DISABLE_RX_PORT BIT(18)
 
 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
 {
@@ -2162,7 +2169,7 @@ static const u8 config_dev_csum_flags[] = {
 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
                              struct mlx4_config_dev_params *params)
 {
-       struct mlx4_config_dev config_dev;
+       struct mlx4_config_dev config_dev = {0};
        int err;
        u8 csum_mask;
 
@@ -2209,6 +2216,45 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
 }
 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
 
+#define CONFIG_DISABLE_RX_PORT BIT(15)
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
+{
+       struct mlx4_config_dev config_dev;
+
+       memset(&config_dev, 0, sizeof(config_dev));
+       config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
+       if (dis)
+               config_dev.roce_flags =
+                       cpu_to_be32(CONFIG_DISABLE_RX_PORT);
+
+       return mlx4_CONFIG_DEV_set(dev, &config_dev);
+}
+
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct {
+               __be32 v_port1;
+               __be32 v_port2;
+       } *v2p;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return -ENOMEM;
+
+       v2p = mailbox->buf;
+       v2p->v_port1 = cpu_to_be32(port1);
+       v2p->v_port2 = cpu_to_be32(port2);
+
+       err = mlx4_cmd(dev, mailbox->dma, 0,
+                      MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
 
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 {
index 68d2bad325d5c5ef1c086167aa55f3af60b94823..6fce58718837202bd82739dd8592b753ece7ef42 100644 (file)
 
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/errno.h>
 
 #include "mlx4.h"
 
 struct mlx4_device_context {
        struct list_head        list;
+       struct list_head        bond_list;
        struct mlx4_interface  *intf;
        void                   *context;
 };
@@ -115,6 +117,58 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
 
+int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
+       unsigned long flags;
+       int ret;
+       LIST_HEAD(bond_list);
+
+       if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
+               return -ENOTSUPP;
+
+       ret = mlx4_disable_rx_port_check(dev, enable);
+       if (ret) {
+               mlx4_err(dev, "Fail to %s rx port check\n",
+                        enable ? "enable" : "disable");
+               return ret;
+       }
+       if (enable) {
+               dev->flags |= MLX4_FLAG_BONDED;
+       } else {
+                ret = mlx4_virt2phy_port_map(dev, 1, 2);
+               if (ret) {
+                       mlx4_err(dev, "Fail to reset port map\n");
+                       return ret;
+               }
+               dev->flags &= ~MLX4_FLAG_BONDED;
+       }
+
+       spin_lock_irqsave(&priv->ctx_lock, flags);
+       list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
+               if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
+                       list_add_tail(&dev_ctx->bond_list, &bond_list);
+                       list_del(&dev_ctx->list);
+               }
+       }
+       spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+       list_for_each_entry(dev_ctx, &bond_list, bond_list) {
+               dev_ctx->intf->remove(dev, dev_ctx->context);
+               dev_ctx->context =  dev_ctx->intf->add(dev);
+
+               spin_lock_irqsave(&priv->ctx_lock, flags);
+               list_add_tail(&dev_ctx->list, &priv->ctx_list);
+               spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+               mlx4_dbg(dev, "Interface for protocol %d restarted when bonded mode is %s\n",
+                        dev_ctx->intf->protocol, enable ?
+                        "enabled" : "disabled");
+       }
+       return 0;
+}
+
 void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
                         unsigned long param)
 {
index cc9f484392446e74543ded5a5626e40f518f1367..7e487223489a467071155f0e67ea052ba2b18949 100644 (file)
@@ -251,7 +251,8 @@ static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
                if (mlx4_is_master(dev))
                        dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
        } else {
-               mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n");
+               if (cache_line_size() != 32  && cache_line_size() != 64)
+                       mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
                dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
                dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
        }
@@ -1160,6 +1161,91 @@ err_set_port:
        return err ? err : count;
 }
 
+int mlx4_bond(struct mlx4_dev *dev)
+{
+       int ret = 0;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mutex_lock(&priv->bond_mutex);
+
+       if (!mlx4_is_bonded(dev))
+               ret = mlx4_do_bond(dev, true);
+       else
+               ret = 0;
+
+       mutex_unlock(&priv->bond_mutex);
+       if (ret)
+               mlx4_err(dev, "Failed to bond device: %d\n", ret);
+       else
+               mlx4_dbg(dev, "Device is bonded\n");
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_bond);
+
+int mlx4_unbond(struct mlx4_dev *dev)
+{
+       int ret = 0;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mutex_lock(&priv->bond_mutex);
+
+       if (mlx4_is_bonded(dev))
+               ret = mlx4_do_bond(dev, false);
+
+       mutex_unlock(&priv->bond_mutex);
+       if (ret)
+               mlx4_err(dev, "Failed to unbond device: %d\n", ret);
+       else
+               mlx4_dbg(dev, "Device is unbonded\n");
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_unbond);
+
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
+{
+       u8 port1 = v2p->port1;
+       u8 port2 = v2p->port2;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err;
+
+       if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
+               return -ENOTSUPP;
+
+       mutex_lock(&priv->bond_mutex);
+
+       /* zero means keep current mapping for this port */
+       if (port1 == 0)
+               port1 = priv->v2p.port1;
+       if (port2 == 0)
+               port2 = priv->v2p.port2;
+
+       if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
+           (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
+           (port1 == 2 && port2 == 1)) {
+               /* besides boundary checks cross mapping makes
+                * no sense and therefore not allowed */
+               err = -EINVAL;
+       } else if ((port1 == priv->v2p.port1) &&
+                (port2 == priv->v2p.port2)) {
+               err = 0;
+       } else {
+               err = mlx4_virt2phy_port_map(dev, port1, port2);
+               if (!err) {
+                       mlx4_dbg(dev, "port map changed: [%d][%d]\n",
+                                port1, port2);
+                       priv->v2p.port1 = port1;
+                       priv->v2p.port2 = port2;
+               } else {
+                       mlx4_err(dev, "Failed to change port mape: %d\n", err);
+               }
+       }
+
+       mutex_unlock(&priv->bond_mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_port_map_set);
+
 static int mlx4_load_fw(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2638,6 +2724,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
        spin_lock_init(&priv->ctx_lock);
 
        mutex_init(&priv->port_mutex);
+       mutex_init(&priv->bond_mutex);
 
        INIT_LIST_HEAD(&priv->pgdir_list);
        mutex_init(&priv->pgdir_mutex);
@@ -2934,6 +3021,9 @@ slave_start:
                        goto err_port;
        }
 
+       priv->v2p.port1 = 1;
+       priv->v2p.port2 = 2;
+
        err = mlx4_register_device(dev);
        if (err)
                goto err_port;
index 148dc0945aabcc7cfcaa18d3579fc1b050ac2a58..803f17653da71b0e5a3dfb0502f0ff9c5b6e6ee9 100644 (file)
@@ -885,6 +885,8 @@ struct mlx4_priv {
        int                     reserved_mtts;
        int                     fs_hash_mode;
        u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
+       struct mlx4_port_map    v2p; /* cached port mapping configuration */
+       struct mutex            bond_mutex; /* for bond mode */
        __be64                  slave_node_guids[MLX4_MFUNC_MAX];
 
        atomic_t                opreq_count;
@@ -1364,6 +1366,7 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
 /* Returns the VF index of slave */
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
 int mlx4_config_mad_demux(struct mlx4_dev *dev);
+int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
 
 enum mlx4_zone_flags {
        MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO   = 1UL << 0,
index 944a112dff374ef919a216e5ee13415b9f3974bd..2a8268e6be15d0b8682b8ad47bb4bb4ac071b243 100644 (file)
@@ -390,6 +390,7 @@ struct mlx4_en_dev {
        struct pci_dev          *pdev;
        struct mutex            state_lock;
        struct net_device       *pndev[MLX4_MAX_PORTS + 1];
+       struct net_device       *upper[MLX4_MAX_PORTS + 1];
        u32                     port_cnt;
        bool                    device_up;
        struct mlx4_en_profile  profile;
@@ -410,6 +411,7 @@ struct mlx4_en_dev {
        unsigned long           overflow_period;
        struct ptp_clock        *ptp_clock;
        struct ptp_clock_info   ptp_clock_info;
+       struct notifier_block   nb;
 };
 
 
@@ -845,6 +847,9 @@ int mlx4_en_reset_config(struct net_device *dev,
                         struct hwtstamp_config ts_config,
                         netdev_features_t new_features);
 
+int mlx4_en_netdev_event(struct notifier_block *this,
+                        unsigned long event, void *ptr);
+
 /*
  * Functions for time stamping
  */
index d21e884a08387c82f6a5f3b58ec2a760aa198ea7..78f51e103880d4dcae7745ec5cb5b2425e370e73 100644 (file)
@@ -598,14 +598,11 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
        if (err)
                return err;
 
-       mpt_entry->start       = cpu_to_be64(mr->iova);
-       mpt_entry->length      = cpu_to_be64(mr->size);
-       mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
-
-       mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
-                                          MLX4_MPT_PD_FLAG_EN_INV);
-       mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
-                                          MLX4_MPT_FLAG_SW_OWNS);
+       mpt_entry->start       = cpu_to_be64(iova);
+       mpt_entry->length      = cpu_to_be64(size);
+       mpt_entry->entity_size = cpu_to_be32(page_shift);
+       mpt_entry->flags    &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
+                                          MLX4_MPT_FLAG_SW_OWNS));
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
index 1586ecce13c719b1eaddfb43f0d96d80e27b6389..2bb8553bd9054b25456ec694ee25696e93ebde25 100644 (file)
@@ -882,6 +882,8 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
                context->flags &= cpu_to_be32(~(0xf << 28));
                context->flags |= cpu_to_be32(states[i + 1] << 28);
+               if (states[i + 1] != MLX4_QP_STATE_RTR)
+                       context->params2 &= ~MLX4_QP_BIT_FPP;
                err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
                                     context, 0, 0, qp);
                if (err) {
index 79feeb6b0d87b9ec07c84e412f9cbb2fd2d9a499..486e3d26cd4a9ef4bb6a23995b85ac50cd413776 100644 (file)
@@ -2541,7 +2541,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
        /* Make sure that the PD bits related to the slave id are zeros. */
        pd = mr_get_pd(inbox->buf);
        pd_slave = (pd >> 17) & 0x7f;
-       if (pd_slave != 0 && pd_slave != slave) {
+       if (pd_slave != 0 && --pd_slave != slave) {
                err = -EPERM;
                goto ex_abort;
        }
@@ -2944,6 +2944,9 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
 
+       if (slave != mlx4_master_func_num(dev))
+               qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
+
        switch (qp_type) {
        case MLX4_QP_ST_RC:
        case MLX4_QP_ST_XRC:
index 3f4525619a07efb6de4711189565a1f06e624293..d6651937d8996188b249fe911b09e9cbbdc480a5 100644 (file)
@@ -903,12 +903,12 @@ static void remove_one(struct pci_dev *pdev)
 }
 
 static const struct pci_device_id mlx5_core_pci_table[] = {
-       { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
-       { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
-       { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
-       { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
-       { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
-       { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
+       { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
+       { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
+       { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */
+       { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */
+       { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
+       { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */
        { 0, }
 };
 
index 71af98bb72cbeb1cc2376013847835c4195ec805..1412f5af05ecf521e41ff109dc1a24e7621090ce 100644 (file)
@@ -4226,8 +4226,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
                mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
 #endif
        myri10ge_free_slices(mgp);
-       if (mgp->msix_vectors != NULL)
-               kfree(mgp->msix_vectors);
+       kfree(mgp->msix_vectors);
        dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
                          mgp->cmd, mgp->cmd_bus);
 
index afb8efb25781aca2e1ed1f4b591247cb5264db89..e0c31e3947d1091371bfa742fbea5cee9743002d 100644 (file)
@@ -176,9 +176,7 @@ netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
 static void
 netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
 {
-       if (recv_ctx->sds_rings != NULL)
-               kfree(recv_ctx->sds_rings);
-
+       kfree(recv_ctx->sds_rings);
        recv_ctx->sds_rings = NULL;
 }
 
index 763ada18ad3de25acb0fe9a37349f518b96c0d72..f59509486113150fb8162fa834ad8272e7823d4b 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
 
+#include "cpsw.h"
+
 #define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
 #define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
 
index 345cd25637726284ebc6530af937866d77c4d474..84f5ce525750d2c44ea80194975c92602f5793b5 100644 (file)
@@ -2011,12 +2011,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 quit:
        if (gbe_dev->hw_stats)
                devm_kfree(dev, gbe_dev->hw_stats);
-       if (gbe_dev->ale)
-               cpsw_ale_destroy(gbe_dev->ale);
+       cpsw_ale_destroy(gbe_dev->ale);
        if (gbe_dev->ss_regs)
                devm_iounmap(dev, gbe_dev->ss_regs);
-       if (interfaces)
-               of_node_put(interfaces);
+       of_node_put(interfaces);
        devm_kfree(dev, gbe_dev);
        return ret;
 }
index f2ff0074aac9b272e4a136c033890abd2db12877..691ec936e88d53601e3ed257278708ae823670c8 100644 (file)
@@ -2540,7 +2540,7 @@ static void tlan_phy_power_down(struct net_device *dev)
         * This is abitrary.  It is intended to make sure the
         * transceiver settles.
         */
-       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
+       tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
 
 }
 
@@ -2561,7 +2561,7 @@ static void tlan_phy_power_up(struct net_device *dev)
         * transceiver.  The TLAN docs say both 50 ms and
         * 500 ms, so do the longer, just in case.
         */
-       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
+       tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
 
 }
 
@@ -2593,7 +2593,7 @@ static void tlan_phy_reset(struct net_device *dev)
         * I don't remember why I wait this long.
         * I've changed this to 50ms, as it seems long enough.
         */
-       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
+       tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
 
 }
 
@@ -2658,7 +2658,7 @@ static void tlan_phy_start_link(struct net_device *dev)
                data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
                        | TLAN_NET_CFG_PHY_EN;
                tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
-               tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+               tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
                return;
        } else if (priv->phy_num == 0) {
                control = 0;
@@ -2725,7 +2725,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
            (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
            (priv->phy_num != 0)) {
                priv->phy_num = 0;
-               tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+               tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
                return;
        }
 
@@ -2744,7 +2744,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
 
        /* Wait for 100 ms.  No reason in partiticular.
         */
-       tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
+       tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);
 
 }
 
@@ -2796,7 +2796,7 @@ static void tlan_phy_monitor(unsigned long data)
                                /* set to external PHY */
                                priv->phy_num = 1;
                                /* restart autonegotiation */
-                               tlan_set_timer(dev, 4 * HZ / 10,
+                               tlan_set_timer(dev, msecs_to_jiffies(400),
                                               TLAN_TIMER_PHY_PDOWN);
                                return;
                        }
index a43c8acb7268d6ef897f5a992101e0ee125231ff..181b349b060ee552dcc5be089c8cf616d7d965b2 100644 (file)
@@ -44,9 +44,9 @@
 #define        CC2520_FREG_MASK        0x3F
 
 /* status byte values */
-#define        CC2520_STATUS_XOSC32M_STABLE    (1 << 7)
-#define        CC2520_STATUS_RSSI_VALID        (1 << 6)
-#define        CC2520_STATUS_TX_UNDERFLOW      (1 << 3)
+#define        CC2520_STATUS_XOSC32M_STABLE    BIT(7)
+#define        CC2520_STATUS_RSSI_VALID        BIT(6)
+#define        CC2520_STATUS_TX_UNDERFLOW      BIT(3)
 
 /* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
 #define        CC2520_MINCHANNEL               11
@@ -549,14 +549,14 @@ cc2520_ed(struct ieee802154_hw *hw, u8 *level)
        u8 rssi;
        int ret;
 
-       ret = cc2520_read_register(priv , CC2520_RSSISTAT, &status);
+       ret = cc2520_read_register(priv, CC2520_RSSISTAT, &status);
        if (ret)
                return ret;
 
        if (status != RSSI_VALID)
                return -EINVAL;
 
-       ret = cc2520_read_register(priv , CC2520_RSSI, &rssi);
+       ret = cc2520_read_register(priv, CC2520_RSSI, &rssi);
        if (ret)
                return ret;
 
index 31bac2a21ce302562edcf0b3a7b3786e3ad2054b..c184717e8b283b3d4ae5c57ef5290c2d311ce215 100644 (file)
@@ -558,7 +558,6 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                                          u32 data)
 {
        size_t start, offset, plen;
-       __wsum delta;
 
        if (skb->remcsum_offload)
                return vh;
@@ -580,12 +579,7 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                        return NULL;
        }
 
-       delta = remcsum_adjust((void *)vh + hdrlen,
-                              NAPI_GRO_CB(skb)->csum, start, offset);
-
-       /* Adjust skb->csum since we changed the packet */
-       skb->csum = csum_add(skb->csum, delta);
-       NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+       skb_gro_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
 
        skb->remcsum_offload = 1;
 
@@ -1159,7 +1153,6 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
                                      size_t hdrlen, u32 data)
 {
        size_t start, offset, plen;
-       __wsum delta;
 
        if (skb->remcsum_offload) {
                /* Already processed in GRO path */
@@ -1179,14 +1172,7 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
 
        vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
 
-       if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
-               __skb_checksum_complete(skb);
-
-       delta = remcsum_adjust((void *)vh + hdrlen,
-                              skb->csum, start, offset);
-
-       /* Adjust skb->csum since we changed the packet */
-       skb->csum = csum_add(skb->csum, delta);
+       skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
 
        return vh;
 }
index 08f293411bf0957cc720c67b3b1a1e2bfe0733a3..60a524b732079c6487a68b5494d8ad74c938815f 100644 (file)
@@ -76,6 +76,9 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
                if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
                        arg.key_flags = WMI_KEY_PAIRWISE;
                break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               /* this one needs to be done in software */
+               return 1;
        default:
                ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
                return -EOPNOTSUPP;
@@ -5035,6 +5038,13 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
 
 int ath10k_mac_register(struct ath10k *ar)
 {
+       static const u32 cipher_suites[] = {
+               WLAN_CIPHER_SUITE_WEP40,
+               WLAN_CIPHER_SUITE_WEP104,
+               WLAN_CIPHER_SUITE_TKIP,
+               WLAN_CIPHER_SUITE_CCMP,
+               WLAN_CIPHER_SUITE_AES_CMAC,
+       };
        struct ieee80211_supported_band *band;
        struct ieee80211_sta_vht_cap vht_cap;
        struct ieee80211_sta_ht_cap ht_cap;
@@ -5108,7 +5118,8 @@ int ath10k_mac_register(struct ath10k *ar)
                        IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                        IEEE80211_HW_HAS_RATE_CONTROL |
                        IEEE80211_HW_AP_LINK_PS |
-                       IEEE80211_HW_SPECTRUM_MGMT;
+                       IEEE80211_HW_SPECTRUM_MGMT |
+                       IEEE80211_HW_SW_CRYPTO_CONTROL;
 
        ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
 
@@ -5182,6 +5193,9 @@ int ath10k_mac_register(struct ath10k *ar)
                goto err_free;
        }
 
+       ar->hw->wiphy->cipher_suites = cipher_suites;
+       ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
        ret = ieee80211_register_hw(ar->hw);
        if (ret) {
                ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
index c81b06bcf827b8b1fb261c1f360a2ce3d14e3d93..2e66f34ebb79c9caba0b40027e412b17d85dcce6 100644 (file)
@@ -1372,7 +1372,8 @@ csio_config_device_caps(struct csio_hw *hw)
        }
 
        /* Validate device capabilities */
-       if (csio_hw_validate_caps(hw, mbp))
+       rv = csio_hw_validate_caps(hw, mbp);
+       if (rv != 0)
                goto out;
 
        /* Don't config device capabilities if already configured */
@@ -1776,7 +1777,8 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
        }
 
        /* Validate device capabilities */
-       if (csio_hw_validate_caps(hw, mbp))
+       rv = csio_hw_validate_caps(hw, mbp);
+       if (rv != 0)
                goto bye;
        /*
         * Note that we're operating with parameters
index a5f624f755df0f0eee29b3169b4d13dc4ebdace5..b56a11d817be32318976b15111dfcee8b9e58bcb 100644 (file)
 #define FW_FNAME_T5                            "cxgb4/t5fw.bin"
 #define FW_CFG_NAME_T5                         "cxgb4/t5-config.txt"
 
-#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0B
-#define T5FW_VERSION_MICRO 0x1B
-#define T5FW_VERSION_BUILD 0x00
-
 #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
 #define CHELSIO_CHIP_FPGA          0x100
 #define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf)
@@ -74,6 +69,7 @@ static inline int csio_is_t5(uint16_t chip)
        { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
 
 #include "t4fw_api.h"
+#include "t4fw_version.h"
 
 #define FW_VERSION(chip) ( \
                FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
index 1132c41d99cebc80026b8254b82bad0b03209d4c..9451787ca7f299715a97e34692315d69e7968910 100644 (file)
@@ -327,7 +327,8 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 }
 
 #define CSIO_ADVERT_MASK     (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
-                             FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+                             FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G |\
+                             FW_PORT_CAP_ANEG)
 
 /*
  * csio_mb_port- FW PORT command helper
index 6906f76332f4a91020a45ef04b538ca62b0cf3fe..e022cc40303d69cd2664cab909157672cd58f744 100644 (file)
@@ -84,10 +84,6 @@ struct vhost_net_ubuf_ref {
 
 struct vhost_net_virtqueue {
        struct vhost_virtqueue vq;
-       /* hdr is used to store the virtio header.
-        * Since each iovec has >= 1 byte length, we never need more than
-        * header length entries to store the header. */
-       struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
        size_t vhost_hlen;
        size_t sock_hlen;
        /* vhost zerocopy support fields below: */
@@ -235,44 +231,6 @@ static bool vhost_sock_zcopy(struct socket *sock)
                sock_flag(sock->sk, SOCK_ZEROCOPY);
 }
 
-/* Pop first len bytes from iovec. Return number of segments used. */
-static int move_iovec_hdr(struct iovec *from, struct iovec *to,
-                         size_t len, int iov_count)
-{
-       int seg = 0;
-       size_t size;
-
-       while (len && seg < iov_count) {
-               size = min(from->iov_len, len);
-               to->iov_base = from->iov_base;
-               to->iov_len = size;
-               from->iov_len -= size;
-               from->iov_base += size;
-               len -= size;
-               ++from;
-               ++to;
-               ++seg;
-       }
-       return seg;
-}
-/* Copy iovec entries for len bytes from iovec. */
-static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
-                          size_t len, int iovcount)
-{
-       int seg = 0;
-       size_t size;
-
-       while (len && seg < iovcount) {
-               size = min(from->iov_len, len);
-               to->iov_base = from->iov_base;
-               to->iov_len = size;
-               len -= size;
-               ++from;
-               ++to;
-               ++seg;
-       }
-}
-
 /* In case of DMA done not in order in lower device driver for some reason.
  * upend_idx is used to track end of used idx, done_idx is used to track head
  * of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -336,7 +294,7 @@ static void handle_tx(struct vhost_net *net)
 {
        struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
        struct vhost_virtqueue *vq = &nvq->vq;
-       unsigned out, in, s;
+       unsigned out, in;
        int head;
        struct msghdr msg = {
                .msg_name = NULL,
@@ -395,16 +353,17 @@ static void handle_tx(struct vhost_net *net)
                        break;
                }
                /* Skip header. TODO: support TSO. */
-               s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
                len = iov_length(vq->iov, out);
                iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
+               iov_iter_advance(&msg.msg_iter, hdr_size);
                /* Sanity check */
-               if (!len) {
+               if (!iov_iter_count(&msg.msg_iter)) {
                        vq_err(vq, "Unexpected header len for TX: "
                               "%zd expected %zd\n",
-                              iov_length(nvq->hdr, s), hdr_size);
+                              len, hdr_size);
                        break;
                }
+               len = iov_iter_count(&msg.msg_iter);
 
                zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
                                   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
@@ -569,9 +528,9 @@ static void handle_rx(struct vhost_net *net)
                .msg_controllen = 0,
                .msg_flags = MSG_DONTWAIT,
        };
-       struct virtio_net_hdr_mrg_rxbuf hdr = {
-               .hdr.flags = 0,
-               .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
+       struct virtio_net_hdr hdr = {
+               .flags = 0,
+               .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        size_t total_len = 0;
        int err, mergeable;
@@ -579,6 +538,7 @@ static void handle_rx(struct vhost_net *net)
        size_t vhost_hlen, sock_hlen;
        size_t vhost_len, sock_len;
        struct socket *sock;
+       struct iov_iter fixup;
 
        mutex_lock(&vq->mutex);
        sock = vq->private_data;
@@ -623,14 +583,19 @@ static void handle_rx(struct vhost_net *net)
                        break;
                }
                /* We don't need to be notified again. */
-               if (unlikely((vhost_hlen)))
-                       /* Skip header. TODO: support TSO. */
-                       move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
-               else
-                       /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
-                        * needed because recvmsg can modify msg_iov. */
-                       copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
-               iov_iter_init(&msg.msg_iter, READ, vq->iov, in, sock_len);
+               iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
+               fixup = msg.msg_iter;
+               if (unlikely((vhost_hlen))) {
+                       /* We will supply the header ourselves
+                        * TODO: support TSO.
+                        */
+                       iov_iter_advance(&msg.msg_iter, vhost_hlen);
+               } else {
+                       /* It'll come from socket; we'll need to patch
+                        * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+                        */
+                       iov_iter_advance(&fixup, sizeof(hdr));
+               }
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
                /* Userspace might have consumed the packet meanwhile:
@@ -642,18 +607,18 @@ static void handle_rx(struct vhost_net *net)
                        vhost_discard_vq_desc(vq, headcount);
                        continue;
                }
+               /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
                if (unlikely(vhost_hlen) &&
-                   memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
-                                     vhost_hlen)) {
+                   copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
                        vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
                               vq->iov->iov_base);
                        break;
                }
-               /* TODO: Should check and handle checksum. */
+               /* Supply (or replace) ->num_buffers if VIRTIO_NET_F_MRG_RXBUF
+                * TODO: Should check and handle checksum.
+                */
                if (likely(mergeable) &&
-                   memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
-                                     offsetof(typeof(hdr), num_buffers),
-                                     sizeof hdr.num_buffers)) {
+                   copy_to_iter(&headcount, 2, &fixup) != 2) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
index d695b1673ae532d9ac873bdc5661ccab84995c04..dc78d87e0fc2c5e14832adf674899823ac223c5f 100644 (file)
@@ -1079,7 +1079,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                               req_size, vq->iov[0].iov_len);
                        break;
                }
-               ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
+               ret = copy_from_user(req, vq->iov[0].iov_base, req_size);
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        break;
index cb807d0ea498df3a197d0c3ed70d5548e57d97b7..2ee28266fd0704fd1e1c4c64a6f19c8d863727fd 100644 (file)
@@ -1125,6 +1125,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
        struct vring_desc desc;
        unsigned int i = 0, count, found = 0;
        u32 len = vhost32_to_cpu(vq, indirect->len);
+       struct iov_iter from;
        int ret;
 
        /* Sanity check */
@@ -1142,6 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
                vq_err(vq, "Translation failure %d in indirect.\n", ret);
                return ret;
        }
+       iov_iter_init(&from, READ, vq->indirect, ret, len);
 
        /* We will use the result as an address to read from, so most
         * architectures only need a compiler barrier here. */
@@ -1164,8 +1166,8 @@ static int get_indirect(struct vhost_virtqueue *vq,
                               i, count);
                        return -EINVAL;
                }
-               if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
-                                             vq->indirect, sizeof desc))) {
+               if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
+                            sizeof(desc))) {
                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
                               i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
                        return -EINVAL;
index 06e14bfb3496c9d9aab923243c104addfbff7196..dbc732e9a5c01eb18ab91af910a997881dfe5fd8 100644 (file)
@@ -306,8 +306,8 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
 
                        _debug("- range %u-%u%s",
                               offset, to, msg->msg_flags ? " [more]" : "");
-                       iov_iter_init(&msg->msg_iter, WRITE,
-                                     (struct iovec *) iov, 1, to - offset);
+                       iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
+                                     iov, 1, to - offset);
 
                        /* have to change the state *before* sending the last
                         * packet as RxRPC might give us the reply before it
@@ -384,7 +384,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)iov, 1,
+       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
                      call->request_size);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
@@ -770,7 +770,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
 void afs_send_empty_reply(struct afs_call *call)
 {
        struct msghdr msg;
-       struct iovec iov[1];
+       struct kvec iov[1];
 
        _enter("");
 
@@ -778,7 +778,7 @@ void afs_send_empty_reply(struct afs_call *call)
        iov[0].iov_len          = 0;
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_init(&msg.msg_iter, WRITE, iov, 0, 0); /* WTF? */
+       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 0, 0);     /* WTF? */
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = 0;
@@ -805,7 +805,7 @@ void afs_send_empty_reply(struct afs_call *call)
 void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 {
        struct msghdr msg;
-       struct iovec iov[1];
+       struct kvec iov[1];
        int n;
 
        _enter("");
@@ -814,7 +814,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        iov[0].iov_len          = len;
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_init(&msg.msg_iter, WRITE, iov, 1, len);
+       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = 0;
index cd62bf4289e9573301b25c5559ac91b412883dc6..88ea64e9a91ce5df933a84ff8cfdc4ec3e66b5d0 100644 (file)
@@ -67,8 +67,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
 int af_alg_release(struct socket *sock);
 int af_alg_accept(struct sock *sk, struct socket *newsock);
 
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
-                  int write);
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
 void af_alg_free_sg(struct af_alg_sgl *sgl);
 
 int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
index 4f4eea8a62882914a86c11a084928a02225c6e0b..b9c7897dc5668c3fe59c30ab8505d334a6689168 100644 (file)
@@ -1017,6 +1017,15 @@ struct ieee80211_mmie {
        u8 mic[8];
 } __packed;
 
+/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */
+struct ieee80211_mmie_16 {
+       u8 element_id;
+       u8 length;
+       __le16 key_id;
+       u8 sequence_number[6];
+       u8 mic[16];
+} __packed;
+
 struct ieee80211_vendor_ie {
        u8 element_id;
        u8 len;
@@ -1994,9 +2003,15 @@ enum ieee80211_key_len {
        WLAN_KEY_LEN_WEP40 = 5,
        WLAN_KEY_LEN_WEP104 = 13,
        WLAN_KEY_LEN_CCMP = 16,
+       WLAN_KEY_LEN_CCMP_256 = 32,
        WLAN_KEY_LEN_TKIP = 32,
        WLAN_KEY_LEN_AES_CMAC = 16,
        WLAN_KEY_LEN_SMS4 = 32,
+       WLAN_KEY_LEN_GCMP = 16,
+       WLAN_KEY_LEN_GCMP_256 = 32,
+       WLAN_KEY_LEN_BIP_CMAC_256 = 32,
+       WLAN_KEY_LEN_BIP_GMAC_128 = 16,
+       WLAN_KEY_LEN_BIP_GMAC_256 = 32,
 };
 
 #define IEEE80211_WEP_IV_LEN           4
@@ -2004,9 +2019,16 @@ enum ieee80211_key_len {
 #define IEEE80211_CCMP_HDR_LEN         8
 #define IEEE80211_CCMP_MIC_LEN         8
 #define IEEE80211_CCMP_PN_LEN          6
+#define IEEE80211_CCMP_256_HDR_LEN     8
+#define IEEE80211_CCMP_256_MIC_LEN     16
+#define IEEE80211_CCMP_256_PN_LEN      6
 #define IEEE80211_TKIP_IV_LEN          8
 #define IEEE80211_TKIP_ICV_LEN         4
 #define IEEE80211_CMAC_PN_LEN          6
+#define IEEE80211_GMAC_PN_LEN          6
+#define IEEE80211_GCMP_HDR_LEN         8
+#define IEEE80211_GCMP_MIC_LEN         16
+#define IEEE80211_GCMP_PN_LEN          6
 
 /* Public action codes */
 enum ieee80211_pub_actioncode {
@@ -2230,6 +2252,11 @@ enum ieee80211_sa_query_action {
 #define WLAN_CIPHER_SUITE_WEP104       0x000FAC05
 #define WLAN_CIPHER_SUITE_AES_CMAC     0x000FAC06
 #define WLAN_CIPHER_SUITE_GCMP         0x000FAC08
+#define WLAN_CIPHER_SUITE_GCMP_256     0x000FAC09
+#define WLAN_CIPHER_SUITE_CCMP_256     0x000FAC0A
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
 
 #define WLAN_CIPHER_SUITE_SMS4         0x00147201
 
index ae95adc78509a1c683f24886a26b132a37189b50..7b6d4e9ff603828181239f5f64cbdf3a6d2cd282 100644 (file)
@@ -71,6 +71,7 @@ enum {
 
        /*master notify fw on finish for slave's flr*/
        MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+       MLX4_CMD_VIRT_PORT_MAP   = 0x5c,
        MLX4_CMD_GET_OP_REQ      = 0x59,
 
        /* TPT commands */
@@ -170,6 +171,12 @@ enum {
        MLX4_CMD_TIME_CLASS_C   = 60000,
 };
 
+enum {
+       /* virtual to physical port mapping opcode modifiers */
+       MLX4_GET_PORT_VIRT2PHY = 0x0,
+       MLX4_SET_PORT_VIRT2PHY = 0x1,
+};
+
 enum {
        MLX4_MAILBOX_SIZE       = 4096,
        MLX4_ACCESS_MEM_ALIGN   = 256,
index c95d659a39f28e3424d42891acbde38e1ffba97d..977b0b16443140fe17bf81a21c73e903b809cb3a 100644 (file)
@@ -70,6 +70,7 @@ enum {
        MLX4_FLAG_SLAVE         = 1 << 3,
        MLX4_FLAG_SRIOV         = 1 << 4,
        MLX4_FLAG_OLD_REG_MAC   = 1 << 6,
+       MLX4_FLAG_BONDED        = 1 << 7
 };
 
 enum {
@@ -201,7 +202,8 @@ enum {
        MLX4_DEV_CAP_FLAG2_SYS_EQS              = 1LL <<  17,
        MLX4_DEV_CAP_FLAG2_80_VFS               = 1LL <<  18,
        MLX4_DEV_CAP_FLAG2_FS_A0                = 1LL <<  19,
-       MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20
+       MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
+       MLX4_DEV_CAP_FLAG2_PORT_REMAP           = 1LL <<  21
 };
 
 enum {
@@ -253,9 +255,14 @@ enum {
        MLX4_BMME_FLAG_TYPE_2_WIN       = 1 <<  9,
        MLX4_BMME_FLAG_RESERVED_LKEY    = 1 << 10,
        MLX4_BMME_FLAG_FAST_REG_WR      = 1 << 11,
+       MLX4_BMME_FLAG_PORT_REMAP       = 1 << 24,
        MLX4_BMME_FLAG_VSD_INIT2RTR     = 1 << 28,
 };
 
+enum {
+       MLX4_FLAG_PORT_REMAP            = MLX4_BMME_FLAG_PORT_REMAP
+};
+
 enum mlx4_event {
        MLX4_EVENT_TYPE_COMP               = 0x00,
        MLX4_EVENT_TYPE_PATH_MIG           = 0x01,
@@ -1378,6 +1385,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
 int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
 
 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
index 022055c8fb2649456b19197f8417f8011ee2dc16..9553a73d2049e425bc72bf4fbb7c151ffc9bbe43 100644 (file)
@@ -49,6 +49,10 @@ enum mlx4_dev_event {
        MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
 };
 
+enum {
+       MLX4_INTFF_BONDING      = 1 << 0
+};
+
 struct mlx4_interface {
        void *                  (*add)   (struct mlx4_dev *dev);
        void                    (*remove)(struct mlx4_dev *dev, void *context);
@@ -57,11 +61,26 @@ struct mlx4_interface {
        void *                  (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
        struct list_head        list;
        enum mlx4_protocol      protocol;
+       int                     flags;
 };
 
 int mlx4_register_interface(struct mlx4_interface *intf);
 void mlx4_unregister_interface(struct mlx4_interface *intf);
 
+int mlx4_bond(struct mlx4_dev *dev);
+int mlx4_unbond(struct mlx4_dev *dev);
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+       return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+struct mlx4_port_map {
+       u8      port1;
+       u8      port2;
+};
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+
 void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
 
 static inline u64 mlx4_mac_to_u64(u8 *addr)
index 467ccdf94c981f7a7f90b04b6d2855c4b4a56d18..2bbc62aa818a374d1c488f2eecf4232230bd3f4e 100644 (file)
@@ -96,6 +96,7 @@ enum {
        MLX4_QP_BIT_RRE                         = 1 << 15,
        MLX4_QP_BIT_RWE                         = 1 << 14,
        MLX4_QP_BIT_RAE                         = 1 << 13,
+       MLX4_QP_BIT_FPP                         = 1 <<  3,
        MLX4_QP_BIT_RIC                         = 1 <<  4,
 };
 
index 16251e96e6aa506f0f19a8b0f3f38d06c6a5aee1..ce784d5018e07dab38807313fa4666e6b7cc4223 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/netdev_features.h>
 #include <linux/neighbour.h>
 #include <uapi/linux/netdevice.h>
+#include <uapi/linux/if_bonding.h>
 
 struct netpoll_info;
 struct device;
@@ -2056,6 +2057,7 @@ struct pcpu_sw_netstats {
 #define NETDEV_RESEND_IGMP     0x0016
 #define NETDEV_PRECHANGEMTU    0x0017 /* notify before mtu change happened */
 #define NETDEV_CHANGEINFODATA  0x0018
+#define NETDEV_BONDING_INFO    0x0019
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2318,6 +2320,21 @@ do {                                                                     \
                                           compute_pseudo(skb, proto)); \
 } while (0)
 
+static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+                                          int start, int offset)
+{
+       __wsum delta;
+
+       BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+       delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+
+       /* Adjust skb->csum since we changed the packet */
+       skb->csum = csum_add(skb->csum, delta);
+       NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+}
+
+
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
@@ -3479,6 +3496,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features);
 
+struct netdev_bonding_info {
+       ifslave slave;
+       ifbond  master;
+};
+
+struct netdev_notifier_bonding_info {
+       struct netdev_notifier_info info; /* must be first */
+       struct netdev_bonding_info  bonding_info;
+};
+
+void netdev_bonding_info_change(struct net_device *dev,
+                               struct netdev_bonding_info *bonding_info);
+
 static inline
 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
index e0337844358e164e189b58ae90c530f71963b6bf..58851275fed98c352fdd4995e95f1ebe806649e7 100644 (file)
@@ -18,6 +18,7 @@
 #ifndef _LINUX_RHASHTABLE_H
 #define _LINUX_RHASHTABLE_H
 
+#include <linux/compiler.h>
 #include <linux/list_nulls.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
@@ -111,6 +112,7 @@ struct rhashtable_params {
  * @p: Configuration parameters
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
+ * @walkers: List of active walkers
  * @being_destroyed: True if table is set up for destruction
  */
 struct rhashtable {
@@ -121,9 +123,36 @@ struct rhashtable {
        struct rhashtable_params        p;
        struct work_struct              run_work;
        struct mutex                    mutex;
+       struct list_head                walkers;
        bool                            being_destroyed;
 };
 
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @resize: Resize event occured
+ */
+struct rhashtable_walker {
+       struct list_head list;
+       bool resize;
+};
+
+/**
+ * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+       struct rhashtable *ht;
+       struct rhash_head *p;
+       struct rhashtable_walker *walker;
+       unsigned int slot;
+       unsigned int skip;
+};
+
 static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
 {
        return NULLS_MARKER(ht->p.nulls_base + hash);
@@ -179,6 +208,12 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
                                      bool (*compare)(void *, void *),
                                      void *arg);
 
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+void rhashtable_walk_exit(struct rhashtable_iter *iter);
+int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+
 void rhashtable_destroy(struct rhashtable *ht);
 
 #define rht_dereference(p, ht) \
index 85ab7d72b54c2f269812015b19544674bc6dcd72..111e665455c37c631b1ed8c3128cd05effab8498 100644 (file)
@@ -626,8 +626,11 @@ struct sk_buff {
        __u32                   hash;
        __be16                  vlan_proto;
        __u16                   vlan_tci;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       unsigned int    napi_id;
+#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
+       union {
+               unsigned int    napi_id;
+               unsigned int    sender_cpu;
+       };
 #endif
 #ifdef CONFIG_NETWORK_SECMARK
        __u32                   secmark;
@@ -2484,19 +2487,18 @@ static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
 }
 
 static inline int skb_add_data(struct sk_buff *skb,
-                              char __user *from, int copy)
+                              struct iov_iter *from, int copy)
 {
        const int off = skb->len;
 
        if (skb->ip_summed == CHECKSUM_NONE) {
-               int err = 0;
-               __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
-                                                           copy, 0, &err);
-               if (!err) {
+               __wsum csum = 0;
+               if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
+                                           &csum, from) == copy) {
                        skb->csum = csum_block_add(skb->csum, csum, off);
                        return 0;
                }
-       } else if (!copy_from_user(skb_put(skb, copy), from, copy))
+       } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
                return 0;
 
        __skb_trim(skb, off);
@@ -2693,8 +2695,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
 
 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
 {
-       /* XXX: stripping const */
-       return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
+       return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
 }
 
 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
@@ -3096,6 +3097,27 @@ do {                                                                     \
                                       compute_pseudo(skb, proto));     \
 } while (0)
 
+/* Update skbuf and packet to reflect the remote checksum offload operation.
+ * When called, ptr indicates the starting point for skb->csum when
+ * ip_summed is CHECKSUM_COMPLETE. If we need create checksum complete
+ * here, skb_postpull_rcsum is done so skb->csum start is ptr.
+ */
+static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
+                                      int start, int offset)
+{
+       __wsum delta;
+
+        if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+               __skb_checksum_complete(skb);
+               skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
+       }
+
+       delta = remcsum_adjust(ptr, skb->csum, start, offset);
+
+       /* Adjust skb->csum since we changed the packet */
+       skb->csum = csum_add(skb->csum, delta);
+}
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
index 6e49a14365dc1bea4bc442097dcaebb7bb0c08a9..5c19cba34dce023a49c45d776751190834b88889 100644 (file)
@@ -318,13 +318,6 @@ struct ucred {
 /* IPX options */
 #define IPX_TYPE       1
 
-extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 
-                                         struct iovec *iov, 
-                                         int offset, 
-                                         unsigned int len, __wsum *csump);
-extern unsigned long iov_pages(const struct iovec *iov, int offset,
-                              unsigned long nr_segs);
-
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
 
index 1c5e453f7ea997364a4d26852a2b532ec3b8acd5..3e0cb4ea3905905cd3c6c22082754818f5f7798d 100644 (file)
@@ -135,10 +135,4 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                       int offset, int len);
-int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
-                     int offset, int len);
-
 #endif
index 5691f752ce8f0b6fdd83f43502621f0ca06eebca..63df3a2a8ce54aa184d8a7806f62d9e9cf121050 100644 (file)
@@ -74,7 +74,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
 ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
                        int mode);
 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
-                         void *iov, size_t iov_size, int mode);
+                         struct msghdr *msg, size_t iov_size, int mode);
 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
                          struct msghdr *msg, size_t iov_size, int mode);
 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
index 7777124bff5586af30158ef3386278191fbdfb54..52863c3e0b132bc59224feef3f9e7acd678ef45a 100644 (file)
@@ -79,6 +79,8 @@ struct discovery_state {
        s8                      rssi;
        u16                     uuid_count;
        u8                      (*uuids)[16];
+       unsigned long           scan_start;
+       unsigned long           scan_duration;
 };
 
 struct hci_conn_hash {
@@ -145,6 +147,7 @@ struct oob_data {
        struct list_head list;
        bdaddr_t bdaddr;
        u8 bdaddr_type;
+       u8 present;
        u8 hash192[16];
        u8 rand192[16];
        u8 hash256[16];
@@ -232,6 +235,7 @@ struct hci_dev {
        __u16           conn_info_min_age;
        __u16           conn_info_max_age;
        __u8            ssp_debug_mode;
+       __u8            hw_error_code;
        __u32           clock;
 
        __u16           devid_source;
@@ -293,6 +297,7 @@ struct hci_dev {
 
        struct work_struct      power_on;
        struct delayed_work     power_off;
+       struct work_struct      error_reset;
 
        __u16                   discov_timeout;
        struct delayed_work     discov_off;
@@ -351,6 +356,7 @@ struct hci_dev {
        unsigned long           dev_flags;
 
        struct delayed_work     le_scan_disable;
+       struct delayed_work     le_scan_restart;
 
        __s8                    adv_tx_power;
        __u8                    adv_data[HCI_MAX_AD_LENGTH];
@@ -369,6 +375,7 @@ struct hci_dev {
        int (*setup)(struct hci_dev *hdev);
        int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
+       void (*hw_error)(struct hci_dev *hdev, u8 code);
        int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
 };
 
@@ -527,6 +534,8 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
        hdev->discovery.uuid_count = 0;
        kfree(hdev->discovery.uuids);
        hdev->discovery.uuids = NULL;
+       hdev->discovery.scan_start = 0;
+       hdev->discovery.scan_duration = 0;
 }
 
 bool hci_discovery_active(struct hci_dev *hdev);
@@ -1325,6 +1334,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
 #define DISCOV_INTERLEAVED_TIMEOUT     5120    /* msec */
 #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
 #define DISCOV_BREDR_INQUIRY_LEN       0x08
+#define DISCOV_LE_RESTART_DELAY                msecs_to_jiffies(200)   /* msec */
 
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
 int mgmt_new_settings(struct hci_dev *hdev);
@@ -1369,7 +1379,6 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
 void mgmt_auth_failed(struct hci_conn *conn, u8 status);
 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
                                    u8 status);
 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
index 95c34d5180fa1ec656bf996ed034817674b34ecf..e218a30f206134776ab2eed1a19a98c0d31ef395 100644 (file)
@@ -301,10 +301,6 @@ struct mgmt_cp_user_passkey_neg_reply {
 #define MGMT_OP_READ_LOCAL_OOB_DATA    0x0020
 #define MGMT_READ_LOCAL_OOB_DATA_SIZE  0
 struct mgmt_rp_read_local_oob_data {
-       __u8    hash[16];
-       __u8    rand[16];
-} __packed;
-struct mgmt_rp_read_local_oob_ext_data {
        __u8    hash192[16];
        __u8    rand192[16];
        __u8    hash256[16];
index 29f53eacac0a4c1101521c547a4c9ad5b6fd53b7..4e17095ad46a1fb196ac3724e2e0c33603e4b3fa 100644 (file)
@@ -150,6 +150,12 @@ struct bond_parm_tbl {
        int mode;
 };
 
+struct netdev_notify_work {
+       struct delayed_work     work;
+       struct slave            *slave;
+       struct net_device       *dev;
+};
+
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
@@ -243,6 +249,8 @@ struct bonding {
 #define bond_slave_get_rtnl(dev) \
        ((struct slave *) rtnl_dereference(dev->rx_handler_data))
 
+void bond_queue_slave_event(struct slave *slave);
+
 struct bond_vlan_tag {
        __be16          vlan_proto;
        unsigned short  vlan_id;
@@ -315,6 +323,7 @@ static inline void bond_set_active_slave(struct slave *slave)
 {
        if (slave->backup) {
                slave->backup = 0;
+               bond_queue_slave_event(slave);
                rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
        }
 }
@@ -323,6 +332,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
 {
        if (!slave->backup) {
                slave->backup = 1;
+               bond_queue_slave_event(slave);
                rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
        }
 }
@@ -336,6 +346,7 @@ static inline void bond_set_slave_state(struct slave *slave,
        slave->backup = slave_state;
        if (notify) {
                rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
+               bond_queue_slave_event(slave);
                slave->should_notify = 0;
        } else {
                if (slave->should_notify)
@@ -490,6 +501,12 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
        return slave->inactive;
 }
 
+static inline void bond_set_slave_link_state(struct slave *slave, int state)
+{
+       slave->link = state;
+       bond_queue_slave_event(slave);
+}
+
 static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
 {
        struct in_device *in_dev;
index 7b44ba0a76328eed71693790923f8ebf8f516c4c..64e09e1e809960be7daaef505e2e39dbab01c238 100644 (file)
@@ -1493,6 +1493,10 @@ struct cfg80211_match_set {
  * @rcu_head: RCU callback used to free the struct
  * @owner_nlportid: netlink portid of owner (if this should is a request
  *     owned by a particular socket)
+ * @delay: delay in seconds to use before starting the first scan
+ *     cycle.  The driver may ignore this parameter and start
+ *     immediately (or at any other time), if this feature is not
+ *     supported.
  */
 struct cfg80211_sched_scan_request {
        struct cfg80211_ssid *ssids;
@@ -1506,6 +1510,7 @@ struct cfg80211_sched_scan_request {
        struct cfg80211_match_set *match_sets;
        int n_match_sets;
        s32 min_rssi_thold;
+       u32 delay;
 
        u8 mac_addr[ETH_ALEN] __aligned(2);
        u8 mac_addr_mask[ETH_ALEN] __aligned(2);
index 275ee56152ade074418d5485fdc74ab38db8d8d3..d52914b75331a7bf2a384e10d9ec4a4ce427260a 100644 (file)
@@ -376,6 +376,12 @@ enum ieee80211_rssi_event {
  * @ssid_len: Length of SSID given in @ssid.
  * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
  * @txpower: TX power in dBm
+ * @txpower_type: TX power adjustment used to control per packet Transmit
+ *     Power Control (TPC) in lower driver for the current vif. In particular
+ *     TPC is enabled if value passed in %txpower_type is
+ *     NL80211_TX_POWER_LIMITED (allow using less than specified from
+ *     userspace), whereas TPC is disabled if %txpower_type is set to
+ *     NL80211_TX_POWER_FIXED (use value configured from userspace)
  * @p2p_noa_attr: P2P NoA attribute for P2P powersave
  */
 struct ieee80211_bss_conf {
@@ -411,6 +417,7 @@ struct ieee80211_bss_conf {
        size_t ssid_len;
        bool hidden_ssid;
        int txpower;
+       enum nl80211_tx_power_setting txpower_type;
        struct ieee80211_p2p_noa_attr p2p_noa_attr;
 };
 
@@ -1287,8 +1294,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
  *     that the key is pairwise rather then a shared key.
  * @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
- *     CCMP key if it requires CCMP encryption of management frames (MFP) to
- *     be done in software.
+ *     CCMP/GCMP key if it requires CCMP/GCMP encryption of management frames
+ *     (MFP) to be done in software.
  * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
  *     if space should be prepared for the IV, but the IV
  *     itself should not be generated. Do not set together with
@@ -1303,7 +1310,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  *     RX, if your crypto engine can't deal with TX you can also set the
  *     %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
  * @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
- *     driver for a CCMP key to indicate that is requires IV generation
+ *     driver for a CCMP/GCMP key to indicate that is requires IV generation
  *     only for managment frames (MFP).
  * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
  *     driver for a key to indicate that sufficient tailroom must always
@@ -1634,6 +1641,12 @@ struct ieee80211_tx_control {
  *     be created.  It is expected user-space will create vifs as
  *     desired (and thus have them named as desired).
  *
+ * @IEEE80211_HW_SW_CRYPTO_CONTROL: The driver wants to control which of the
+ *     crypto algorithms can be done in software - so don't automatically
+ *     try to fall back to it if hardware crypto fails, but do so only if
+ *     the driver returns 1. This also forces the driver to advertise its
+ *     supported cipher suites.
+ *
  * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
  *     queue mapping in order to use different queues (not just one per AC)
  *     for different virtual interfaces. See the doc section on HW queue
@@ -1681,6 +1694,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_MFP_CAPABLE                        = 1<<13,
        IEEE80211_HW_WANT_MONITOR_VIF                   = 1<<14,
        IEEE80211_HW_NO_AUTO_VIF                        = 1<<15,
+       IEEE80211_HW_SW_CRYPTO_CONTROL                  = 1<<16,
        /* free slots */
        IEEE80211_HW_REPORTS_TX_ACK_STATUS              = 1<<18,
        IEEE80211_HW_CONNECTION_MONITOR                 = 1<<19,
@@ -1955,6 +1969,11 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
  * added; if you return 0 then hw_key_idx must be assigned to the
  * hardware key index, you are free to use the full u8 range.
  *
+ * Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is
+ * set, mac80211 will not automatically fall back to software crypto if
+ * enabling hardware crypto failed. The set_key() call may also return the
+ * value 1 to permit this specific key/algorithm to be done in software.
+ *
  * When the cmd is %DISABLE_KEY then it must succeed.
  *
  * Note that it is permissible to not decrypt a frame even if a key
@@ -4079,6 +4098,10 @@ void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
  *     reverse order than in packet)
  * @aes_cmac: PN data, most significant byte first (big endian,
  *     reverse order than in packet)
+ * @aes_gmac: PN data, most significant byte first (big endian,
+ *     reverse order than in packet)
+ * @gcmp: PN data, most significant byte first (big endian,
+ *     reverse order than in packet)
  */
 struct ieee80211_key_seq {
        union {
@@ -4092,6 +4115,12 @@ struct ieee80211_key_seq {
                struct {
                        u8 pn[6];
                } aes_cmac;
+               struct {
+                       u8 pn[6];
+               } aes_gmac;
+               struct {
+                       u8 pn[6];
+               } gcmp;
        };
 };
 
@@ -4116,7 +4145,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
  * ieee80211_get_key_rx_seq - get key RX sequence counter
  *
  * @keyconf: the parameter passed with the set key
- * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
  *     the value on TID 0 is also used for non-QoS frames. For
  *     CMAC, only TID 0 is valid.
  * @seq: buffer to receive the sequence data
@@ -4152,7 +4181,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
  * ieee80211_set_key_rx_seq - set key RX sequence counter
  *
  * @keyconf: the parameter passed with the set key
- * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
  *     the value on TID 0 is also used for non-QoS frames. For
  *     CMAC, only TID 0 is valid.
  * @seq: new sequence data
index f074060bc5de763a3488054db67a67e7a0a9ab00..cc16d413f681c077f743547af38ab8d3293a0007 100644 (file)
@@ -59,7 +59,7 @@ extern struct pingv6_ops pingv6_ops;
 
 struct pingfakehdr {
        struct icmphdr icmph;
-       struct iovec *iov;
+       struct msghdr *msg;
        sa_family_t family;
        __wsum wcheck;
 };
index 511ef7c8889b100e36c26d361c00e72f8b10e972..d28b8fededd60946c65dfcfaefb1388442e1e8d9 100644 (file)
@@ -1803,27 +1803,25 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 }
 
 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
-                                          char __user *from, char *to,
+                                          struct iov_iter *from, char *to,
                                           int copy, int offset)
 {
        if (skb->ip_summed == CHECKSUM_NONE) {
-               int err = 0;
-               __wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
-               if (err)
-                       return err;
+               __wsum csum = 0;
+               if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+                       return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, offset);
        } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
-               if (!access_ok(VERIFY_READ, from, copy) ||
-                   __copy_from_user_nocache(to, from, copy))
+               if (copy_from_iter_nocache(to, copy, from) != copy)
                        return -EFAULT;
-       } else if (copy_from_user(to, from, copy))
+       } else if (copy_from_iter(to, copy, from) != copy)
                return -EFAULT;
 
        return 0;
 }
 
 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
-                                      char __user *from, int copy)
+                                      struct iov_iter *from, int copy)
 {
        int err, offset = skb->len;
 
@@ -1835,7 +1833,7 @@ static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
        return err;
 }
 
-static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
+static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
                                           struct sk_buff *skb,
                                           struct page *page,
                                           int off, int copy)
index b8fdc6bab3f3ac9fe8d3992dc6105f9b857612b8..637ee490ec81605054777e559c93c3977ff6a230 100644 (file)
@@ -1713,4 +1713,19 @@ static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
        return dopt;
 }
 
+/* locally generated TCP pure ACKs have skb->truesize == 2
+ * (check tcp_send_ack() in net/ipv4/tcp_output.c )
+ * This is much faster than dissecting the packet to find out.
+ * (Think of GRE encapsulations, IPv4, IPv6, ...)
+ */
+static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
+{
+       return skb->truesize == 2;
+}
+
+static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
+{
+       skb->truesize = 2;
+}
+
 #endif /* _TCP_H */
index ae7c8d1fbcadbccd635edc56a54dba6d94563170..80761938b9a78081822a4b82b4bd3fb30b5f6625 100644 (file)
@@ -20,8 +20,7 @@ static __inline__ int udplite_getfrag(void *from, char *to, int  offset,
                                      int len, int odd, struct sk_buff *skb)
 {
        struct msghdr *msg = from;
-       /* XXX: stripping const */
-       return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len);
+       return copy_from_iter(to, len, &msg->msg_iter) != len ? -EFAULT : 0;
 }
 
 /* Designate sk as UDP-Lite socket */
index f52797a90816e022f752dd5ccf7465e43cfeb41b..68b294e839447ab5a4bfeeaf0b2bebf6fe2ba427 100644 (file)
  *     %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME.
  *
  * @NL80211_CMD_GET_INTERFACE: Request an interface's configuration;
- *     either a dump request on a %NL80211_ATTR_WIPHY or a specific get
- *     on an %NL80211_ATTR_IFINDEX is supported.
+ *     either a dump request for all interfaces or a specific get with a
+ *     single %NL80211_ATTR_IFINDEX is supported.
  * @NL80211_CMD_SET_INTERFACE: Set type of a virtual interface, requires
  *     %NL80211_ATTR_IFINDEX and %NL80211_ATTR_IFTYPE.
  * @NL80211_CMD_NEW_INTERFACE: Newly created virtual interface or response
  *     if passed, define which channels should be scanned; if not
  *     passed, all channels allowed for the current regulatory domain
  *     are used.  Extra IEs can also be passed from the userspace by
- *     using the %NL80211_ATTR_IE attribute.
+ *     using the %NL80211_ATTR_IE attribute.  The first cycle of the
+ *     scheduled scan can be delayed by %NL80211_ATTR_SCHED_SCAN_DELAY
+ *     is supplied.
  * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT if
  *     scheduled scan is not running. The caller may assume that as soon
  *     as the call returns, it is safe to start a new scheduled scan again.
@@ -1735,6 +1737,9 @@ enum nl80211_commands {
  *     should be contained in the result as the sum of the respective counters
  *     over all channels.
  *
+ * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before a scheduled scan (or a
+ *     WoWLAN net-detect scan) is started, u32 in seconds.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2098,6 +2103,10 @@ enum nl80211_attrs {
 
        NL80211_ATTR_SURVEY_RADIO_STATS,
 
+       NL80211_ATTR_NETNS_FD,
+
+       NL80211_ATTR_SCHED_SCAN_DELAY,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -3741,11 +3750,12 @@ struct nl80211_pattern_support {
  * @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network
  *     is detected.  This is a nested attribute that contains the
  *     same attributes used with @NL80211_CMD_START_SCHED_SCAN.  It
- *     specifies how the scan is performed (e.g. the interval and the
- *     channels to scan) as well as the scan results that will
- *     trigger a wake (i.e. the matchsets).  This attribute is also
- *     sent in a response to @NL80211_CMD_GET_WIPHY, indicating the
- *     number of match sets supported by the driver (u32).
+ *     specifies how the scan is performed (e.g. the interval, the
+ *     channels to scan and the initial delay) as well as the scan
+ *     results that will trigger a wake (i.e. the matchsets).  This
+ *     attribute is also sent in a response to
+ *     @NL80211_CMD_GET_WIPHY, indicating the number of match sets
+ *     supported by the driver (u32).
  * @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute
  *     containing an array with information about what triggered the
  *     wake up.  If no elements are present in the array, it means
index d62316baae942c43b2558ed2768c88950516126c..534b847107453019d362e9f9f9c0969fc3100c8b 100644 (file)
@@ -774,6 +774,8 @@ enum {
 
        TCA_FQ_FLOW_REFILL_DELAY,       /* flow credit refill delay in usec */
 
+       TCA_FQ_ORPHAN_MASK,     /* mask applied to orphaned skb hashes */
+
        __TCA_FQ_MAX
 };
 
index a8cf98d14199a5695e04b97306396df16da0c272..7db78934ec07c25cae7d62db863d227da41cd546 100644 (file)
@@ -24,7 +24,7 @@ obj-y += lockref.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-        gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
+        gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \
         bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
         percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
diff --git a/lib/iovec.c b/lib/iovec.c
deleted file mode 100644 (file)
index 2d99cb4..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-#include <linux/uaccess.h>
-#include <linux/export.h>
-#include <linux/uio.h>
-
-/*
- *     Copy iovec to kernel. Returns -EFAULT on error.
- *
- *     Note: this modifies the original iovec.
- */
-
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
-{
-       while (len > 0) {
-               if (iov->iov_len) {
-                       int copy = min_t(unsigned int, len, iov->iov_len);
-                       if (copy_from_user(kdata, iov->iov_base, copy))
-                               return -EFAULT;
-                       len -= copy;
-                       kdata += copy;
-                       iov->iov_base += copy;
-                       iov->iov_len -= copy;
-               }
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovec);
-
-/*
- *     Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-                     int offset, int len)
-{
-       int copy;
-       for (; len > 0; ++iov) {
-               /* Skip over the finished iovecs */
-               if (unlikely(offset >= iov->iov_len)) {
-                       offset -= iov->iov_len;
-                       continue;
-               }
-               copy = min_t(unsigned int, iov->iov_len - offset, len);
-               if (copy_to_user(iov->iov_base + offset, kdata, copy))
-                       return -EFAULT;
-               offset = 0;
-               kdata += copy;
-               len -= copy;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- *     Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                       int offset, int len)
-{
-       /* No data? Done! */
-       if (len == 0)
-               return 0;
-
-       /* Skip over the finished iovecs */
-       while (offset >= iov->iov_len) {
-               offset -= iov->iov_len;
-               iov++;
-       }
-
-       while (len > 0) {
-               u8 __user *base = iov->iov_base + offset;
-               int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-               offset = 0;
-               if (copy_from_user(kdata, base, copy))
-                       return -EFAULT;
-               len -= copy;
-               kdata += copy;
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
index c41e21096373156ab78cb5279280234e2dcb656d..057919164e23d7ecc3f6e4258e541eb064a2dd73 100644 (file)
@@ -484,16 +484,24 @@ static void rht_deferred_worker(struct work_struct *work)
 {
        struct rhashtable *ht;
        struct bucket_table *tbl;
+       struct rhashtable_walker *walker;
 
        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);
+       if (ht->being_destroyed)
+               goto unlock;
+
        tbl = rht_dereference(ht->tbl, ht);
 
+       list_for_each_entry(walker, &ht->walkers, list)
+               walker->resize = true;
+
        if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
                rhashtable_expand(ht);
        else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
                rhashtable_shrink(ht);
 
+unlock:
        mutex_unlock(&ht->mutex);
 }
 
@@ -818,6 +826,164 @@ exit:
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
 
+/**
+ * rhashtable_walk_init - Initialise an iterator
+ * @ht:                Table to walk over
+ * @iter:      Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice.  Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit if this function returns
+ * successfully.
+ */
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
+{
+       iter->ht = ht;
+       iter->p = NULL;
+       iter->slot = 0;
+       iter->skip = 0;
+
+       iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
+       if (!iter->walker)
+               return -ENOMEM;
+
+       mutex_lock(&ht->mutex);
+       list_add(&iter->walker->list, &ht->walkers);
+       mutex_unlock(&ht->mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+
+/**
+ * rhashtable_walk_exit - Free an iterator
+ * @iter:      Hash table Iterator
+ *
+ * This function frees resources allocated by rhashtable_walk_init.
+ */
+void rhashtable_walk_exit(struct rhashtable_iter *iter)
+{
+       mutex_lock(&iter->ht->mutex);
+       list_del(&iter->walker->list);
+       mutex_unlock(&iter->ht->mutex);
+       kfree(iter->walker);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
+
+/**
+ * rhashtable_walk_start - Start a hash table walk
+ * @iter:      Hash table iterator
+ *
+ * Start a hash table walk.  Note that we take the RCU lock in all
+ * cases including when we return an error.  So you must always call
+ * rhashtable_walk_stop to clean up.
+ *
+ * Returns zero if successful.
+ *
+ * Returns -EAGAIN if resize event occured.  Note that the iterator
+ * will rewind back to the beginning and you may use it immediately
+ * by calling rhashtable_walk_next.
+ */
+int rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+       rcu_read_lock();
+
+       if (iter->walker->resize) {
+               iter->slot = 0;
+               iter->skip = 0;
+               iter->walker->resize = false;
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter:      Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occured.  Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+       const struct bucket_table *tbl;
+       struct rhashtable *ht = iter->ht;
+       struct rhash_head *p = iter->p;
+       void *obj = NULL;
+
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+
+       if (p) {
+               p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
+               goto next;
+       }
+
+       for (; iter->slot < tbl->size; iter->slot++) {
+               int skip = iter->skip;
+
+               rht_for_each_rcu(p, tbl, iter->slot) {
+                       if (!skip)
+                               break;
+                       skip--;
+               }
+
+next:
+               if (!rht_is_a_nulls(p)) {
+                       iter->skip++;
+                       iter->p = p;
+                       obj = rht_obj(ht, p);
+                       goto out;
+               }
+
+               iter->skip = 0;
+       }
+
+       iter->p = NULL;
+
+out:
+       if (iter->walker->resize) {
+               iter->p = NULL;
+               iter->slot = 0;
+               iter->skip = 0;
+               iter->walker->resize = false;
+               return ERR_PTR(-EAGAIN);
+       }
+
+       return obj;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_next);
+
+/**
+ * rhashtable_walk_stop - Finish a hash table walk
+ * @iter:      Hash table iterator
+ *
+ * Finish a hash table walk.
+ */
+void rhashtable_walk_stop(struct rhashtable_iter *iter)
+{
+       rcu_read_unlock();
+       iter->p = NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
+
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
@@ -890,6 +1056,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        memcpy(&ht->p, params, sizeof(*params));
+       INIT_LIST_HEAD(&ht->walkers);
 
        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
index ce82722d049b7c013fd06f66f726fbbf75a01f56..05f57e491ccbd614a1d306c49df891e4a2ec00c6 100644 (file)
@@ -511,13 +511,12 @@ static int bnep_session(void *arg)
 
 static struct device *bnep_get_device(struct bnep_session *session)
 {
-       struct hci_conn *conn;
+       struct l2cap_conn *conn = l2cap_pi(session->sock->sk)->chan->conn;
 
-       conn = l2cap_pi(session->sock->sk)->chan->conn->hcon;
-       if (!conn)
+       if (!conn || !conn->hcon)
                return NULL;
 
-       return &conn->dev;
+       return &conn->hcon->dev;
 }
 
 static struct device_type bnep_type = {
index 34c17a0645ce874c350328adb7c478d47ae7f98e..3322d3f4c85a25eb4bed8dbfaa1802b907b3f1d3 100644 (file)
@@ -609,6 +609,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
+
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
@@ -870,8 +871,10 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
 
        /* Enable Secure Connections if supported and configured */
-       if (bredr_sc_enabled(hdev)) {
+       if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+           bredr_sc_enabled(hdev)) {
                u8 support = 0x01;
+
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
@@ -1614,6 +1617,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
                cancel_delayed_work(&hdev->service_cache);
 
        cancel_delayed_work_sync(&hdev->le_scan_disable);
+       cancel_delayed_work_sync(&hdev->le_scan_restart);
 
        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->rpa_expired);
@@ -1625,6 +1629,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        hci_dev_lock(hdev);
 
+       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                if (hdev->dev_type == HCI_BREDR)
                        mgmt_powered(hdev, 0);
@@ -1635,6 +1641,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);
 
+       smp_unregister(hdev);
+
        hci_notify(hdev, HCI_DEV_DOWN);
 
        if (hdev->flush)
@@ -1714,32 +1722,14 @@ done:
        return err;
 }
 
-int hci_dev_reset(__u16 dev)
+static int hci_dev_do_reset(struct hci_dev *hdev)
 {
-       struct hci_dev *hdev;
-       int ret = 0;
+       int ret;
 
-       hdev = hci_dev_get(dev);
-       if (!hdev)
-               return -ENODEV;
+       BT_DBG("%s %p", hdev->name, hdev);
 
        hci_req_lock(hdev);
 
-       if (!test_bit(HCI_UP, &hdev->flags)) {
-               ret = -ENETDOWN;
-               goto done;
-       }
-
-       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
-               ret = -EBUSY;
-               goto done;
-       }
-
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
-               ret = -EOPNOTSUPP;
-               goto done;
-       }
-
        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
@@ -1762,12 +1752,41 @@ int hci_dev_reset(__u16 dev)
 
        ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 
-done:
        hci_req_unlock(hdev);
-       hci_dev_put(hdev);
        return ret;
 }
 
+int hci_dev_reset(__u16 dev)
+{
+       struct hci_dev *hdev;
+       int err;
+
+       hdev = hci_dev_get(dev);
+       if (!hdev)
+               return -ENODEV;
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = -ENETDOWN;
+               goto done;
+       }
+
+       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               err = -EBUSY;
+               goto done;
+       }
+
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
+       err = hci_dev_do_reset(hdev);
+
+done:
+       hci_dev_put(hdev);
+       return err;
+}
+
 int hci_dev_reset_stat(__u16 dev)
 {
        struct hci_dev *hdev;
@@ -2131,8 +2150,24 @@ static void hci_power_off(struct work_struct *work)
        BT_DBG("%s", hdev->name);
 
        hci_dev_do_close(hdev);
+}
 
-       smp_unregister(hdev);
+static void hci_error_reset(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
+
+       BT_DBG("%s", hdev->name);
+
+       if (hdev->hw_error)
+               hdev->hw_error(hdev, hdev->hw_error_code);
+       else
+               BT_ERR("%s hardware error 0x%2.2x", hdev->name,
+                      hdev->hw_error_code);
+
+       if (hci_dev_do_close(hdev))
+               return;
+
+       hci_dev_do_open(hdev);
 }
 
 static void hci_discov_off(struct work_struct *work)
@@ -2547,9 +2582,15 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
+               if (hash256 && rand256)
+                       data->present = 0x03;
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
+               if (hash256 && rand256)
+                       data->present = 0x02;
+               else
+                       data->present = 0x00;
        }
 
        if (hash256 && rand256) {
@@ -2558,6 +2599,8 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
+               if (hash192 && rand192)
+                       data->present = 0x01;
        }
 
        BT_DBG("%s for %pMR", hdev->name, bdaddr);
@@ -2788,6 +2831,8 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
                return;
        }
 
+       hdev->discovery.scan_start = 0;
+
        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                hci_dev_lock(hdev);
@@ -2827,6 +2872,8 @@ static void le_scan_disable_work(struct work_struct *work)
 
        BT_DBG("%s", hdev->name);
 
+       cancel_delayed_work_sync(&hdev->le_scan_restart);
+
        hci_req_init(&req, hdev);
 
        hci_req_add_le_scan_disable(&req);
@@ -2836,6 +2883,74 @@ static void le_scan_disable_work(struct work_struct *work)
                BT_ERR("Disable LE scanning request failed: err %d", err);
 }
 
+static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
+                                         u16 opcode)
+{
+       unsigned long timeout, duration, scan_start, now;
+
+       BT_DBG("%s", hdev->name);
+
+       if (status) {
+               BT_ERR("Failed to restart LE scan: status %d", status);
+               return;
+       }
+
+       if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
+           !hdev->discovery.scan_start)
+               return;
+
+       /* When the scan was started, hdev->le_scan_disable has been queued
+        * after duration from scan_start. During scan restart this job
+        * has been canceled, and we need to queue it again after proper
+        * timeout, to make sure that scan does not run indefinitely.
+        */
+       duration = hdev->discovery.scan_duration;
+       scan_start = hdev->discovery.scan_start;
+       now = jiffies;
+       if (now - scan_start <= duration) {
+               int elapsed;
+
+               if (now >= scan_start)
+                       elapsed = now - scan_start;
+               else
+                       elapsed = ULONG_MAX - scan_start + now;
+
+               timeout = duration - elapsed;
+       } else {
+               timeout = 0;
+       }
+       queue_delayed_work(hdev->workqueue,
+                          &hdev->le_scan_disable, timeout);
+}
+
+static void le_scan_restart_work(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                           le_scan_restart.work);
+       struct hci_request req;
+       struct hci_cp_le_set_scan_enable cp;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       /* If controller is not scanning we are done. */
+       if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+               return;
+
+       hci_req_init(&req, hdev);
+
+       hci_req_add_le_scan_disable(&req);
+
+       memset(&cp, 0, sizeof(cp));
+       cp.enable = LE_SCAN_ENABLE;
+       cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+       hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+
+       err = hci_req_run(&req, le_scan_restart_work_complete);
+       if (err)
+               BT_ERR("Restart LE scan request failed: err %d", err);
+}
+
 /* Copy the Identity Address of the controller.
  *
  * If the controller has a public BD_ADDR, then by default use that one.
@@ -2927,10 +3042,12 @@ struct hci_dev *hci_alloc_dev(void)
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);
+       INIT_WORK(&hdev->error_reset, hci_error_reset);
 
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+       INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
 
        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
@@ -3100,8 +3217,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
                rfkill_destroy(hdev->rfkill);
        }
 
-       smp_unregister(hdev);
-
        device_del(&hdev->dev);
 
        debugfs_remove_recursive(hdev->debugfs);
index ead89a5ad9ced2dd2b8d79a8ffd0ab38f38ed127..65261e5d4b84bbbdc1da1d11083b9f32b6233d7a 100644 (file)
@@ -156,6 +156,35 @@ static const struct file_operations uuids_fops = {
        .release        = single_release,
 };
 
+static int remote_oob_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct oob_data *data;
+
+       hci_dev_lock(hdev);
+       list_for_each_entry(data, &hdev->remote_oob_data, list) {
+               /* All four OOB values are 16-byte arrays; dump each as
+                * exactly 16 hex bytes (%16phN). Using a larger width
+                * would read past the end of the array.
+                */
+               seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n",
+                          &data->bdaddr, data->bdaddr_type, data->present,
+                          16, data->hash192, 16, data->rand192,
+                          16, data->hash256, 16, data->rand256);
+       }
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int remote_oob_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, remote_oob_show, inode->i_private);
+}
+
+static const struct file_operations remote_oob_fops = {
+       .open           = remote_oob_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int conn_info_min_age_set(void *data, u64 val)
 {
        struct hci_dev *hdev = data;
@@ -212,6 +241,24 @@ static int conn_info_max_age_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");
 
+static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations use_debug_keys_fops = {
+       .open           = simple_open,
+       .read           = use_debug_keys_read,
+       .llseek         = default_llseek,
+};
+
 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
 {
@@ -238,17 +285,26 @@ void hci_debugfs_create_common(struct hci_dev *hdev)
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
+       debugfs_create_u8("hardware_error", 0444, hdev->debugfs,
+                         &hdev->hw_error_code);
+
        debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
                            &device_list_fops);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+       debugfs_create_file("remote_oob", 0400, hdev->debugfs, hdev,
+                           &remote_oob_fops);
 
        debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
                            &conn_info_min_age_fops);
        debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
                            &conn_info_max_age_fops);
 
+       if (lmp_ssp_capable(hdev) || lmp_le_capable(hdev))
+               debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
+                                   hdev, &use_debug_keys_fops);
+
        if (lmp_sc_capable(hdev) || lmp_le_capable(hdev))
                debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
                                    hdev, &sc_only_mode_fops);
@@ -354,6 +410,24 @@ static int voice_setting_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
 
+static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = hdev->ssp_debug_mode ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations ssp_debug_mode_fops = {
+       .open           = simple_open,
+       .read           = ssp_debug_mode_read,
+       .llseek         = default_llseek,
+};
+
 static int auto_accept_delay_set(void *data, u64 val)
 {
        struct hci_dev *hdev = data;
@@ -474,9 +548,12 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev)
        debugfs_create_file("voice_setting", 0444, hdev->debugfs, hdev,
                            &voice_setting_fops);
 
-       if (lmp_ssp_capable(hdev))
+       if (lmp_ssp_capable(hdev)) {
+               debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
+                                   hdev, &ssp_debug_mode_fops);
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
+       }
 
        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
index a58845e98921da6a04d00794d8ce5563e3e52aa4..a3fb094822b621e5ef3b3205d1d5fce7c9d3f6b8 100644 (file)
@@ -36,6 +36,9 @@
 #include "amp.h"
 #include "smp.h"
 
+#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
+                "\x00\x00\x00\x00\x00\x00\x00\x00"
+
 /* Handle HCI Event packets */
 
 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -197,7 +200,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
        /* Reset all non-persistent flags */
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
 
-       hdev->discovery.state = DISCOVERY_STOPPED;
+       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 
@@ -525,9 +529,7 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
                        hdev->features[1][0] &= ~LMP_HOST_SC;
        }
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
-               mgmt_sc_enable_complete(hdev, sent->support, status);
-       else if (!status) {
+       if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
                if (sent->support)
                        set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
                else
@@ -1487,6 +1489,21 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
+static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       u8 status = *((u8 *) skb->data);
+       u8 *mode;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (status)
+               return;
+
+       mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
+       if (mode)
+               hdev->ssp_debug_mode = *mode;
+}
+
 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -2669,7 +2686,8 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
        if (conn->state != BT_CONFIG)
                goto unlock;
 
-       if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
+       if (!ev->status && lmp_ext_feat_capable(hdev) &&
+           lmp_ext_feat_capable(conn)) {
                struct hci_cp_read_remote_ext_features cp;
                cp.handle = ev->handle;
                cp.page = 0x01;
@@ -2980,6 +2998,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_read_tx_power(hdev, skb);
                break;
 
+       case HCI_OP_WRITE_SSP_DEBUG_MODE:
+               hci_cc_write_ssp_debug_mode(hdev, skb);
+               break;
+
        default:
                BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
                break;
@@ -3098,7 +3120,9 @@ static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_hardware_error *ev = (void *) skb->data;
 
-       BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
+       hdev->hw_error_code = ev->code;
+
+       queue_work(hdev->req_workqueue, &hdev->error_reset);
 }
 
 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3857,6 +3881,52 @@ static u8 hci_get_auth_req(struct hci_conn *conn)
        return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
 }
 
+static u8 bredr_oob_data_present(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+       struct oob_data *data;
+
+       data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
+       if (!data)
+               return 0x00;
+
+       if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) {
+               if (bredr_sc_enabled(hdev)) {
+                       /* When Secure Connections is enabled, then just
+                        * return the present value stored with the OOB
+                        * data. The stored value contains the right present
+                        * information. However it can only be trusted when
+                        * not in Secure Connection Only mode.
+                        */
+                       if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags))
+                               return data->present;
+
+                       /* When Secure Connections Only mode is enabled, then
+                        * the P-256 values are required. If they are not
+                        * available, then do not declare that OOB data is
+                        * present.
+                        */
+                       if (!memcmp(data->rand256, ZERO_KEY, 16) ||
+                           !memcmp(data->hash256, ZERO_KEY, 16))
+                               return 0x00;
+
+                       return 0x02;
+               }
+
+               /* When Secure Connections is not enabled or actually
+                * not supported by the hardware, then check that if
+                * P-192 data values are present.
+                */
+               if (!memcmp(data->rand192, ZERO_KEY, 16) ||
+                   !memcmp(data->hash192, ZERO_KEY, 16))
+                       return 0x00;
+
+               return 0x01;
+       }
+
+       return 0x00;
+}
+
 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -3908,12 +3978,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                        conn->auth_type &= HCI_AT_NO_BONDING_MITM;
 
                cp.authentication = conn->auth_type;
-
-               if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
-                   (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
-                       cp.oob_data = 0x01;
-               else
-                       cp.oob_data = 0x00;
+               cp.oob_data = bredr_oob_data_present(conn);
 
                hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
                             sizeof(cp), &cp);
@@ -4165,33 +4230,39 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
                goto unlock;
 
        data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
-       if (data) {
-               if (bredr_sc_enabled(hdev)) {
-                       struct hci_cp_remote_oob_ext_data_reply cp;
-
-                       bacpy(&cp.bdaddr, &ev->bdaddr);
-                       memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
-                       memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
-                       memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
-                       memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
+       if (!data) {
+               struct hci_cp_remote_oob_data_neg_reply cp;
 
-                       hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
-                                    sizeof(cp), &cp);
-               } else {
-                       struct hci_cp_remote_oob_data_reply cp;
+               bacpy(&cp.bdaddr, &ev->bdaddr);
+               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
+                            sizeof(cp), &cp);
+               goto unlock;
+       }
 
-                       bacpy(&cp.bdaddr, &ev->bdaddr);
-                       memcpy(cp.hash, data->hash192, sizeof(cp.hash));
-                       memcpy(cp.rand, data->rand192, sizeof(cp.rand));
+       if (bredr_sc_enabled(hdev)) {
+               struct hci_cp_remote_oob_ext_data_reply cp;
 
-                       hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
-                                    sizeof(cp), &cp);
+               bacpy(&cp.bdaddr, &ev->bdaddr);
+               if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
+                       memset(cp.hash192, 0, sizeof(cp.hash192));
+                       memset(cp.rand192, 0, sizeof(cp.rand192));
+               } else {
+                       memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
+                       memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
                }
+               memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
+               memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
+
+               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
+                            sizeof(cp), &cp);
        } else {
-               struct hci_cp_remote_oob_data_neg_reply cp;
+               struct hci_cp_remote_oob_data_reply cp;
 
                bacpy(&cp.bdaddr, &ev->bdaddr);
-               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
+               memcpy(cp.hash, data->hash192, sizeof(cp.hash));
+               memcpy(cp.rand, data->rand192, sizeof(cp.rand));
+
+               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
                             sizeof(cp), &cp);
        }
 
index 20206cd3acbcab3feef0b858f380d163a5e23ee8..60694f0f4c73768dee1db1a4926ced43522dbe83 100644 (file)
@@ -302,7 +302,7 @@ done:
 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
                             int flags)
 {
-       DECLARE_WAITQUEUE(wait, current);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;
@@ -316,8 +316,6 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
        /* Wait for an incoming connection. (wake-one). */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (1) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
                if (sk->sk_state != BT_LISTEN) {
                        err = -EBADFD;
                        break;
@@ -338,10 +336,11 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
                }
 
                release_sock(sk);
-               timeo = schedule_timeout(timeo);
+
+               timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+
                lock_sock_nested(sk, L2CAP_NESTING_PARENT);
        }
-       __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
 
        if (err)
index f5c4d2eed9a18e9b5e8b71659ed06d8eda4fd8ed..9ec5390c85eba61c3c3bcb5a813c8d8af326cf33 100644 (file)
@@ -131,6 +131,9 @@ static const u16 mgmt_events[] = {
 
 #define CACHE_TIMEOUT  msecs_to_jiffies(2 * 1000)
 
+#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
+                "\x00\x00\x00\x00\x00\x00\x00\x00"
+
 struct pending_cmd {
        struct list_head list;
        u16 opcode;
@@ -3633,10 +3636,16 @@ unlock:
 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                               void *data, u16 len)
 {
+       struct mgmt_addr_info *addr = data;
        int err;
 
        BT_DBG("%s ", hdev->name);
 
+       if (!bdaddr_type_is_valid(addr->type))
+               return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                   MGMT_STATUS_INVALID_PARAMS, addr,
+                                   sizeof(*addr));
+
        hci_dev_lock(hdev);
 
        if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
@@ -3663,28 +3672,53 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                                   status, &cp->addr, sizeof(cp->addr));
        } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
                struct mgmt_cp_add_remote_oob_ext_data *cp = data;
-               u8 *rand192, *hash192;
+               u8 *rand192, *hash192, *rand256, *hash256;
                u8 status;
 
-               if (cp->addr.type != BDADDR_BREDR) {
-                       err = cmd_complete(sk, hdev->id,
-                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                          MGMT_STATUS_INVALID_PARAMS,
-                                          &cp->addr, sizeof(cp->addr));
-                       goto unlock;
-               }
-
                if (bdaddr_type_is_le(cp->addr.type)) {
+                       /* Enforce zero-valued 192-bit parameters as
+                        * long as legacy SMP OOB isn't implemented.
+                        */
+                       if (memcmp(cp->rand192, ZERO_KEY, 16) ||
+                           memcmp(cp->hash192, ZERO_KEY, 16)) {
+                               err = cmd_complete(sk, hdev->id,
+                                                  MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                                  MGMT_STATUS_INVALID_PARAMS,
+                                                  addr, sizeof(*addr));
+                               goto unlock;
+                       }
+
                        rand192 = NULL;
                        hash192 = NULL;
                } else {
-                       rand192 = cp->rand192;
-                       hash192 = cp->hash192;
+                       /* In case one of the P-192 values is set to zero,
+                        * then just disable OOB data for P-192.
+                        */
+                       if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
+                           !memcmp(cp->hash192, ZERO_KEY, 16)) {
+                               rand192 = NULL;
+                               hash192 = NULL;
+                       } else {
+                               rand192 = cp->rand192;
+                               hash192 = cp->hash192;
+                       }
+               }
+
+               /* In case one of the P-256 values is set to zero, then just
+                * disable OOB data for P-256.
+                */
+               if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
+                   !memcmp(cp->hash256, ZERO_KEY, 16)) {
+                       rand256 = NULL;
+                       hash256 = NULL;
+               } else {
+                       rand256 = cp->rand256;
+                       hash256 = cp->hash256;
                }
 
                err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
                                              cp->addr.type, hash192, rand192,
-                                             cp->hash256, cp->rand256);
+                                             hash256, rand256);
                if (err < 0)
                        status = MGMT_STATUS_FAILED;
                else
@@ -3862,6 +3896,9 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
 
        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
 
+       /* If the scan involves LE scan, pick proper timeout to schedule
+        * hdev->le_scan_disable that will stop it.
+        */
        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
@@ -3878,9 +3915,23 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
                break;
        }
 
-       if (timeout)
+       if (timeout) {
+               /* When service discovery is used and the controller has
+                * a strict duplicate filter, it is important to remember
+                * the start and duration of the scan. This is required
+                * for restarting scanning during the discovery phase.
+                */
+               if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+                            &hdev->quirks) &&
+                   (hdev->discovery.uuid_count > 0 ||
+                    hdev->discovery.rssi != HCI_RSSI_INVALID)) {
+                       hdev->discovery.scan_start = jiffies;
+                       hdev->discovery.scan_duration = timeout;
+               }
+
                queue_delayed_work(hdev->workqueue,
                                   &hdev->le_scan_disable, timeout);
+       }
 
 unlock:
        hci_dev_unlock(hdev);
@@ -4691,9 +4742,16 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                 * Dual-mode controllers shall operate with the public
                 * address as its identity address for BR/EDR and LE. So
                 * reject the attempt to create an invalid configuration.
+                *
+                * The same restrictions applies when secure connections
+                * has been enabled. For BR/EDR this is a controller feature
+                * while for LE it is a host stack feature. This means that
+                * switching BR/EDR back on when secure connections has been
+                * enabled is not a supported transaction.
                 */
                if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
-                   bacmp(&hdev->static_addr, BDADDR_ANY)) {
+                   (bacmp(&hdev->static_addr, BDADDR_ANY) ||
+                    test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                                         MGMT_STATUS_REJECTED);
                        goto unlock;
@@ -4736,11 +4794,57 @@ unlock:
        return err;
 }
 
+static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+       struct pending_cmd *cmd;
+       struct mgmt_mode *cp;
+
+       BT_DBG("%s status %u", hdev->name, status);
+
+       hci_dev_lock(hdev);
+
+       cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
+       if (!cmd)
+               goto unlock;
+
+       if (status) {
+               cmd_status(cmd->sk, cmd->index, cmd->opcode,
+                          mgmt_status(status));
+               goto remove;
+       }
+
+       cp = cmd->param;
+
+       switch (cp->val) {
+       case 0x00:
+               clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               break;
+       case 0x01:
+               set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               break;
+       case 0x02:
+               set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+               set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               break;
+       }
+
+       send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
+       new_settings(hdev, cmd->sk);
+
+remove:
+       mgmt_pending_remove(cmd);
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
        struct pending_cmd *cmd;
+       struct hci_request req;
        u8 val;
        int err;
 
@@ -4751,6 +4855,12 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
+       if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+           lmp_sc_capable(hdev) &&
+           !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+                                 MGMT_STATUS_REJECTED);
+
        if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                  MGMT_STATUS_INVALID_PARAMS);
@@ -4804,17 +4914,14 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                goto failed;
        }
 
-       err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
+       err = hci_req_run(&req, sc_enable_complete);
        if (err < 0) {
                mgmt_pending_remove(cmd);
                goto failed;
        }
 
-       if (cp->val == 0x02)
-               set_bit(HCI_SC_ONLY, &hdev->dev_flags);
-       else
-               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
-
 failed:
        hci_dev_unlock(hdev);
        return err;
@@ -6262,14 +6369,16 @@ static int powered_update_hci(struct hci_dev *hdev)
 
        if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
            !lmp_host_ssp_capable(hdev)) {
-               u8 ssp = 1;
+               u8 mode = 0x01;
 
-               hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
-       }
+               hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
 
-       if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
-               u8 sc = 0x01;
-               hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
+               if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
+                       u8 support = 0x01;
+
+                       hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
+                                   sizeof(support), &support);
+               }
        }
 
        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
@@ -6989,43 +7098,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
        hci_req_run(&req, NULL);
 }
 
-void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
-{
-       struct cmd_lookup match = { NULL, hdev };
-       bool changed = false;
-
-       if (status) {
-               u8 mgmt_err = mgmt_status(status);
-
-               if (enable) {
-                       if (test_and_clear_bit(HCI_SC_ENABLED,
-                                              &hdev->dev_flags))
-                               new_settings(hdev, NULL);
-                       clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
-               }
-
-               mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
-                                    cmd_status_rsp, &mgmt_err);
-               return;
-       }
-
-       if (enable) {
-               changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
-       } else {
-               changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
-               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
-       }
-
-       mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
-                            settings_rsp, &match);
-
-       if (changed)
-               new_settings(hdev, match.sk);
-
-       if (match.sk)
-               sock_put(match.sk);
-}
-
 static void sk_lookup(struct pending_cmd *cmd, void *data)
 {
        struct cmd_lookup *match = data;
@@ -7096,28 +7168,21 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
                cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
                           mgmt_status(status));
        } else {
-               if (bredr_sc_enabled(hdev) && hash256 && rand256) {
-                       struct mgmt_rp_read_local_oob_ext_data rp;
+               struct mgmt_rp_read_local_oob_data rp;
+               size_t rp_size = sizeof(rp);
 
-                       memcpy(rp.hash192, hash192, sizeof(rp.hash192));
-                       memcpy(rp.rand192, rand192, sizeof(rp.rand192));
+               memcpy(rp.hash192, hash192, sizeof(rp.hash192));
+               memcpy(rp.rand192, rand192, sizeof(rp.rand192));
 
+               if (bredr_sc_enabled(hdev) && hash256 && rand256) {
                        memcpy(rp.hash256, hash256, sizeof(rp.hash256));
                        memcpy(rp.rand256, rand256, sizeof(rp.rand256));
-
-                       cmd_complete(cmd->sk, hdev->id,
-                                    MGMT_OP_READ_LOCAL_OOB_DATA, 0,
-                                    &rp, sizeof(rp));
                } else {
-                       struct mgmt_rp_read_local_oob_data rp;
-
-                       memcpy(rp.hash, hash192, sizeof(rp.hash));
-                       memcpy(rp.rand, rand192, sizeof(rp.rand));
-
-                       cmd_complete(cmd->sk, hdev->id,
-                                    MGMT_OP_READ_LOCAL_OOB_DATA, 0,
-                                    &rp, sizeof(rp));
+                       rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
                }
+
+               cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
+                            &rp, rp_size);
        }
 
        mgmt_pending_remove(cmd);
@@ -7190,6 +7255,21 @@ static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
        return false;
 }
 
+static void restart_le_scan(struct hci_dev *hdev)
+{
+       /* If controller is not scanning we are done. */
+       if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+               return;
+
+       if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
+                      hdev->discovery.scan_start +
+                      hdev->discovery.scan_duration))
+               return;
+
+       queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
+                          DISCOV_LE_RESTART_DELAY);
+}
+
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
                       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
@@ -7212,14 +7292,18 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 
        /* When using service discovery with a RSSI threshold, then check
         * if such a RSSI threshold is specified. If a RSSI threshold has
-        * been specified, then all results with a RSSI smaller than the
-        * RSSI threshold will be dropped.
+        * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
+        * then all results with a RSSI smaller than the RSSI threshold will be
+        * dropped. If the quirk is set, let it through for further processing,
+        * as we might need to restart the scan.
         *
         * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
         * the results are also dropped.
         */
        if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
-           (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
+           (rssi == HCI_RSSI_INVALID ||
+           (rssi < hdev->discovery.rssi &&
+            !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
                return;
 
        /* Make sure that the buffer is big enough. The 5 extra bytes
@@ -7238,7 +7322,8 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
         * However when using service discovery, the value 127 will be
         * returned when the RSSI is not available.
         */
-       if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
+       if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
+           link_type == ACL_LINK)
                rssi = 0;
 
        bacpy(&ev->addr.bdaddr, bdaddr);
@@ -7253,12 +7338,20 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                 * kept and checking possible scan response data
                 * will be skipped.
                 */
-               if (hdev->discovery.uuid_count > 0)
+               if (hdev->discovery.uuid_count > 0) {
                        match = eir_has_uuids(eir, eir_len,
                                              hdev->discovery.uuid_count,
                                              hdev->discovery.uuids);
-               else
+                       /* If duplicate filtering does not report RSSI changes,
+                        * then restart scanning to ensure updated result with
+                        * updated RSSI values.
+                        */
+                       if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+                                             &hdev->quirks))
+                               restart_le_scan(hdev);
+               } else {
                        match = true;
+               }
 
                if (!match && !scan_rsp_len)
                        return;
@@ -7291,6 +7384,14 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                                                     hdev->discovery.uuid_count,
                                                     hdev->discovery.uuids))
                                return;
+
+                       /* If duplicate filtering does not report RSSI changes,
+                        * then restart scanning to ensure updated result with
+                        * updated RSSI values.
+                        */
+                       if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+                                    &hdev->quirks))
+                               restart_le_scan(hdev);
                }
 
                /* Append scan response data to event */
@@ -7304,6 +7405,14 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                        return;
        }
 
+       /* Validate the reported RSSI value against the RSSI threshold once more
+        * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
+        * scanning.
+        */
+       if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
+           rssi < hdev->discovery.rssi)
+               return;
+
        ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
        ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
 
index d8a95755a8a89e2ab99c22b16ea56f339c8229b9..3c6d2c8ac1a47bc7f5a96b576ca93e9542ee4fe3 100644 (file)
@@ -468,7 +468,7 @@ done:
 
 static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
 {
-       DECLARE_WAITQUEUE(wait, current);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;
@@ -487,8 +487,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
        /* Wait for an incoming connection. (wake-one). */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (1) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
                if (sk->sk_state != BT_LISTEN) {
                        err = -EBADFD;
                        break;
@@ -509,10 +507,11 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
                }
 
                release_sock(sk);
-               timeo = schedule_timeout(timeo);
+
+               timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        }
-       __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
 
        if (err)
index 07ec7d23b843d7c18a866472e7451e1f279bfcf0..76321b546e8426146dba242c281c2915464e7d6f 100644 (file)
@@ -618,7 +618,7 @@ done:
 
 static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
 {
-       DECLARE_WAITQUEUE(wait, current);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk, *ch;
        long timeo;
        int err = 0;
@@ -632,8 +632,6 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
        /* Wait for an incoming connection. (wake-one). */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (1) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
                if (sk->sk_state != BT_LISTEN) {
                        err = -EBADFD;
                        break;
@@ -654,10 +652,10 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
                }
 
                release_sock(sk);
-               timeo = schedule_timeout(timeo);
+
+               timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
                lock_sock(sk);
        }
-       __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
 
        if (err)
index 37d9180bfe1c559902279da1ae4ba08422e2d50a..c09a821f381d0b648b45ca5c722d89161775f638 100644 (file)
@@ -620,7 +620,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
 
                oob_data = hci_find_remote_oob_data(hdev, &hcon->dst,
                                                    bdaddr_type);
-               if (oob_data) {
+               if (oob_data && oob_data->present) {
                        set_bit(SMP_FLAG_OOB, &smp->flags);
                        oob_flag = SMP_OOB_PRESENT;
                        memcpy(smp->rr, oob_data->rand256, 16);
index 235e6c50708d73baf290679c45affc564202042b..fec0856dd6c031a2ae369410fc5d7f9c25a1fcf6 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the Linux networking core.
 #
 
-obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
+obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \
         gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
index 1d564d68e31a5f361fc233ae90a3a19e82915664..ede0b161b115c10cda6c2e65d307d3c91cc44d5d 100644 (file)
@@ -5355,6 +5355,26 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+/**
+ * netdev_bonding_info_change - Dispatch event about slave change
+ * @dev: device
+ * @netdev_bonding_info: info to dispatch
+ *
+ * Send NETDEV_BONDING_INFO to netdev notifiers with info.
+ * The caller must hold the RTNL lock.
+ */
+void netdev_bonding_info_change(struct net_device *dev,
+                               struct netdev_bonding_info *bonding_info)
+{
+       struct netdev_notifier_bonding_info     info;
+
+       memcpy(&info.bonding_info, bonding_info,
+              sizeof(struct netdev_bonding_info));
+       call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
+                                     &info.info);
+}
+EXPORT_SYMBOL(netdev_bonding_info_change);
+
 void netdev_adjacent_add_links(struct net_device *dev)
 {
        struct netdev_adjacent *iter;
index beb83d1ac1c688d7b593a5cad85236b6d94c3106..2c35c02a931e227fa368cd346873596d4b037a3d 100644 (file)
@@ -422,7 +422,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                map = rcu_dereference(
-                   dev_maps->cpu_map[raw_smp_processor_id()]);
+                   dev_maps->cpu_map[skb->sender_cpu - 1]);
                if (map) {
                        if (map->len == 1)
                                queue_index = map->queues[0];
@@ -468,6 +468,11 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 {
        int queue_index = 0;
 
+#ifdef CONFIG_XPS
+       if (skb->sender_cpu == 0)
+               skb->sender_cpu = raw_smp_processor_id() + 1;
+#endif
+
        if (dev->real_num_tx_queues != 1) {
                const struct net_device_ops *ops = dev->netdev_ops;
                if (ops->ndo_select_queue)
diff --git a/net/core/iovec.c b/net/core/iovec.c
deleted file mode 100644 (file)
index dcbe98b..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- *     iovec manipulation routines.
- *
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- *
- *     Fixes:
- *             Andrew Lunn     :       Errors in iovec copying.
- *             Pedro Roque     :       Added memcpy_fromiovecend and
- *                                     csum_..._fromiovecend.
- *             Andi Kleen      :       fixed error handling for 2.1
- *             Alexey Kuznetsov:       2.1 optimisations
- *             Andi Kleen      :       Fix csum*fromiovecend for IPv6.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/in6.h>
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-#include <net/checksum.h>
-#include <net/sock.h>
-
-/*
- *     And now for the all-in-one: copy and checksum from a user iovec
- *     directly to a datagram
- *     Calls to csum_partial but the last must be in 32 bit chunks
- *
- *     ip_build_xmit must ensure that when fragmenting only the last
- *     call to this function will be unaligned also.
- */
-int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
-                                int offset, unsigned int len, __wsum *csump)
-{
-       __wsum csum = *csump;
-       int partial_cnt = 0, err = 0;
-
-       /* Skip over the finished iovecs */
-       while (offset >= iov->iov_len) {
-               offset -= iov->iov_len;
-               iov++;
-       }
-
-       while (len > 0) {
-               u8 __user *base = iov->iov_base + offset;
-               int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-               offset = 0;
-
-               /* There is a remnant from previous iov. */
-               if (partial_cnt) {
-                       int par_len = 4 - partial_cnt;
-
-                       /* iov component is too short ... */
-                       if (par_len > copy) {
-                               if (copy_from_user(kdata, base, copy))
-                                       goto out_fault;
-                               kdata += copy;
-                               base += copy;
-                               partial_cnt += copy;
-                               len -= copy;
-                               iov++;
-                               if (len)
-                                       continue;
-                               *csump = csum_partial(kdata - partial_cnt,
-                                                        partial_cnt, csum);
-                               goto out;
-                       }
-                       if (copy_from_user(kdata, base, par_len))
-                               goto out_fault;
-                       csum = csum_partial(kdata - partial_cnt, 4, csum);
-                       kdata += par_len;
-                       base  += par_len;
-                       copy  -= par_len;
-                       len   -= par_len;
-                       partial_cnt = 0;
-               }
-
-               if (len > copy) {
-                       partial_cnt = copy % 4;
-                       if (partial_cnt) {
-                               copy -= partial_cnt;
-                               if (copy_from_user(kdata + copy, base + copy,
-                                               partial_cnt))
-                                       goto out_fault;
-                       }
-               }
-
-               if (copy) {
-                       csum = csum_and_copy_from_user(base, kdata, copy,
-                                                       csum, &err);
-                       if (err)
-                               goto out;
-               }
-               len   -= copy + partial_cnt;
-               kdata += copy + partial_cnt;
-               iov++;
-       }
-       *csump = csum;
-out:
-       return err;
-
-out_fault:
-       err = -EFAULT;
-       goto out;
-}
-EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
-
-unsigned long iov_pages(const struct iovec *iov, int offset,
-                       unsigned long nr_segs)
-{
-       unsigned long seg, base;
-       int pages = 0, len, size;
-
-       while (nr_segs && (offset >= iov->iov_len)) {
-               offset -= iov->iov_len;
-               ++iov;
-               --nr_segs;
-       }
-
-       for (seg = 0; seg < nr_segs; seg++) {
-               base = (unsigned long)iov[seg].iov_base + offset;
-               len = iov[seg].iov_len - offset;
-               size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
-               pages += size;
-               offset = 0;
-       }
-
-       return pages;
-}
-EXPORT_SYMBOL(iov_pages);
index b7bde551ef76901de0456d811112ba232aa350ba..cb5290b8c428c5c348b25d842ab9cf3797b70eba 100644 (file)
@@ -446,6 +446,7 @@ struct net *get_net_ns_by_fd(int fd)
        return ERR_PTR(-EINVAL);
 }
 #endif
+EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
 
 struct net *get_net_ns_by_pid(pid_t pid)
 {
index 673cb4c6f391b3d07d57dc378bb2c228d223fdb8..4cd5e350d129713552e7c2ccb7582d32e6b94fc3 100644 (file)
@@ -3180,6 +3180,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
        case NETDEV_UNREGISTER_FINAL:
        case NETDEV_RELEASE:
        case NETDEV_JOIN:
+       case NETDEV_BONDING_INFO:
                break;
        default:
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
index a5bff2767f15abe09b5f0d0a3bfecfb5775a4e64..88c613eab142962dc44f2075378fce0b94349e8e 100644 (file)
@@ -825,6 +825,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #ifdef CONFIG_NET_RX_BUSY_POLL
        CHECK_SKB_FIELD(napi_id);
 #endif
+#ifdef CONFIG_XPS
+       CHECK_SKB_FIELD(sender_cpu);
+#endif
 #ifdef CONFIG_NET_SCHED
        CHECK_SKB_FIELD(tc_index);
 #ifdef CONFIG_NET_CLS_ACT
@@ -4169,6 +4172,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb->mark = 0;
+       skb->sender_cpu = 0;
        skb_init_secmark(skb);
        secpath_reset(skb);
        nf_reset(skb);
index 3bc0cf07661c43fd6bcd81d65177680168a07f32..92ddea1e645732118d982685c4bab5b2d6c03641 100644 (file)
@@ -70,7 +70,6 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
-       __wsum delta;
 
        if (skb->remcsum_offload) {
                /* Already processed in GRO path */
@@ -82,14 +81,7 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
                return NULL;
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];
 
-       if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
-               __skb_checksum_complete(skb);
-
-       delta = remcsum_adjust((void *)guehdr + hdrlen,
-                              skb->csum, start, offset);
-
-       /* Adjust skb->csum since we changed the packet */
-       skb->csum = csum_add(skb->csum, delta);
+       skb_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
 
        return guehdr;
 }
@@ -228,7 +220,6 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
-       __wsum delta;
 
        if (skb->remcsum_offload)
                return guehdr;
@@ -243,12 +234,7 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                        return NULL;
        }
 
-       delta = remcsum_adjust((void *)guehdr + hdrlen,
-                              NAPI_GRO_CB(skb)->csum, start, offset);
-
-       /* Adjust skb->csum since we changed the packet */
-       skb->csum = csum_add(skb->csum, delta);
-       NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+       skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
 
        skb->remcsum_offload = 1;
 
index b50861b22b6bea036b1a99ddf141d7ed2d6cf6cd..f998bc87ae38ec0671b842b2219fb7d6e273fed1 100644 (file)
@@ -755,13 +755,11 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk
        struct msghdr *msg = from;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               /* XXX: stripping const */
-               if (memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len) < 0)
+               if (copy_from_iter(to, len, &msg->msg_iter) != len)
                        return -EFAULT;
        } else {
                __wsum csum = 0;
-               /* XXX: stripping const */
-               if (csum_partial_copy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len, &csum) < 0)
+               if (csum_and_copy_from_iter(to, len, &csum, &msg->msg_iter) != len)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
index 2a3720fb5a5ff5401c5efbef49427fb18dbbfa5e..e9f66e1cda507cf2d5cb532958d23a89beeccaba 100644 (file)
@@ -599,18 +599,18 @@ int ping_getfrag(void *from, char *to,
        struct pingfakehdr *pfh = (struct pingfakehdr *)from;
 
        if (offset == 0) {
-               if (fraglen < sizeof(struct icmphdr))
+               fraglen -= sizeof(struct icmphdr);
+               if (fraglen < 0)
                        BUG();
-               if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr),
-                           pfh->iov, 0, fraglen - sizeof(struct icmphdr),
-                           &pfh->wcheck))
+               if (csum_and_copy_from_iter(to + sizeof(struct icmphdr),
+                           fraglen, &pfh->wcheck,
+                           &pfh->msg->msg_iter) != fraglen)
                        return -EFAULT;
        } else if (offset < sizeof(struct icmphdr)) {
                        BUG();
        } else {
-               if (csum_partial_copy_fromiovecend
-                               (to, pfh->iov, offset - sizeof(struct icmphdr),
-                                fraglen, &pfh->wcheck))
+               if (csum_and_copy_from_iter(to, fraglen, &pfh->wcheck,
+                                           &pfh->msg->msg_iter) != fraglen)
                        return -EFAULT;
        }
 
@@ -811,8 +811,7 @@ back_from_confirm:
        pfh.icmph.checksum = 0;
        pfh.icmph.un.echo.id = inet->inet_sport;
        pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
-       /* XXX: stripping const */
-       pfh.iov = (struct iovec *)msg->msg_iter.iov;
+       pfh.msg = msg;
        pfh.wcheck = 0;
        pfh.family = AF_INET;
 
index 0bb68df5055d2d3f92cb06e829a997b44b512d65..f027a708b7e01029574535e20f7461cfa4b84190 100644 (file)
@@ -337,7 +337,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
 }
 
 static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
-                          void *from, size_t length,
+                          struct msghdr *msg, size_t length,
                           struct rtable **rtp,
                           unsigned int flags)
 {
@@ -382,7 +382,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 
        skb->transport_header = skb->network_header;
        err = -EFAULT;
-       if (memcpy_fromiovecend((void *)iph, from, 0, length))
+       if (memcpy_from_msg(iph, msg, length))
                goto error_free;
 
        iphlen = iph->ihl * 4;
@@ -625,8 +625,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 back_from_confirm:
 
        if (inet->hdrincl)
-               /* XXX: stripping const */
-               err = raw_send_hdrinc(sk, &fl4, (struct iovec *)msg->msg_iter.iov, len,
+               err = raw_send_hdrinc(sk, &fl4, msg, len,
                                      &rt, msg->msg_flags);
 
         else {
index 3075723c729bc98edf3a15eb0d0fbe172c300bbc..9d72a0fcd9284425e088cef6e1b8c14e95950ca4 100644 (file)
@@ -1067,11 +1067,10 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t size)
 {
-       const struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int iovlen, flags, err, copied = 0;
-       int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
+       int flags, err, copied = 0;
+       int mss_now = 0, size_goal, copied_syn = 0;
        bool sg;
        long timeo;
 
@@ -1084,7 +1083,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        goto out;
                else if (err)
                        goto out_err;
-               offset = copied_syn;
        }
 
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -1118,8 +1116,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        mss_now = tcp_send_mss(sk, &size_goal, flags);
 
        /* Ok commence sending. */
-       iovlen = msg->msg_iter.nr_segs;
-       iov = msg->msg_iter.iov;
        copied = 0;
 
        err = -EPIPE;
@@ -1128,151 +1124,134 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        sg = !!(sk->sk_route_caps & NETIF_F_SG);
 
-       while (--iovlen >= 0) {
-               size_t seglen = iov->iov_len;
-               unsigned char __user *from = iov->iov_base;
+       while (iov_iter_count(&msg->msg_iter)) {
+               int copy = 0;
+               int max = size_goal;
 
-               iov++;
-               if (unlikely(offset > 0)) {  /* Skip bytes copied in SYN */
-                       if (offset >= seglen) {
-                               offset -= seglen;
-                               continue;
-                       }
-                       seglen -= offset;
-                       from += offset;
-                       offset = 0;
+               skb = tcp_write_queue_tail(sk);
+               if (tcp_send_head(sk)) {
+                       if (skb->ip_summed == CHECKSUM_NONE)
+                               max = mss_now;
+                       copy = max - skb->len;
                }
 
-               while (seglen > 0) {
-                       int copy = 0;
-                       int max = size_goal;
-
-                       skb = tcp_write_queue_tail(sk);
-                       if (tcp_send_head(sk)) {
-                               if (skb->ip_summed == CHECKSUM_NONE)
-                                       max = mss_now;
-                               copy = max - skb->len;
-                       }
-
-                       if (copy <= 0) {
+               if (copy <= 0) {
 new_segment:
-                               /* Allocate new segment. If the interface is SG,
-                                * allocate skb fitting to single page.
-                                */
-                               if (!sk_stream_memory_free(sk))
-                                       goto wait_for_sndbuf;
+                       /* Allocate new segment. If the interface is SG,
+                        * allocate skb fitting to single page.
+                        */
+                       if (!sk_stream_memory_free(sk))
+                               goto wait_for_sndbuf;
 
-                               skb = sk_stream_alloc_skb(sk,
-                                                         select_size(sk, sg),
-                                                         sk->sk_allocation);
-                               if (!skb)
-                                       goto wait_for_memory;
+                       skb = sk_stream_alloc_skb(sk,
+                                                 select_size(sk, sg),
+                                                 sk->sk_allocation);
+                       if (!skb)
+                               goto wait_for_memory;
 
-                               /*
-                                * Check whether we can use HW checksum.
-                                */
-                               if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
-                                       skb->ip_summed = CHECKSUM_PARTIAL;
+                       /*
+                        * Check whether we can use HW checksum.
+                        */
+                       if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
+                               skb->ip_summed = CHECKSUM_PARTIAL;
 
-                               skb_entail(sk, skb);
-                               copy = size_goal;
-                               max = size_goal;
+                       skb_entail(sk, skb);
+                       copy = size_goal;
+                       max = size_goal;
 
-                               /* All packets are restored as if they have
-                                * already been sent. skb_mstamp isn't set to
-                                * avoid wrong rtt estimation.
-                                */
-                               if (tp->repair)
-                                       TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
-                       }
+                       /* All packets are restored as if they have
+                        * already been sent. skb_mstamp isn't set to
+                        * avoid wrong rtt estimation.
+                        */
+                       if (tp->repair)
+                               TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
+               }
 
-                       /* Try to append data to the end of skb. */
-                       if (copy > seglen)
-                               copy = seglen;
-
-                       /* Where to copy to? */
-                       if (skb_availroom(skb) > 0) {
-                               /* We have some space in skb head. Superb! */
-                               copy = min_t(int, copy, skb_availroom(skb));
-                               err = skb_add_data_nocache(sk, skb, from, copy);
-                               if (err)
-                                       goto do_fault;
-                       } else {
-                               bool merge = true;
-                               int i = skb_shinfo(skb)->nr_frags;
-                               struct page_frag *pfrag = sk_page_frag(sk);
-
-                               if (!sk_page_frag_refill(sk, pfrag))
-                                       goto wait_for_memory;
-
-                               if (!skb_can_coalesce(skb, i, pfrag->page,
-                                                     pfrag->offset)) {
-                                       if (i == MAX_SKB_FRAGS || !sg) {
-                                               tcp_mark_push(tp, skb);
-                                               goto new_segment;
-                                       }
-                                       merge = false;
-                               }
+               /* Try to append data to the end of skb. */
+               if (copy > iov_iter_count(&msg->msg_iter))
+                       copy = iov_iter_count(&msg->msg_iter);
+
+               /* Where to copy to? */
+               if (skb_availroom(skb) > 0) {
+                       /* We have some space in skb head. Superb! */
+                       copy = min_t(int, copy, skb_availroom(skb));
+                       err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
+                       if (err)
+                               goto do_fault;
+               } else {
+                       bool merge = true;
+                       int i = skb_shinfo(skb)->nr_frags;
+                       struct page_frag *pfrag = sk_page_frag(sk);
+
+                       if (!sk_page_frag_refill(sk, pfrag))
+                               goto wait_for_memory;
 
-                               copy = min_t(int, copy, pfrag->size - pfrag->offset);
-
-                               if (!sk_wmem_schedule(sk, copy))
-                                       goto wait_for_memory;
-
-                               err = skb_copy_to_page_nocache(sk, from, skb,
-                                                              pfrag->page,
-                                                              pfrag->offset,
-                                                              copy);
-                               if (err)
-                                       goto do_error;
-
-                               /* Update the skb. */
-                               if (merge) {
-                                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
-                               } else {
-                                       skb_fill_page_desc(skb, i, pfrag->page,
-                                                          pfrag->offset, copy);
-                                       get_page(pfrag->page);
+                       if (!skb_can_coalesce(skb, i, pfrag->page,
+                                             pfrag->offset)) {
+                               if (i == MAX_SKB_FRAGS || !sg) {
+                                       tcp_mark_push(tp, skb);
+                                       goto new_segment;
                                }
-                               pfrag->offset += copy;
+                               merge = false;
                        }
 
-                       if (!copied)
-                               TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+                       copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
-                       tp->write_seq += copy;
-                       TCP_SKB_CB(skb)->end_seq += copy;
-                       tcp_skb_pcount_set(skb, 0);
+                       if (!sk_wmem_schedule(sk, copy))
+                               goto wait_for_memory;
 
-                       from += copy;
-                       copied += copy;
-                       if ((seglen -= copy) == 0 && iovlen == 0) {
-                               tcp_tx_timestamp(sk, skb);
-                               goto out;
+                       err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
+                                                      pfrag->page,
+                                                      pfrag->offset,
+                                                      copy);
+                       if (err)
+                               goto do_error;
+
+                       /* Update the skb. */
+                       if (merge) {
+                               skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+                       } else {
+                               skb_fill_page_desc(skb, i, pfrag->page,
+                                                  pfrag->offset, copy);
+                               get_page(pfrag->page);
                        }
+                       pfrag->offset += copy;
+               }
 
-                       if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
-                               continue;
+               if (!copied)
+                       TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+
+               tp->write_seq += copy;
+               TCP_SKB_CB(skb)->end_seq += copy;
+               tcp_skb_pcount_set(skb, 0);
+
+               copied += copy;
+               if (!iov_iter_count(&msg->msg_iter)) {
+                       tcp_tx_timestamp(sk, skb);
+                       goto out;
+               }
 
-                       if (forced_push(tp)) {
-                               tcp_mark_push(tp, skb);
-                               __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
-                       } else if (skb == tcp_send_head(sk))
-                               tcp_push_one(sk, mss_now);
+               if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
                        continue;
 
+               if (forced_push(tp)) {
+                       tcp_mark_push(tp, skb);
+                       __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
+               } else if (skb == tcp_send_head(sk))
+                       tcp_push_one(sk, mss_now);
+               continue;
+
 wait_for_sndbuf:
-                       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
-                       if (copied)
-                               tcp_push(sk, flags & ~MSG_MORE, mss_now,
-                                        TCP_NAGLE_PUSH, size_goal);
+               if (copied)
+                       tcp_push(sk, flags & ~MSG_MORE, mss_now,
+                                TCP_NAGLE_PUSH, size_goal);
 
-                       if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
-                               goto do_error;
+               if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+                       goto do_error;
 
-                       mss_now = tcp_send_mss(sk, &size_goal, flags);
-               }
+               mss_now = tcp_send_mss(sk, &size_goal, flags);
        }
 
 out:
index 20ab06b228ac3bcc599a80cc3dffab202a30e30a..4fcc9a7688499ed13cca430f2d2542c0f92136ee 100644 (file)
@@ -948,7 +948,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
        skb_orphan(skb);
        skb->sk = sk;
-       skb->destructor = tcp_wfree;
+       skb->destructor = skb_is_tcp_pure_ack(skb) ? sock_wfree : tcp_wfree;
        skb_set_hash_from_sk(skb, sk);
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
@@ -3055,7 +3055,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_fastopen_request *fo = tp->fastopen_req;
-       int syn_loss = 0, space, err = 0;
+       int syn_loss = 0, space, err = 0, copied;
        unsigned long last_syn_loss = 0;
        struct sk_buff *syn_data;
 
@@ -3093,11 +3093,16 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
                goto fallback;
        syn_data->ip_summed = CHECKSUM_PARTIAL;
        memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
-       if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
-                                        fo->data->msg_iter.iov, 0, space))) {
+       copied = copy_from_iter(skb_put(syn_data, space), space,
+                               &fo->data->msg_iter);
+       if (unlikely(!copied)) {
                kfree_skb(syn_data);
                goto fallback;
        }
+       if (copied != space) {
+               skb_trim(syn_data, copied);
+               space = copied;
+       }
 
        /* No more data pending in inet_wait_for_connect() */
        if (space == fo->size)
@@ -3265,6 +3270,14 @@ void tcp_send_ack(struct sock *sk)
        skb_reserve(buff, MAX_TCP_HEADER);
        tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
+       /* We do not want pure acks influencing TCP Small Queues or fq/pacing
+        * too much.
+        * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
+        * We also avoid tcp_wfree() overhead (cache line miss accessing
+        * tp->tsq_flags) by using regular sock_wfree()
+        */
+       skb_set_tcp_pure_ack(buff);
+
        /* Send it off, this clears delayed acks for us. */
        skb_mstamp_get(&buff->skb_mstamp);
        tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
index 2d3148378a1f6a7315b135090bf1fed57f181a3b..bd46f736f61d74bcb75a4dabef264154f55a9fb0 100644 (file)
@@ -163,8 +163,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        pfh.icmph.checksum = 0;
        pfh.icmph.un.echo.id = inet->inet_sport;
        pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
-       /* XXX: stripping const */
-       pfh.iov = (struct iovec *)msg->msg_iter.iov;
+       pfh.msg = msg;
        pfh.wcheck = 0;
        pfh.family = AF_INET6;
 
index ee25631f8c293db3db95a0992fa2b319872afb30..dae7f1a1e46481d72e5b61da3cc9990d03c36380 100644 (file)
@@ -609,7 +609,7 @@ out:
        return err;
 }
 
-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
                        struct flowi6 *fl6, struct dst_entry **dstp,
                        unsigned int flags)
 {
@@ -648,7 +648,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
        skb->ip_summed = CHECKSUM_NONE;
 
        skb->transport_header = skb->network_header;
-       err = memcpy_fromiovecend((void *)iph, from, 0, length);
+       err = memcpy_from_msg(iph, msg, length);
        if (err)
                goto error_fault;
 
@@ -886,8 +886,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 
 back_from_confirm:
        if (inet->hdrincl)
-               /* XXX: stripping const */
-               err = rawv6_send_hdrinc(sk, (struct iovec *)msg->msg_iter.iov, len, &fl6, &dst, msg->msg_flags);
+               err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
        else {
                lock_sock(sk);
                err = ip6_append_data(sk, raw6_getfrag, &rfv,
index 75cc6801a4316dbeb25958192f018b8808681096..64a012a0c6e52dba4d026701402e56c6e82f73b5 100644 (file)
@@ -5,6 +5,7 @@ config MAC80211
        select CRYPTO_ARC4
        select CRYPTO_AES
        select CRYPTO_CCM
+       select CRYPTO_GCM
        select CRC32
        select AVERAGE
        ---help---
index e53671b1105e039ad95fadc8c3f96fd090550cc3..3275f01881bee8a53a046e117873347fe04877c8 100644 (file)
@@ -15,7 +15,9 @@ mac80211-y := \
        michael.o \
        tkip.o \
        aes_ccm.o \
+       aes_gcm.o \
        aes_cmac.o \
+       aes_gmac.o \
        cfg.o \
        ethtool.o \
        rx.o \
index 09d9caaec59112f40b060951ae16796388e2e741..7869bb40acaa1acbe60763493a738bf32812bb34 100644 (file)
@@ -20,7 +20,8 @@
 #include "aes_ccm.h"
 
 void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-                              u8 *data, size_t data_len, u8 *mic)
+                              u8 *data, size_t data_len, u8 *mic,
+                              size_t mic_len)
 {
        struct scatterlist assoc, pt, ct[2];
 
@@ -35,7 +36,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
        sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
        sg_init_table(ct, 2);
        sg_set_buf(&ct[0], data, data_len);
-       sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
+       sg_set_buf(&ct[1], mic, mic_len);
 
        aead_request_set_tfm(aead_req, tfm);
        aead_request_set_assoc(aead_req, &assoc, assoc.length);
@@ -45,7 +46,8 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
 }
 
 int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-                             u8 *data, size_t data_len, u8 *mic)
+                             u8 *data, size_t data_len, u8 *mic,
+                             size_t mic_len)
 {
        struct scatterlist assoc, pt, ct[2];
        char aead_req_data[sizeof(struct aead_request) +
@@ -62,17 +64,18 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
        sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
        sg_init_table(ct, 2);
        sg_set_buf(&ct[0], data, data_len);
-       sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
+       sg_set_buf(&ct[1], mic, mic_len);
 
        aead_request_set_tfm(aead_req, tfm);
        aead_request_set_assoc(aead_req, &assoc, assoc.length);
-       aead_request_set_crypt(aead_req, ct, &pt,
-                              data_len + IEEE80211_CCMP_MIC_LEN, b_0);
+       aead_request_set_crypt(aead_req, ct, &pt, data_len + mic_len, b_0);
 
        return crypto_aead_decrypt(aead_req);
 }
 
-struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
+struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
+                                                   size_t key_len,
+                                                   size_t mic_len)
 {
        struct crypto_aead *tfm;
        int err;
@@ -81,9 +84,9 @@ struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
        if (IS_ERR(tfm))
                return tfm;
 
-       err = crypto_aead_setkey(tfm, key, WLAN_KEY_LEN_CCMP);
+       err = crypto_aead_setkey(tfm, key, key_len);
        if (!err)
-               err = crypto_aead_setauthsize(tfm, IEEE80211_CCMP_MIC_LEN);
+               err = crypto_aead_setauthsize(tfm, mic_len);
        if (!err)
                return tfm;
 
index 2c7ab1948a2edba3964a5c0edfb7e941752719f6..6a73d1e4d186d34a00da8c2f8e509985c3805607 100644 (file)
 
 #include <linux/crypto.h>
 
-struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[]);
+struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
+                                                   size_t key_len,
+                                                   size_t mic_len);
 void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-                              u8 *data, size_t data_len, u8 *mic);
+                              u8 *data, size_t data_len, u8 *mic,
+                              size_t mic_len);
 int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-                             u8 *data, size_t data_len, u8 *mic);
+                             u8 *data, size_t data_len, u8 *mic,
+                             size_t mic_len);
 void ieee80211_aes_key_free(struct crypto_aead *tfm);
 
 #endif /* AES_CCM_H */
index 9b9009f99551bb18b9613b4c470361d7fded4eb3..4192806be3d36884d22ce5830a43cae63d54745d 100644 (file)
@@ -18,8 +18,8 @@
 #include "key.h"
 #include "aes_cmac.h"
 
-#define AES_CMAC_KEY_LEN 16
 #define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
+#define CMAC_TLEN_256 16 /* CMAC TLen = 128 bits (16 octets) */
 #define AAD_LEN 20
 
 
@@ -35,9 +35,9 @@ static void gf_mulx(u8 *pad)
                pad[AES_BLOCK_SIZE - 1] ^= 0x87;
 }
 
-
-static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
-                               const u8 *addr[], const size_t *len, u8 *mac)
+static void aes_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
+                           const u8 *addr[], const size_t *len, u8 *mac,
+                           size_t mac_len)
 {
        u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
        const u8 *pos, *end;
@@ -88,7 +88,7 @@ static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
        for (i = 0; i < AES_BLOCK_SIZE; i++)
                pad[i] ^= cbc[i];
        crypto_cipher_encrypt_one(tfm, pad, pad);
-       memcpy(mac, pad, CMAC_TLEN);
+       memcpy(mac, pad, mac_len);
 }
 
 
@@ -107,17 +107,35 @@ void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
        addr[2] = zero;
        len[2] = CMAC_TLEN;
 
-       aes_128_cmac_vector(tfm, 3, addr, len, mic);
+       aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN);
 }
 
+void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad,
+                           const u8 *data, size_t data_len, u8 *mic)
+{
+       const u8 *addr[3];
+       size_t len[3];
+       u8 zero[CMAC_TLEN_256];
+
+       memset(zero, 0, CMAC_TLEN_256);
+       addr[0] = aad;
+       len[0] = AAD_LEN;
+       addr[1] = data;
+       len[1] = data_len - CMAC_TLEN_256;
+       addr[2] = zero;
+       len[2] = CMAC_TLEN_256;
+
+       aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN_256);
+}
 
-struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[])
+struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[],
+                                                  size_t key_len)
 {
        struct crypto_cipher *tfm;
 
        tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
        if (!IS_ERR(tfm))
-               crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
+               crypto_cipher_setkey(tfm, key, key_len);
 
        return tfm;
 }
index 0ce6487af79536c03e7cae25639eeacd389f5f18..3702041f44fdb16ce382c701eb53ef9493009f52 100644 (file)
 
 #include <linux/crypto.h>
 
-struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[]);
+struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[],
+                                                  size_t key_len);
 void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
                        const u8 *data, size_t data_len, u8 *mic);
+void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad,
+                           const u8 *data, size_t data_len, u8 *mic);
 void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm);
 
 #endif /* AES_CMAC_H */
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
new file mode 100644 (file)
index 0000000..c2bf669
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2014-2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/aes.h>
+
+#include <net/mac80211.h>
+#include "key.h"
+#include "aes_gcm.h"
+
+/* Encrypt and authenticate a GCMP payload in place.
+ * j_0: 16-byte initial counter block for GCM; aad: buffer whose first two
+ * bytes hold the big-endian AAD length followed by the AAD itself (see
+ * be16_to_cpup() below); mic: receives the IEEE80211_GCMP_MIC_LEN tag.
+ * NOTE(review): the return value of crypto_aead_encrypt() is ignored.
+ */
+void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+                              u8 *data, size_t data_len, u8 *mic)
+{
+       struct scatterlist assoc, pt, ct[2];
+
+       /* Request is placed on the stack; size includes the tfm's private
+        * request context (crypto_aead_reqsize).
+        */
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *)aead_req_data;
+
+       memset(aead_req, 0, sizeof(aead_req_data));
+
+       /* Source is the plaintext; destination is plaintext area + MIC. */
+       sg_init_one(&pt, data, data_len);
+       sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
+       sg_init_table(ct, 2);
+       sg_set_buf(&ct[0], data, data_len);
+       sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN);
+
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, &assoc, assoc.length);
+       aead_request_set_crypt(aead_req, &pt, ct, data_len, j_0);
+
+       crypto_aead_encrypt(aead_req);
+}
+
+/* Decrypt and verify a GCMP payload in place.
+ * Mirrors ieee80211_aes_gcm_encrypt(): source is ciphertext + MIC,
+ * destination is the plaintext area.  Returns 0 on success or the error
+ * from crypto_aead_decrypt() (e.g. -EBADMSG on MIC mismatch).
+ */
+int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+                             u8 *data, size_t data_len, u8 *mic)
+{
+       struct scatterlist assoc, pt, ct[2];
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *)aead_req_data;
+
+       /* An empty payload has nothing to decrypt or authenticate. */
+       if (data_len == 0)
+               return -EINVAL;
+
+       memset(aead_req, 0, sizeof(aead_req_data));
+
+       sg_init_one(&pt, data, data_len);
+       sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
+       sg_init_table(ct, 2);
+       sg_set_buf(&ct[0], data, data_len);
+       sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN);
+
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, &assoc, assoc.length);
+       /* crypt length covers ciphertext plus the appended MIC. */
+       aead_request_set_crypt(aead_req, ct, &pt,
+                              data_len + IEEE80211_GCMP_MIC_LEN, j_0);
+
+       return crypto_aead_decrypt(aead_req);
+}
+
+/* Allocate a "gcm(aes)" AEAD transform and program it with @key.
+ * Sets the key, then the authentication tag size; on any failure the
+ * transform is freed and an ERR_PTR() is returned.
+ */
+struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
+                                                       size_t key_len)
+{
+       struct crypto_aead *tfm;
+       int err;
+
+       tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
+               return tfm;
+
+       err = crypto_aead_setkey(tfm, key, key_len);
+       if (!err)
+               err = crypto_aead_setauthsize(tfm, IEEE80211_GCMP_MIC_LEN);
+       if (!err)
+               return tfm;
+
+       crypto_free_aead(tfm);
+       return ERR_PTR(err);
+}
+
+/* Release a transform obtained from ieee80211_aes_gcm_key_setup_encrypt(). */
+void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm)
+{
+       crypto_free_aead(tfm);
+}
diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h
new file mode 100644 (file)
index 0000000..1347fda
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2014-2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef AES_GCM_H
+#define AES_GCM_H
+
+#include <linux/crypto.h>
+
+/* In-place GCMP encryption; writes the MIC behind the payload. */
+void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+                              u8 *data, size_t data_len, u8 *mic);
+/* In-place GCMP decryption/verification; 0 on success, else crypto error. */
+int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+                             u8 *data, size_t data_len, u8 *mic);
+/* Allocate and key a "gcm(aes)" AEAD transform; ERR_PTR() on failure. */
+struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
+                                                       size_t key_len);
+void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm);
+
+#endif /* AES_GCM_H */
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
new file mode 100644 (file)
index 0000000..1c72edc
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * AES-GMAC for IEEE 802.11 BIP-GMAC-128 and BIP-GMAC-256
+ * Copyright 2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/aes.h>
+
+#include <net/mac80211.h>
+#include "key.h"
+#include "aes_gmac.h"
+
+#define GMAC_MIC_LEN 16
+#define GMAC_NONCE_LEN 12
+#define AAD_LEN 20
+
+/* Compute the BIP-GMAC MIC over @aad and @data.
+ * GMAC is realized here as GCM with a zero-length plaintext: everything to
+ * be authenticated (AAD, the frame body minus its MIC field, and a zeroed
+ * placeholder for the MIC) is passed as associated data, and the resulting
+ * tag is written to @mic.
+ * Returns 0, or -EINVAL if @data is too short to hold a MIC field.
+ * NOTE(review): the return value of crypto_aead_encrypt() is ignored.
+ */
+int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+                      const u8 *data, size_t data_len, u8 *mic)
+{
+       struct scatterlist sg[3], ct[1];
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *)aead_req_data;
+       u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
+
+       if (data_len < GMAC_MIC_LEN)
+               return -EINVAL;
+
+       memset(aead_req, 0, sizeof(aead_req_data));
+
+       /* AAD || frame-body-without-MIC || zeroed MIC placeholder */
+       memset(zero, 0, GMAC_MIC_LEN);
+       sg_init_table(sg, 3);
+       sg_set_buf(&sg[0], aad, AAD_LEN);
+       sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
+       sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
+
+       /* IV = nonce padded with zeros, trailing counter byte set to 1. */
+       memcpy(iv, nonce, GMAC_NONCE_LEN);
+       memset(iv + GMAC_NONCE_LEN, 0, sizeof(iv) - GMAC_NONCE_LEN);
+       iv[AES_BLOCK_SIZE - 1] = 0x01;
+
+       sg_init_table(ct, 1);
+       sg_set_buf(&ct[0], mic, GMAC_MIC_LEN);
+
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, sg, AAD_LEN + data_len);
+       /* Zero-length crypt: only the authentication tag is produced. */
+       aead_request_set_crypt(aead_req, NULL, ct, 0, iv);
+
+       crypto_aead_encrypt(aead_req);
+
+       return 0;
+}
+
+/* Allocate a "gcm(aes)" AEAD transform for BIP-GMAC and program it with
+ * @key.  Returns the transform on success or an ERR_PTR() on failure.
+ *
+ * Fix: the original returned as soon as crypto_aead_setkey() succeeded,
+ * leaving the crypto_aead_setauthsize(tfm, GMAC_MIC_LEN) call unreachable
+ * dead code.  Reorder to setkey -> setauthsize -> success return, mirroring
+ * ieee80211_aes_gcm_key_setup_encrypt().
+ */
+struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
+                                                size_t key_len)
+{
+       struct crypto_aead *tfm;
+       int err;
+
+       tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
+               return tfm;
+
+       err = crypto_aead_setkey(tfm, key, key_len);
+       if (!err)
+               err = crypto_aead_setauthsize(tfm, GMAC_MIC_LEN);
+       if (!err)
+               return tfm;
+
+       crypto_free_aead(tfm);
+       return ERR_PTR(err);
+}
+
+/* Release a transform obtained from ieee80211_aes_gmac_key_setup(). */
+void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm)
+{
+       crypto_free_aead(tfm);
+}
diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h
new file mode 100644 (file)
index 0000000..d328204
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef AES_GMAC_H
+#define AES_GMAC_H
+
+#include <linux/crypto.h>
+
+/* Allocate and key a "gcm(aes)" transform for BIP-GMAC; ERR_PTR() on
+ * failure.
+ */
+struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
+                                                size_t key_len);
+/* Compute the GMAC MIC over aad+data into @mic; 0 on success. */
+int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+                      const u8 *data, size_t data_len, u8 *mic);
+void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm);
+
+#endif /* AES_GMAC_H */
index ff090ef1ea2cdd57d85484b9f24ec4c801873e3e..dd4ff36c557a44158ef64cd18aa090600fec1faf 100644 (file)
@@ -162,8 +162,13 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
                        return -EINVAL;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
        case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
                break;
        default:
                cs = ieee80211_cs_get(local, params->cipher, sdata->vif.type);
@@ -348,6 +353,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
                params.seq_len = 6;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                pn64 = atomic64_read(&key->u.ccmp.tx_pn);
                seq[0] = pn64;
                seq[1] = pn64 >> 8;
@@ -359,6 +365,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
                params.seq_len = 6;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
                seq[0] = pn64;
                seq[1] = pn64 >> 8;
@@ -369,6 +376,30 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
                params.seq = seq;
                params.seq_len = 6;
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
+               seq[0] = pn64;
+               seq[1] = pn64 >> 8;
+               seq[2] = pn64 >> 16;
+               seq[3] = pn64 >> 24;
+               seq[4] = pn64 >> 32;
+               seq[5] = pn64 >> 40;
+               params.seq = seq;
+               params.seq_len = 6;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               pn64 = atomic64_read(&key->u.gcmp.tx_pn);
+               seq[0] = pn64;
+               seq[1] = pn64 >> 8;
+               seq[2] = pn64 >> 16;
+               seq[3] = pn64 >> 24;
+               seq[4] = pn64 >> 32;
+               seq[5] = pn64 >> 40;
+               params.seq = seq;
+               params.seq_len = 6;
+               break;
        }
 
        params.key = key->conf.key;
@@ -2110,6 +2141,8 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
        struct ieee80211_sub_if_data *sdata;
+       enum nl80211_tx_power_setting txp_type = type;
+       bool update_txp_type = false;
 
        if (wdev) {
                sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
@@ -2117,6 +2150,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
                switch (type) {
                case NL80211_TX_POWER_AUTOMATIC:
                        sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+                       txp_type = NL80211_TX_POWER_LIMITED;
                        break;
                case NL80211_TX_POWER_LIMITED:
                case NL80211_TX_POWER_FIXED:
@@ -2126,7 +2160,12 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
                        break;
                }
 
-               ieee80211_recalc_txpower(sdata);
+               if (txp_type != sdata->vif.bss_conf.txpower_type) {
+                       update_txp_type = true;
+                       sdata->vif.bss_conf.txpower_type = txp_type;
+               }
+
+               ieee80211_recalc_txpower(sdata, update_txp_type);
 
                return 0;
        }
@@ -2134,6 +2173,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
        switch (type) {
        case NL80211_TX_POWER_AUTOMATIC:
                local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+               txp_type = NL80211_TX_POWER_LIMITED;
                break;
        case NL80211_TX_POWER_LIMITED:
        case NL80211_TX_POWER_FIXED:
@@ -2144,10 +2184,14 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
        }
 
        mutex_lock(&local->iflist_mtx);
-       list_for_each_entry(sdata, &local->interfaces, list)
+       list_for_each_entry(sdata, &local->interfaces, list) {
                sdata->user_power_level = local->user_power_level;
+               if (txp_type != sdata->vif.bss_conf.txpower_type)
+                       update_txp_type = true;
+               sdata->vif.bss_conf.txpower_type = txp_type;
+       }
        list_for_each_entry(sdata, &local->interfaces, list)
-               ieee80211_recalc_txpower(sdata);
+               ieee80211_recalc_txpower(sdata, update_txp_type);
        mutex_unlock(&local->iflist_mtx);
 
        return 0;
index 35b11e11e0c49ba81cb4ecdc9b3624e02628f503..ff0d2db09df9db467a5831606971e02f2fe6d410 100644 (file)
@@ -655,7 +655,7 @@ out:
        }
 
        if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
-               ieee80211_recalc_txpower(sdata);
+               ieee80211_recalc_txpower(sdata, false);
                ieee80211_recalc_chanctx_min_def(local, new_ctx);
        }
 
@@ -1387,7 +1387,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
                                ieee80211_bss_info_change_notify(sdata,
                                                                 changed);
 
-                       ieee80211_recalc_txpower(sdata);
+                       ieee80211_recalc_txpower(sdata, false);
                }
 
                ieee80211_recalc_chanctx_chantype(local, ctx);
index 5523b94c7c908f89e7d489903f486a99d738e7d3..71ac1b5f4da5632ab64893dfc2305c77a5a8b4d7 100644 (file)
@@ -94,17 +94,33 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
                                key->u.tkip.tx.iv16);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                pn = atomic64_read(&key->u.ccmp.tx_pn);
                len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
                                (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
                                (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                pn = atomic64_read(&key->u.aes_cmac.tx_pn);
                len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
                                (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
                                (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               pn = atomic64_read(&key->u.aes_gmac.tx_pn);
+               len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
+                               (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
+                               (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               pn = atomic64_read(&key->u.gcmp.tx_pn);
+               len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
+                               (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
+                               (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
+               break;
        default:
                return 0;
        }
@@ -134,6 +150,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
                len = p - buf;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
                        rpn = key->u.ccmp.rx_pn[i];
                        p += scnprintf(p, sizeof(buf)+buf-p,
@@ -144,6 +161,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
                len = p - buf;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                rpn = key->u.aes_cmac.rx_pn;
                p += scnprintf(p, sizeof(buf)+buf-p,
                               "%02x%02x%02x%02x%02x%02x\n",
@@ -151,6 +169,26 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
                               rpn[3], rpn[4], rpn[5]);
                len = p - buf;
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               rpn = key->u.aes_gmac.rx_pn;
+               p += scnprintf(p, sizeof(buf)+buf-p,
+                              "%02x%02x%02x%02x%02x%02x\n",
+                              rpn[0], rpn[1], rpn[2],
+                              rpn[3], rpn[4], rpn[5]);
+               len = p - buf;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
+                       rpn = key->u.gcmp.rx_pn[i];
+                       p += scnprintf(p, sizeof(buf)+buf-p,
+                                      "%02x%02x%02x%02x%02x%02x\n",
+                                      rpn[0], rpn[1], rpn[2],
+                                      rpn[3], rpn[4], rpn[5]);
+               }
+               len = p - buf;
+               break;
        default:
                return 0;
        }
@@ -167,12 +205,23 @@ static ssize_t key_replays_read(struct file *file, char __user *userbuf,
 
        switch (key->conf.cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                len = scnprintf(buf, sizeof(buf), "%u\n",
                                key->u.aes_cmac.replays);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               len = scnprintf(buf, sizeof(buf), "%u\n",
+                               key->u.aes_gmac.replays);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               len = scnprintf(buf, sizeof(buf), "%u\n", key->u.gcmp.replays);
+               break;
        default:
                return 0;
        }
@@ -189,9 +238,15 @@ static ssize_t key_icverrors_read(struct file *file, char __user *userbuf,
 
        switch (key->conf.cipher) {
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                len = scnprintf(buf, sizeof(buf), "%u\n",
                                key->u.aes_cmac.icverrors);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               len = scnprintf(buf, sizeof(buf), "%u\n",
+                               key->u.aes_gmac.icverrors);
+               break;
        default:
                return 0;
        }
index 156ea79e01579405de37480fdb0797768f675ecf..3afe36824703f49dcfe374e103b6372a9851b8aa 100644 (file)
@@ -1621,7 +1621,8 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local);
 void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
 
 bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
-void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
+void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
+                             bool update_bss);
 
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
@@ -1751,7 +1752,8 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
-       WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
+       WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+            !test_bit(SCAN_COMPLETED, &local->scanning),
                "%s: resume with hardware scan still in progress\n",
                wiphy_name(hw->wiphy));
 
@@ -1885,6 +1887,36 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
                              struct ieee80211_sub_if_data *sdata,
                              unsigned int queues, bool drop);
 
+static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
+{
+       /*
+        * If quiescing is set, we are racing with __ieee80211_suspend.
+        * __ieee80211_suspend flushes the workers after setting quiescing,
+        * and we check quiescing / suspended before enqueing new workers.
+        * We should abort the worker to avoid the races below.
+        */
+       if (local->quiescing)
+               return false;
+
+       /*
+        * We might already be suspended if the following scenario occurs:
+        * __ieee80211_suspend          Control path
+        *
+        *                              if (local->quiescing)
+        *                                      return;
+        * local->quiescing = true;
+        * flush_workqueue();
+        *                              queue_work(...);
+        * local->suspended = true;
+        * local->quiescing = false;
+        *                              worker starts running...
+        */
+       if (local->suspended)
+               return false;
+
+       return true;
+}
+
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                         u16 transaction, u16 auth_alg, u16 status,
                         const u8 *extra, size_t extra_len, const u8 *bssid,
index 677422e11e075137ce0c85bb6819ce0db17b187b..81a27516813e2f3473bec783ef9253b54967e62d 100644 (file)
@@ -73,9 +73,10 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
        return false;
 }
 
-void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
+void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
+                             bool update_bss)
 {
-       if (__ieee80211_recalc_txpower(sdata))
+       if (__ieee80211_recalc_txpower(sdata) || update_bss)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
 }
 
@@ -1169,12 +1170,7 @@ static void ieee80211_iface_work(struct work_struct *work)
        if (local->scanning)
                return;
 
-       /*
-        * ieee80211_queue_work() should have picked up most cases,
-        * here we'll pick the rest.
-        */
-       if (WARN(local->suspended,
-                "interface work scheduled while going to suspend\n"))
+       if (!ieee80211_can_run_worker(local))
                return;
 
        /* first process frames */
index f8d9f0ee59bf1b549ba18c4be160bbf3533e2249..0825d76edcfc81d93c4afa70ec81e157a7ecc2d2 100644 (file)
@@ -24,6 +24,8 @@
 #include "debugfs_key.h"
 #include "aes_ccm.h"
 #include "aes_cmac.h"
+#include "aes_gmac.h"
+#include "aes_gcm.h"
 
 
 /**
@@ -90,7 +92,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 {
        struct ieee80211_sub_if_data *sdata;
        struct sta_info *sta;
-       int ret;
+       int ret = -EOPNOTSUPP;
 
        might_sleep();
 
@@ -150,7 +152,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                return 0;
        }
 
-       if (ret != -ENOSPC && ret != -EOPNOTSUPP)
+       if (ret != -ENOSPC && ret != -EOPNOTSUPP && ret != 1)
                sdata_err(sdata,
                          "failed to set key (%d, %pM) to hardware (%d)\n",
                          key->conf.keyidx,
@@ -162,8 +164,18 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
        case WLAN_CIPHER_SUITE_WEP104:
        case WLAN_CIPHER_SUITE_TKIP:
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
        case WLAN_CIPHER_SUITE_AES_CMAC:
-               /* all of these we can do in software */
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               /* all of these we can do in software - if driver can */
+               if (ret == 1)
+                       return 0;
+               if (key->local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL)
+                       return -EINVAL;
                return 0;
        default:
                return -EINVAL;
@@ -382,7 +394,26 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                 * Initialize AES key state here as an optimization so that
                 * it does not need to be initialized for every packet.
                 */
-               key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data);
+               key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(
+                       key_data, key_len, IEEE80211_CCMP_MIC_LEN);
+               if (IS_ERR(key->u.ccmp.tfm)) {
+                       err = PTR_ERR(key->u.ccmp.tfm);
+                       kfree(key);
+                       return ERR_PTR(err);
+               }
+               break;
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               key->conf.iv_len = IEEE80211_CCMP_256_HDR_LEN;
+               key->conf.icv_len = IEEE80211_CCMP_256_MIC_LEN;
+               for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++)
+                       for (j = 0; j < IEEE80211_CCMP_256_PN_LEN; j++)
+                               key->u.ccmp.rx_pn[i][j] =
+                                       seq[IEEE80211_CCMP_256_PN_LEN - j - 1];
+               /* Initialize AES key state here as an optimization so that
+                * it does not need to be initialized for every packet.
+                */
+               key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(
+                       key_data, key_len, IEEE80211_CCMP_256_MIC_LEN);
                if (IS_ERR(key->u.ccmp.tfm)) {
                        err = PTR_ERR(key->u.ccmp.tfm);
                        kfree(key);
@@ -390,8 +421,12 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                }
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                key->conf.iv_len = 0;
-               key->conf.icv_len = sizeof(struct ieee80211_mmie);
+               if (cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+                       key->conf.icv_len = sizeof(struct ieee80211_mmie);
+               else
+                       key->conf.icv_len = sizeof(struct ieee80211_mmie_16);
                if (seq)
                        for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++)
                                key->u.aes_cmac.rx_pn[j] =
@@ -401,13 +436,51 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                 * it does not need to be initialized for every packet.
                 */
                key->u.aes_cmac.tfm =
-                       ieee80211_aes_cmac_key_setup(key_data);
+                       ieee80211_aes_cmac_key_setup(key_data, key_len);
                if (IS_ERR(key->u.aes_cmac.tfm)) {
                        err = PTR_ERR(key->u.aes_cmac.tfm);
                        kfree(key);
                        return ERR_PTR(err);
                }
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               key->conf.iv_len = 0;
+               key->conf.icv_len = sizeof(struct ieee80211_mmie_16);
+               if (seq)
+                       for (j = 0; j < IEEE80211_GMAC_PN_LEN; j++)
+                               key->u.aes_gmac.rx_pn[j] =
+                                       seq[IEEE80211_GMAC_PN_LEN - j - 1];
+               /* Initialize AES key state here as an optimization so that
+                * it does not need to be initialized for every packet.
+                */
+               key->u.aes_gmac.tfm =
+                       ieee80211_aes_gmac_key_setup(key_data, key_len);
+               if (IS_ERR(key->u.aes_gmac.tfm)) {
+                       err = PTR_ERR(key->u.aes_gmac.tfm);
+                       kfree(key);
+                       return ERR_PTR(err);
+               }
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               key->conf.iv_len = IEEE80211_GCMP_HDR_LEN;
+               key->conf.icv_len = IEEE80211_GCMP_MIC_LEN;
+               for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++)
+                       for (j = 0; j < IEEE80211_GCMP_PN_LEN; j++)
+                               key->u.gcmp.rx_pn[i][j] =
+                                       seq[IEEE80211_GCMP_PN_LEN - j - 1];
+               /* Initialize AES key state here as an optimization so that
+                * it does not need to be initialized for every packet.
+                */
+               key->u.gcmp.tfm = ieee80211_aes_gcm_key_setup_encrypt(key_data,
+                                                                     key_len);
+               if (IS_ERR(key->u.gcmp.tfm)) {
+                       err = PTR_ERR(key->u.gcmp.tfm);
+                       kfree(key);
+                       return ERR_PTR(err);
+               }
+               break;
        default:
                if (cs) {
                        size_t len = (seq_len > MAX_PN_LEN) ?
@@ -429,10 +502,24 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
 
 static void ieee80211_key_free_common(struct ieee80211_key *key)
 {
-       if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP)
+       switch (key->conf.cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                ieee80211_aes_key_free(key->u.ccmp.tfm);
-       if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               ieee80211_aes_gmac_key_free(key->u.aes_gmac.tfm);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               ieee80211_aes_gcm_key_free(key->u.gcmp.tfm);
+               break;
+       }
        kzfree(key);
 }
 
@@ -739,6 +826,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
                seq->tkip.iv16 = key->u.tkip.tx.iv16;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                pn64 = atomic64_read(&key->u.ccmp.tx_pn);
                seq->ccmp.pn[5] = pn64;
                seq->ccmp.pn[4] = pn64 >> 8;
@@ -748,6 +836,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
                seq->ccmp.pn[0] = pn64 >> 40;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
                seq->ccmp.pn[5] = pn64;
                seq->ccmp.pn[4] = pn64 >> 8;
@@ -756,6 +845,26 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
                seq->ccmp.pn[1] = pn64 >> 32;
                seq->ccmp.pn[0] = pn64 >> 40;
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
+               seq->ccmp.pn[5] = pn64;
+               seq->ccmp.pn[4] = pn64 >> 8;
+               seq->ccmp.pn[3] = pn64 >> 16;
+               seq->ccmp.pn[2] = pn64 >> 24;
+               seq->ccmp.pn[1] = pn64 >> 32;
+               seq->ccmp.pn[0] = pn64 >> 40;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               pn64 = atomic64_read(&key->u.gcmp.tx_pn);
+               seq->gcmp.pn[5] = pn64;
+               seq->gcmp.pn[4] = pn64 >> 8;
+               seq->gcmp.pn[3] = pn64 >> 16;
+               seq->gcmp.pn[2] = pn64 >> 24;
+               seq->gcmp.pn[1] = pn64 >> 32;
+               seq->gcmp.pn[0] = pn64 >> 40;
+               break;
        default:
                WARN_ON(1);
        }
@@ -778,6 +887,7 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
                seq->tkip.iv16 = key->u.tkip.rx[tid].iv16;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
                        return;
                if (tid < 0)
@@ -787,11 +897,29 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
                memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                if (WARN_ON(tid != 0))
                        return;
                pn = key->u.aes_cmac.rx_pn;
                memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               if (WARN_ON(tid != 0))
+                       return;
+               pn = key->u.aes_gmac.rx_pn;
+               memcpy(seq->aes_gmac.pn, pn, IEEE80211_GMAC_PN_LEN);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
+                       return;
+               if (tid < 0)
+                       pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS];
+               else
+                       pn = key->u.gcmp.rx_pn[tid];
+               memcpy(seq->gcmp.pn, pn, IEEE80211_GCMP_PN_LEN);
+               break;
        }
 }
 EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
@@ -810,6 +938,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
                key->u.tkip.tx.iv16 = seq->tkip.iv16;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                pn64 = (u64)seq->ccmp.pn[5] |
                       ((u64)seq->ccmp.pn[4] << 8) |
                       ((u64)seq->ccmp.pn[3] << 16) |
@@ -819,6 +948,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
                atomic64_set(&key->u.ccmp.tx_pn, pn64);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                pn64 = (u64)seq->aes_cmac.pn[5] |
                       ((u64)seq->aes_cmac.pn[4] << 8) |
                       ((u64)seq->aes_cmac.pn[3] << 16) |
@@ -827,6 +957,26 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
                       ((u64)seq->aes_cmac.pn[0] << 40);
                atomic64_set(&key->u.aes_cmac.tx_pn, pn64);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               pn64 = (u64)seq->aes_gmac.pn[5] |
+                      ((u64)seq->aes_gmac.pn[4] << 8) |
+                      ((u64)seq->aes_gmac.pn[3] << 16) |
+                      ((u64)seq->aes_gmac.pn[2] << 24) |
+                      ((u64)seq->aes_gmac.pn[1] << 32) |
+                      ((u64)seq->aes_gmac.pn[0] << 40);
+               atomic64_set(&key->u.aes_gmac.tx_pn, pn64);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               pn64 = (u64)seq->gcmp.pn[5] |
+                      ((u64)seq->gcmp.pn[4] << 8) |
+                      ((u64)seq->gcmp.pn[3] << 16) |
+                      ((u64)seq->gcmp.pn[2] << 24) |
+                      ((u64)seq->gcmp.pn[1] << 32) |
+                      ((u64)seq->gcmp.pn[0] << 40);
+               atomic64_set(&key->u.gcmp.tx_pn, pn64);
+               break;
        default:
                WARN_ON(1);
                break;
@@ -850,6 +1000,7 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
                key->u.tkip.rx[tid].iv16 = seq->tkip.iv16;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
                        return;
                if (tid < 0)
@@ -859,11 +1010,29 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
                memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                if (WARN_ON(tid != 0))
                        return;
                pn = key->u.aes_cmac.rx_pn;
                memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               if (WARN_ON(tid != 0))
+                       return;
+               pn = key->u.aes_gmac.rx_pn;
+               memcpy(pn, seq->aes_gmac.pn, IEEE80211_GMAC_PN_LEN);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
+                       return;
+               if (tid < 0)
+                       pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS];
+               else
+                       pn = key->u.gcmp.rx_pn[tid];
+               memcpy(pn, seq->gcmp.pn, IEEE80211_GCMP_PN_LEN);
+               break;
        default:
                WARN_ON(1);
                break;
index 19db68663d7555461768eeae45a0afad8b0b163b..d57a9915494f94eb44bfbf3ed609286d8eb9907f 100644 (file)
@@ -94,6 +94,24 @@ struct ieee80211_key {
                        u32 replays; /* dot11RSNAStatsCMACReplays */
                        u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
                } aes_cmac;
+               struct {
+                       atomic64_t tx_pn;
+                       u8 rx_pn[IEEE80211_GMAC_PN_LEN];
+                       struct crypto_aead *tfm;
+                       u32 replays; /* dot11RSNAStatsCMACReplays */
+                       u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
+               } aes_gmac;
+               struct {
+                       atomic64_t tx_pn;
+                       /* Last received packet number. The first
+                        * IEEE80211_NUM_TIDS counters are used with Data
+                        * frames and the last counter is used with Robust
+                        * Management frames.
+                        */
+                       u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_GCMP_PN_LEN];
+                       struct crypto_aead *tfm;
+                       u32 replays; /* dot11RSNAStatsGCMPReplays */
+               } gcmp;
                struct {
                        /* generic cipher scheme */
                        u8 rx_pn[IEEE80211_NUM_TIDS + 1][MAX_PN_LEN];
index d9ce33663c736f732b18906f90aba0c82c1c25c5..5e09d354c5a52f25a373740cbd54b8dddaf841df 100644 (file)
@@ -658,7 +658,6 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
        bool have_wep = !(IS_ERR(local->wep_tx_tfm) ||
                          IS_ERR(local->wep_rx_tfm));
        bool have_mfp = local->hw.flags & IEEE80211_HW_MFP_CAPABLE;
-       const struct ieee80211_cipher_scheme *cs = local->hw.cipher_schemes;
        int n_suites = 0, r = 0, w = 0;
        u32 *suites;
        static const u32 cipher_suites[] = {
@@ -667,79 +666,109 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
                WLAN_CIPHER_SUITE_WEP104,
                WLAN_CIPHER_SUITE_TKIP,
                WLAN_CIPHER_SUITE_CCMP,
+               WLAN_CIPHER_SUITE_CCMP_256,
+               WLAN_CIPHER_SUITE_GCMP,
+               WLAN_CIPHER_SUITE_GCMP_256,
 
                /* keep last -- depends on hw flags! */
-               WLAN_CIPHER_SUITE_AES_CMAC
+               WLAN_CIPHER_SUITE_AES_CMAC,
+               WLAN_CIPHER_SUITE_BIP_CMAC_256,
+               WLAN_CIPHER_SUITE_BIP_GMAC_128,
+               WLAN_CIPHER_SUITE_BIP_GMAC_256,
        };
 
-       /* Driver specifies the ciphers, we have nothing to do... */
-       if (local->hw.wiphy->cipher_suites && have_wep)
-               return 0;
+       if (local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL ||
+           local->hw.wiphy->cipher_suites) {
+               /* If the driver advertises, or doesn't support SW crypto,
+                * we only need to remove WEP if necessary.
+                */
+               if (have_wep)
+                       return 0;
+
+               /* well if it has _no_ ciphers ... fine */
+               if (!local->hw.wiphy->n_cipher_suites)
+                       return 0;
+
+               /* Driver provides cipher suites, but we need to exclude WEP */
+               suites = kmemdup(local->hw.wiphy->cipher_suites,
+                                sizeof(u32) * local->hw.wiphy->n_cipher_suites,
+                                GFP_KERNEL);
+               if (!suites)
+                       return -ENOMEM;
+
+               for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
+                       u32 suite = local->hw.wiphy->cipher_suites[r];
 
-       /* Set up cipher suites if driver relies on mac80211 cipher defs */
-       if (!local->hw.wiphy->cipher_suites && !cs) {
+                       if (suite == WLAN_CIPHER_SUITE_WEP40 ||
+                           suite == WLAN_CIPHER_SUITE_WEP104)
+                               continue;
+                       suites[w++] = suite;
+               }
+       } else if (!local->hw.cipher_schemes) {
+               /* If the driver doesn't have cipher schemes, there's nothing
+                * else to do other than assign the (software supported and
+                * perhaps offloaded) cipher suites.
+                */
                local->hw.wiphy->cipher_suites = cipher_suites;
                local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
                if (!have_mfp)
-                       local->hw.wiphy->n_cipher_suites--;
+                       local->hw.wiphy->n_cipher_suites -= 4;
 
                if (!have_wep) {
                        local->hw.wiphy->cipher_suites += 2;
                        local->hw.wiphy->n_cipher_suites -= 2;
                }
 
+               /* not dynamically allocated, so just return */
                return 0;
-       }
+       } else {
+               const struct ieee80211_cipher_scheme *cs;
 
-       if (!local->hw.wiphy->cipher_suites) {
-               /*
-                * Driver specifies cipher schemes only
-                * We start counting ciphers defined by schemes, TKIP and CCMP
+               cs = local->hw.cipher_schemes;
+
+               /* Driver specifies cipher schemes only (but not cipher suites
+                * including the schemes)
+                *
+                * We start counting ciphers defined by schemes, TKIP, CCMP,
+                * CCMP-256, GCMP, and GCMP-256
                 */
-               n_suites = local->hw.n_cipher_schemes + 2;
+               n_suites = local->hw.n_cipher_schemes + 5;
 
                /* check if we have WEP40 and WEP104 */
                if (have_wep)
                        n_suites += 2;
 
-               /* check if we have AES_CMAC */
+               /* check if we have AES_CMAC, BIP-CMAC-256, BIP-GMAC-128,
+                * BIP-GMAC-256
+                */
                if (have_mfp)
-                       n_suites++;
+                       n_suites += 4;
 
                suites = kmalloc(sizeof(u32) * n_suites, GFP_KERNEL);
                if (!suites)
                        return -ENOMEM;
 
                suites[w++] = WLAN_CIPHER_SUITE_CCMP;
+               suites[w++] = WLAN_CIPHER_SUITE_CCMP_256;
                suites[w++] = WLAN_CIPHER_SUITE_TKIP;
+               suites[w++] = WLAN_CIPHER_SUITE_GCMP;
+               suites[w++] = WLAN_CIPHER_SUITE_GCMP_256;
 
                if (have_wep) {
                        suites[w++] = WLAN_CIPHER_SUITE_WEP40;
                        suites[w++] = WLAN_CIPHER_SUITE_WEP104;
                }
 
-               if (have_mfp)
+               if (have_mfp) {
                        suites[w++] = WLAN_CIPHER_SUITE_AES_CMAC;
+                       suites[w++] = WLAN_CIPHER_SUITE_BIP_CMAC_256;
+                       suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_128;
+                       suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_256;
+               }
 
                for (r = 0; r < local->hw.n_cipher_schemes; r++)
                        suites[w++] = cs[r].cipher;
-       } else {
-               /* Driver provides cipher suites, but we need to exclude WEP */
-               suites = kmemdup(local->hw.wiphy->cipher_suites,
-                                sizeof(u32) * local->hw.wiphy->n_cipher_suites,
-                                GFP_KERNEL);
-               if (!suites)
-                       return -ENOMEM;
-
-               for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
-                       u32 suite = local->hw.wiphy->cipher_suites[r];
-
-                       if (suite == WLAN_CIPHER_SUITE_WEP40 ||
-                           suite == WLAN_CIPHER_SUITE_WEP104)
-                               continue;
-                       suites[w++] = suite;
-               }
        }
 
        local->hw.wiphy->cipher_suites = suites;
@@ -1041,10 +1070,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                ieee80211_max_network_latency;
        result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
                                     &local->network_latency_notifier);
-       if (result) {
-               rtnl_lock();
+       if (result)
                goto fail_pm_qos;
-       }
 
 #ifdef CONFIG_INET
        local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
@@ -1072,15 +1099,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
  fail_ifa:
        pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
                               &local->network_latency_notifier);
-       rtnl_lock();
 #endif
  fail_pm_qos:
-       ieee80211_led_exit(local);
+       rtnl_lock();
+       rate_control_deinitialize(local);
        ieee80211_remove_interfaces(local);
  fail_rate:
        rtnl_unlock();
+       ieee80211_led_exit(local);
        ieee80211_wep_free(local);
-       sta_info_stop(local);
        destroy_workqueue(local->workqueue);
  fail_workqueue:
        wiphy_unregister(local->hw.wiphy);
@@ -1176,6 +1203,8 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
 
        kfree(rcu_access_pointer(local->tx_latency));
 
+       sta_info_stop(local);
+
        wiphy_free(local->hw.wiphy);
 }
 EXPORT_SYMBOL(ieee80211_free_hw);
index fa94ca15ba95ba5cefac929a3031541f6a606d1f..b488e1859b18e8ed7797cffbb5ab2319138fdb28 100644 (file)
@@ -523,13 +523,6 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
            sdata->u.mesh.mshcfg.auto_open_plinks &&
            rssi_threshold_check(sdata, sta))
                changed = mesh_plink_open(sta);
-       else if (sta->plink_state == NL80211_PLINK_LISTEN &&
-                (sdata->u.mesh.user_mpm ||
-                 sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED))
-               cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
-                                                  elems->ie_start,
-                                                  elems->total_len,
-                                                  GFP_ATOMIC);
 
        ieee80211_mps_frame_release(sta, elems);
 out:
index c1460e635c7fad42951494de88a0c396d5bbc98c..10ac6324c1d014c708749748ce89ef31055561cf 100644 (file)
@@ -2011,6 +2011,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        /* disable per-vif ps */
        ieee80211_recalc_ps_vif(sdata);
 
+       /* make sure ongoing transmission finishes */
+       synchronize_net();
+
        /*
         * drop any frame before deauth/disassoc, this can be data or
         * management frame. Since we are disconnecting, we should not
index 9491e8689a9d4fb0cd230b6b27da9000942dce43..1101563357eae365f1e1a1df926ecf36fdc0570b 100644 (file)
@@ -647,6 +647,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
 {
        struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
        struct ieee80211_mmie *mmie;
+       struct ieee80211_mmie_16 *mmie16;
 
        if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
                return -1;
@@ -656,11 +657,18 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
 
        mmie = (struct ieee80211_mmie *)
                (skb->data + skb->len - sizeof(*mmie));
-       if (mmie->element_id != WLAN_EID_MMIE ||
-           mmie->length != sizeof(*mmie) - 2)
-               return -1;
-
-       return le16_to_cpu(mmie->key_id);
+       if (mmie->element_id == WLAN_EID_MMIE &&
+           mmie->length == sizeof(*mmie) - 2)
+               return le16_to_cpu(mmie->key_id);
+
+       mmie16 = (struct ieee80211_mmie_16 *)
+               (skb->data + skb->len - sizeof(*mmie16));
+       if (skb->len >= 24 + sizeof(*mmie16) &&
+           mmie16->element_id == WLAN_EID_MMIE &&
+           mmie16->length == sizeof(*mmie16) - 2)
+               return le16_to_cpu(mmie16->key_id);
+
+       return -1;
 }
 
 static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
@@ -1650,11 +1658,27 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
                result = ieee80211_crypto_tkip_decrypt(rx);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
-               result = ieee80211_crypto_ccmp_decrypt(rx);
+               result = ieee80211_crypto_ccmp_decrypt(
+                       rx, IEEE80211_CCMP_MIC_LEN);
+               break;
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               result = ieee80211_crypto_ccmp_decrypt(
+                       rx, IEEE80211_CCMP_256_MIC_LEN);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                result = ieee80211_crypto_aes_cmac_decrypt(rx);
                break;
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               result = ieee80211_crypto_aes_gmac_decrypt(rx);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               result = ieee80211_crypto_gcmp_decrypt(rx);
+               break;
        default:
                result = ieee80211_crypto_hw_decrypt(rx);
        }
@@ -1781,7 +1805,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                /* This is the first fragment of a new frame. */
                entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
                                                 rx->seqno_idx, &(rx->skb));
-               if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
+               if (rx->key &&
+                   (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
                    ieee80211_has_protected(fc)) {
                        int queue = rx->security_idx;
                        /* Store CCMP PN so that we can verify that the next
@@ -1810,7 +1836,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                int i;
                u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
                int queue;
-               if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
+               if (!rx->key ||
+                   (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
+                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
                        return RX_DROP_UNUSABLE;
                memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
                for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -2310,12 +2338,12 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
                return RX_DROP_MONITOR;
 
        if (rx->sta) {
-               /* The security index has the same property as needed
+               /* The seqno index has the same property as needed
                 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
                 * for non-QoS-data frames. Here we know it's a data
                 * frame, so count MSDUs.
                 */
-               rx->sta->rx_msdu[rx->security_idx]++;
+               rx->sta->rx_msdu[rx->seqno_idx]++;
        }
 
        /*
index 7807fa42ed3f4fcd8fc6b066cbf89cd987e1e331..05f0d711b6d8666701e91262141fb67711d9dad7 100644 (file)
@@ -828,6 +828,11 @@ void ieee80211_scan_work(struct work_struct *work)
 
        mutex_lock(&local->mtx);
 
+       if (!ieee80211_can_run_worker(local)) {
+               aborted = true;
+               goto out_complete;
+       }
+
        sdata = rcu_dereference_protected(local->scan_sdata,
                                          lockdep_is_held(&local->mtx));
        scan_req = rcu_dereference_protected(local->scan_req,
index 79383ef0c26405eeff3dff62e48ffe364ff16646..00ca8dcc2bcf2d924fb24ed0d4ee674295086aff 100644 (file)
@@ -1764,6 +1764,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 
        sinfo->generation = sdata->local->sta_generation;
 
+       /* do before driver, so beacon filtering drivers have a
+        * chance to e.g. just add the number of filtered beacons
+        * (or just modify the value entirely, of course)
+        */
+       if (sdata->vif.type == NL80211_IFTYPE_STATION)
+               sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;
+
        drv_sta_statistics(local, sdata, &sta->sta, sinfo);
 
        sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) |
@@ -1816,6 +1823,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
        sinfo->rx_dropped_misc = sta->rx_dropped;
        sinfo->beacon_loss_count = sta->beacon_loss_count;
 
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX) |
+                                BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+               sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
+       }
+
        if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
            (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
                if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
index 917088dfd69659de6614b327cc0ee23864e429cd..c9f9752217ac8230056e90e28a9b0b02883a87d5 100644 (file)
@@ -345,24 +345,24 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
         */
        sband = local->hw.wiphy->bands[band];
        memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
-       if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
-            action_code == WLAN_TDLS_SETUP_RESPONSE) &&
-           ht_cap.ht_supported && (!sta || sta->sta.ht_cap.ht_supported)) {
-               if (action_code == WLAN_TDLS_SETUP_REQUEST) {
-                       ieee80211_apply_htcap_overrides(sdata, &ht_cap);
-
-                       /* disable SMPS in TDLS initiator */
-                       ht_cap.cap |= (WLAN_HT_CAP_SM_PS_DISABLED
-                                      << IEEE80211_HT_CAP_SM_PS_SHIFT);
-               } else {
-                       /* disable SMPS in TDLS responder */
-                       sta->sta.ht_cap.cap |=
-                               (WLAN_HT_CAP_SM_PS_DISABLED
-                                << IEEE80211_HT_CAP_SM_PS_SHIFT);
-
-                       /* the peer caps are already intersected with our own */
-                       memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
-               }
+
+       if (action_code == WLAN_TDLS_SETUP_REQUEST && ht_cap.ht_supported) {
+               ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+               /* disable SMPS in TDLS initiator */
+               ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
+                               << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+               pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+               ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
+       } else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
+                  ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
+               /* disable SMPS in TDLS responder */
+               sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
+                                       << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+               /* the peer caps are already intersected with our own */
+               memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
 
                pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
                ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
@@ -852,7 +852,6 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
         */
        if ((action_code == WLAN_TDLS_TEARDOWN) &&
            (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
-               struct sta_info *sta = NULL;
                bool try_resend; /* Should we keep skb for possible resend */
 
                /* If not sending directly to peer - no point in keeping skb */
index 02ed6f60629a5aa123513f759104ca57513d0f5f..88a18ffe2975520edbcc80733bc1bbc9b2655f11 100644 (file)
@@ -626,6 +626,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                                tx->key = NULL;
                        break;
                case WLAN_CIPHER_SUITE_CCMP:
+               case WLAN_CIPHER_SUITE_CCMP_256:
+               case WLAN_CIPHER_SUITE_GCMP:
+               case WLAN_CIPHER_SUITE_GCMP_256:
                        if (!ieee80211_is_data_present(hdr->frame_control) &&
                            !ieee80211_use_mfp(hdr->frame_control, tx->sta,
                                               tx->skb))
@@ -636,6 +639,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                                        ieee80211_is_mgmt(hdr->frame_control);
                        break;
                case WLAN_CIPHER_SUITE_AES_CMAC:
+               case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_256:
                        if (!ieee80211_is_mgmt(hdr->frame_control))
                                tx->key = NULL;
                        break;
@@ -1011,9 +1017,21 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
        case WLAN_CIPHER_SUITE_TKIP:
                return ieee80211_crypto_tkip_encrypt(tx);
        case WLAN_CIPHER_SUITE_CCMP:
-               return ieee80211_crypto_ccmp_encrypt(tx);
+               return ieee80211_crypto_ccmp_encrypt(
+                       tx, IEEE80211_CCMP_MIC_LEN);
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               return ieee80211_crypto_ccmp_encrypt(
+                       tx, IEEE80211_CCMP_256_MIC_LEN);
        case WLAN_CIPHER_SUITE_AES_CMAC:
                return ieee80211_crypto_aes_cmac_encrypt(tx);
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               return ieee80211_crypto_aes_cmac_256_encrypt(tx);
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               return ieee80211_crypto_aes_gmac_encrypt(tx);
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               return ieee80211_crypto_gcmp_encrypt(tx);
        default:
                return ieee80211_crypto_hw_encrypt(tx);
        }
index fbd37d43dfceb31ccf57da5231930560eeff6b09..8428f4a954795657a32a24f77a0f9c9ae6591b7e 100644 (file)
@@ -744,16 +744,19 @@ EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif);
 
 /*
  * Nothing should have been stuffed into the workqueue during
- * the suspend->resume cycle. If this WARN is seen then there
- * is a bug with either the driver suspend or something in
- * mac80211 stuffing into the workqueue which we haven't yet
- * cleared during mac80211's suspend cycle.
+ * the suspend->resume cycle. Since we can't check each caller
+ * of this function if we are already quiescing / suspended,
+ * check here and don't WARN since this can actually happen when
+ * the rx path (for example) is racing against __ieee80211_suspend
+ * and suspending / quiescing was set after the rx path checked
+ * them.
  */
 static bool ieee80211_can_queue_work(struct ieee80211_local *local)
 {
-       if (WARN(local->suspended && !local->resuming,
-                "queueing ieee80211 work while going to suspend\n"))
+       if (local->quiescing || (local->suspended && !local->resuming)) {
+               pr_warn("queueing ieee80211 work while going to suspend\n");
                return false;
+       }
 
        return true;
 }
@@ -2057,6 +2060,18 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mb();
        local->resuming = false;
 
+       /* It's possible that we don't handle the scan completion in
+        * time during suspend, so if it's still marked as completed
+        * here, queue the work and flush it to clean things up.
+        * Instead of calling the worker function directly here, we
+        * really queue it to avoid potential races with other flows
+        * scheduling the same work.
+        */
+       if (test_bit(SCAN_COMPLETED, &local->scanning)) {
+               ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+               flush_delayed_work(&local->scan_work);
+       }
+
        if (local->open_count && !reconfig_due_to_wowlan)
                drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
 
index 12398fde02e87e7c7eb0eba1430e72287c6bb6f8..75de6fac40d1533fbb9836ab138d71e838ffb6af 100644 (file)
@@ -22,6 +22,8 @@
 #include "tkip.h"
 #include "aes_ccm.h"
 #include "aes_cmac.h"
+#include "aes_gmac.h"
+#include "aes_gcm.h"
 #include "wpa.h"
 
 ieee80211_tx_result
@@ -393,7 +395,8 @@ static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr)
 }
 
 
-static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
+                           unsigned int mic_len)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_key *key = tx->key;
@@ -424,7 +427,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
        if (info->control.hw_key)
                tail = 0;
        else
-               tail = IEEE80211_CCMP_MIC_LEN;
+               tail = mic_len;
 
        if (WARN_ON(skb_tailroom(skb) < tail ||
                    skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN))
@@ -459,21 +462,22 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
        pos += IEEE80211_CCMP_HDR_LEN;
        ccmp_special_blocks(skb, pn, b_0, aad);
        ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
-                                 skb_put(skb, IEEE80211_CCMP_MIC_LEN));
+                                 skb_put(skb, mic_len), mic_len);
 
        return 0;
 }
 
 
 ieee80211_tx_result
-ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
+ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx,
+                             unsigned int mic_len)
 {
        struct sk_buff *skb;
 
        ieee80211_tx_set_protected(tx);
 
        skb_queue_walk(&tx->skbs, skb) {
-               if (ccmp_encrypt_skb(tx, skb) < 0)
+               if (ccmp_encrypt_skb(tx, skb, mic_len) < 0)
                        return TX_DROP;
        }
 
@@ -482,7 +486,8 @@ ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
 
 
 ieee80211_rx_result
-ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
+ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
+                             unsigned int mic_len)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        int hdrlen;
@@ -499,8 +504,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
            !ieee80211_is_robust_mgmt_frame(skb))
                return RX_CONTINUE;
 
-       data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN -
-                  IEEE80211_CCMP_MIC_LEN;
+       data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
        if (!rx->sta || data_len < 0)
                return RX_DROP_UNUSABLE;
 
@@ -531,14 +535,14 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
                            key->u.ccmp.tfm, b_0, aad,
                            skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
                            data_len,
-                           skb->data + skb->len - IEEE80211_CCMP_MIC_LEN))
+                           skb->data + skb->len - mic_len, mic_len))
                        return RX_DROP_UNUSABLE;
        }
 
        memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
 
        /* Remove CCMP header and MIC */
-       if (pskb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN))
+       if (pskb_trim(skb, skb->len - mic_len))
                return RX_DROP_UNUSABLE;
        memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen);
        skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
@@ -546,6 +550,229 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
        return RX_CONTINUE;
 }
 
+static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
+{
+       __le16 mask_fc;
+       u8 qos_tid;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+       memcpy(j_0, hdr->addr2, ETH_ALEN);
+       memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN);
+       j_0[13] = 0;
+       j_0[14] = 0;
+       j_0[AES_BLOCK_SIZE - 1] = 0x01;
+
+       /* AAD (extra authenticate-only data) / masked 802.11 header
+        * FC | A1 | A2 | A3 | SC | [A4] | [QC]
+        */
+       put_unaligned_be16(ieee80211_hdrlen(hdr->frame_control) - 2, &aad[0]);
+       /* Mask FC: zero subtype b4 b5 b6 (if not mgmt)
+        * Retry, PwrMgt, MoreData; set Protected
+        */
+       mask_fc = hdr->frame_control;
+       mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
+                               IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
+       if (!ieee80211_is_mgmt(hdr->frame_control))
+               mask_fc &= ~cpu_to_le16(0x0070);
+       mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
+       put_unaligned(mask_fc, (__le16 *)&aad[2]);
+       memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);
+
+       /* Mask Seq#, leave Frag# */
+       aad[22] = *((u8 *)&hdr->seq_ctrl) & 0x0f;
+       aad[23] = 0;
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               qos_tid = *ieee80211_get_qos_ctl(hdr) &
+                       IEEE80211_QOS_CTL_TID_MASK;
+       else
+               qos_tid = 0;
+
+       if (ieee80211_has_a4(hdr->frame_control)) {
+               memcpy(&aad[24], hdr->addr4, ETH_ALEN);
+               aad[30] = qos_tid;
+               aad[31] = 0;
+       } else {
+               memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
+               aad[24] = qos_tid;
+       }
+}
+
+static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
+{
+       hdr[0] = pn[5];
+       hdr[1] = pn[4];
+       hdr[2] = 0;
+       hdr[3] = 0x20 | (key_id << 6);
+       hdr[4] = pn[3];
+       hdr[5] = pn[2];
+       hdr[6] = pn[1];
+       hdr[7] = pn[0];
+}
+
+static inline void gcmp_hdr2pn(u8 *pn, const u8 *hdr)
+{
+       pn[0] = hdr[7];
+       pn[1] = hdr[6];
+       pn[2] = hdr[5];
+       pn[3] = hdr[4];
+       pn[4] = hdr[1];
+       pn[5] = hdr[0];
+}
+
+static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_key *key = tx->key;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       int hdrlen, len, tail;
+       u8 *pos;
+       u8 pn[6];
+       u64 pn64;
+       u8 aad[2 * AES_BLOCK_SIZE];
+       u8 j_0[AES_BLOCK_SIZE];
+
+       if (info->control.hw_key &&
+           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
+           !((info->control.hw_key->flags &
+              IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
+             ieee80211_is_mgmt(hdr->frame_control))) {
+               /* hwaccel has no need for preallocated room for GCMP
+                * header or MIC fields
+                */
+               return 0;
+       }
+
+       hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       len = skb->len - hdrlen;
+
+       if (info->control.hw_key)
+               tail = 0;
+       else
+               tail = IEEE80211_GCMP_MIC_LEN;
+
+       if (WARN_ON(skb_tailroom(skb) < tail ||
+                   skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN))
+               return -1;
+
+       pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN);
+       memmove(pos, pos + IEEE80211_GCMP_HDR_LEN, hdrlen);
+       skb_set_network_header(skb, skb_network_offset(skb) +
+                                   IEEE80211_GCMP_HDR_LEN);
+
+       /* the HW only needs room for the IV, but not the actual IV */
+       if (info->control.hw_key &&
+           (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+               return 0;
+
+       hdr = (struct ieee80211_hdr *)pos;
+       pos += hdrlen;
+
+       pn64 = atomic64_inc_return(&key->u.gcmp.tx_pn);
+
+       pn[5] = pn64;
+       pn[4] = pn64 >> 8;
+       pn[3] = pn64 >> 16;
+       pn[2] = pn64 >> 24;
+       pn[1] = pn64 >> 32;
+       pn[0] = pn64 >> 40;
+
+       gcmp_pn2hdr(pos, pn, key->conf.keyidx);
+
+       /* hwaccel - with software GCMP header */
+       if (info->control.hw_key)
+               return 0;
+
+       pos += IEEE80211_GCMP_HDR_LEN;
+       gcmp_special_blocks(skb, pn, j_0, aad);
+       ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
+                                 skb_put(skb, IEEE80211_GCMP_MIC_LEN));
+
+       return 0;
+}
+
+ieee80211_tx_result
+ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx)
+{
+       struct sk_buff *skb;
+
+       ieee80211_tx_set_protected(tx);
+
+       skb_queue_walk(&tx->skbs, skb) {
+               if (gcmp_encrypt_skb(tx, skb) < 0)
+                       return TX_DROP;
+       }
+
+       return TX_CONTINUE;
+}
+
+ieee80211_rx_result
+ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+       int hdrlen;
+       struct ieee80211_key *key = rx->key;
+       struct sk_buff *skb = rx->skb;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       u8 pn[IEEE80211_GCMP_PN_LEN];
+       int data_len;
+       int queue;
+
+       hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+       if (!ieee80211_is_data(hdr->frame_control) &&
+           !ieee80211_is_robust_mgmt_frame(skb))
+               return RX_CONTINUE;
+
+       data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN -
+                  IEEE80211_GCMP_MIC_LEN;
+       if (!rx->sta || data_len < 0)
+               return RX_DROP_UNUSABLE;
+
+       if (status->flag & RX_FLAG_DECRYPTED) {
+               if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
+                       return RX_DROP_UNUSABLE;
+       } else {
+               if (skb_linearize(rx->skb))
+                       return RX_DROP_UNUSABLE;
+       }
+
+       gcmp_hdr2pn(pn, skb->data + hdrlen);
+
+       queue = rx->security_idx;
+
+       if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) {
+               key->u.gcmp.replays++;
+               return RX_DROP_UNUSABLE;
+       }
+
+       if (!(status->flag & RX_FLAG_DECRYPTED)) {
+               u8 aad[2 * AES_BLOCK_SIZE];
+               u8 j_0[AES_BLOCK_SIZE];
+               /* hardware didn't decrypt/verify MIC */
+               gcmp_special_blocks(skb, pn, j_0, aad);
+
+               if (ieee80211_aes_gcm_decrypt(
+                           key->u.gcmp.tfm, j_0, aad,
+                           skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
+                           data_len,
+                           skb->data + skb->len - IEEE80211_GCMP_MIC_LEN))
+                       return RX_DROP_UNUSABLE;
+       }
+
+       memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+
+       /* Remove GCMP header and MIC */
+       if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
+               return RX_DROP_UNUSABLE;
+       memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
+       skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
+
+       return RX_CONTINUE;
+}
+
 static ieee80211_tx_result
 ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
                            struct sk_buff *skb)
@@ -729,6 +956,48 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
        return TX_CONTINUE;
 }
 
+ieee80211_tx_result
+ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
+{
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
+       struct ieee80211_key *key = tx->key;
+       struct ieee80211_mmie_16 *mmie;
+       u8 aad[20];
+       u64 pn64;
+
+       if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+               return TX_DROP;
+
+       skb = skb_peek(&tx->skbs);
+
+       info = IEEE80211_SKB_CB(skb);
+
+       if (info->control.hw_key)
+               return TX_CONTINUE;
+
+       if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
+               return TX_DROP;
+
+       mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie));
+       mmie->element_id = WLAN_EID_MMIE;
+       mmie->length = sizeof(*mmie) - 2;
+       mmie->key_id = cpu_to_le16(key->conf.keyidx);
+
+       /* PN = PN + 1 */
+       pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn);
+
+       bip_ipn_set64(mmie->sequence_number, pn64);
+
+       bip_aad(skb, aad);
+
+       /* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128)
+        */
+       ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
+                              skb->data + 24, skb->len - 24, mmie->mic);
+
+       return TX_CONTINUE;
+}
 
 ieee80211_rx_result
 ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
@@ -780,6 +1049,160 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
        return RX_CONTINUE;
 }
 
+ieee80211_rx_result
+ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
+{
+       struct sk_buff *skb = rx->skb;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_key *key = rx->key;
+       struct ieee80211_mmie_16 *mmie;
+       u8 aad[20], mic[16], ipn[6];
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+       if (!ieee80211_is_mgmt(hdr->frame_control))
+               return RX_CONTINUE;
+
+       /* management frames are already linear */
+
+       if (skb->len < 24 + sizeof(*mmie))
+               return RX_DROP_UNUSABLE;
+
+       mmie = (struct ieee80211_mmie_16 *)
+               (skb->data + skb->len - sizeof(*mmie));
+       if (mmie->element_id != WLAN_EID_MMIE ||
+           mmie->length != sizeof(*mmie) - 2)
+               return RX_DROP_UNUSABLE; /* Invalid MMIE */
+
+       bip_ipn_swap(ipn, mmie->sequence_number);
+
+       if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) {
+               key->u.aes_cmac.replays++;
+               return RX_DROP_UNUSABLE;
+       }
+
+       if (!(status->flag & RX_FLAG_DECRYPTED)) {
+               /* hardware didn't decrypt/verify MIC */
+               bip_aad(skb, aad);
+               ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
+                                      skb->data + 24, skb->len - 24, mic);
+               if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+                       key->u.aes_cmac.icverrors++;
+                       return RX_DROP_UNUSABLE;
+               }
+       }
+
+       memcpy(key->u.aes_cmac.rx_pn, ipn, 6);
+
+       /* Remove MMIE */
+       skb_trim(skb, skb->len - sizeof(*mmie));
+
+       return RX_CONTINUE;
+}
+
+ieee80211_tx_result
+ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
+{
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
+       struct ieee80211_key *key = tx->key;
+       struct ieee80211_mmie_16 *mmie;
+       struct ieee80211_hdr *hdr;
+       u8 aad[20];
+       u64 pn64;
+       u8 nonce[12];
+
+       if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+               return TX_DROP;
+
+       skb = skb_peek(&tx->skbs);
+
+       info = IEEE80211_SKB_CB(skb);
+
+       if (info->control.hw_key)
+               return TX_CONTINUE;
+
+       if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
+               return TX_DROP;
+
+       mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie));
+       mmie->element_id = WLAN_EID_MMIE;
+       mmie->length = sizeof(*mmie) - 2;
+       mmie->key_id = cpu_to_le16(key->conf.keyidx);
+
+       /* PN = PN + 1 */
+       pn64 = atomic64_inc_return(&key->u.aes_gmac.tx_pn);
+
+       bip_ipn_set64(mmie->sequence_number, pn64);
+
+       bip_aad(skb, aad);
+
+       hdr = (struct ieee80211_hdr *)skb->data;
+       memcpy(nonce, hdr->addr2, ETH_ALEN);
+       bip_ipn_swap(nonce + ETH_ALEN, mmie->sequence_number);
+
+       /* MIC = AES-GMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */
+       if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
+                              skb->data + 24, skb->len - 24, mmie->mic) < 0)
+               return TX_DROP;
+
+       return TX_CONTINUE;
+}
+
+ieee80211_rx_result
+ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
+{
+       struct sk_buff *skb = rx->skb;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_key *key = rx->key;
+       struct ieee80211_mmie_16 *mmie;
+       u8 aad[20], mic[16], ipn[6], nonce[12];
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+       if (!ieee80211_is_mgmt(hdr->frame_control))
+               return RX_CONTINUE;
+
+       /* management frames are already linear */
+
+       if (skb->len < 24 + sizeof(*mmie))
+               return RX_DROP_UNUSABLE;
+
+       mmie = (struct ieee80211_mmie_16 *)
+               (skb->data + skb->len - sizeof(*mmie));
+       if (mmie->element_id != WLAN_EID_MMIE ||
+           mmie->length != sizeof(*mmie) - 2)
+               return RX_DROP_UNUSABLE; /* Invalid MMIE */
+
+       bip_ipn_swap(ipn, mmie->sequence_number);
+
+       if (memcmp(ipn, key->u.aes_gmac.rx_pn, 6) <= 0) {
+               key->u.aes_gmac.replays++;
+               return RX_DROP_UNUSABLE;
+       }
+
+       if (!(status->flag & RX_FLAG_DECRYPTED)) {
+               /* hardware didn't decrypt/verify MIC */
+               bip_aad(skb, aad);
+
+               memcpy(nonce, hdr->addr2, ETH_ALEN);
+               memcpy(nonce + ETH_ALEN, ipn, 6);
+
+               if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
+                                      skb->data + 24, skb->len - 24,
+                                      mic) < 0 ||
+                   memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+                       key->u.aes_gmac.icverrors++;
+                       return RX_DROP_UNUSABLE;
+               }
+       }
+
+       memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
+
+       /* Remove MMIE */
+       skb_trim(skb, skb->len - sizeof(*mmie));
+
+       return RX_CONTINUE;
+}
+
 ieee80211_tx_result
 ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx)
 {
index 62e5a12dfe0a24010b32eec210d7a38a1c4c3eb8..d98011ee8f554a512acd0d9711a4ee9bcb7583f0 100644 (file)
@@ -24,17 +24,32 @@ ieee80211_rx_result
 ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx);
 
 ieee80211_tx_result
-ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx,
+                             unsigned int mic_len);
 ieee80211_rx_result
-ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
+                             unsigned int mic_len);
 
 ieee80211_tx_result
 ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_tx_result
+ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx);
 ieee80211_rx_result
 ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_rx_result
+ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_tx_result
+ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_rx_result
+ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx);
 ieee80211_tx_result
 ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx);
 ieee80211_rx_result
 ieee80211_crypto_hw_decrypt(struct ieee80211_rx_data *rx);
 
+ieee80211_tx_result
+ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_rx_result
+ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx);
+
 #endif /* WPA_H */
index 75887d7d2c6a756b7863aca289f2e0f31d81bfc4..61e6c407476a618df386c2f14839033398aae14b 100644 (file)
@@ -130,31 +130,50 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                          struct nft_set_iter *iter)
 {
        struct rhashtable *priv = nft_set_priv(set);
-       const struct bucket_table *tbl;
        const struct nft_hash_elem *he;
+       struct rhashtable_iter hti;
        struct nft_set_elem elem;
-       unsigned int i;
+       int err;
 
-       tbl = rht_dereference_rcu(priv->tbl, priv);
-       for (i = 0; i < tbl->size; i++) {
-               struct rhash_head *pos;
+       err = rhashtable_walk_init(priv, &hti);
+       iter->err = err;
+       if (err)
+               return;
+
+       err = rhashtable_walk_start(&hti);
+       if (err && err != -EAGAIN) {
+               iter->err = err;
+               goto out;
+       }
 
-               rht_for_each_entry_rcu(he, pos, tbl, i, node) {
-                       if (iter->count < iter->skip)
-                               goto cont;
+       while ((he = rhashtable_walk_next(&hti))) {
+               if (IS_ERR(he)) {
+                       err = PTR_ERR(he);
+                       if (err != -EAGAIN) {
+                               iter->err = err;
+                               goto out;
+                       }
+               }
+
+               if (iter->count < iter->skip)
+                       goto cont;
+
+               memcpy(&elem.key, &he->key, sizeof(elem.key));
+               if (set->flags & NFT_SET_MAP)
+                       memcpy(&elem.data, he->data, sizeof(elem.data));
+               elem.flags = 0;
 
-                       memcpy(&elem.key, &he->key, sizeof(elem.key));
-                       if (set->flags & NFT_SET_MAP)
-                               memcpy(&elem.data, he->data, sizeof(elem.data));
-                       elem.flags = 0;
+               iter->err = iter->fn(ctx, set, iter, &elem);
+               if (iter->err < 0)
+                       goto out;
 
-                       iter->err = iter->fn(ctx, set, iter, &elem);
-                       if (iter->err < 0)
-                               return;
 cont:
-                       iter->count++;
-               }
+               iter->count++;
        }
+
+out:
+       rhashtable_walk_stop(&hti);
+       rhashtable_walk_exit(&hti);
 }
 
 static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
index a36777b7cfb6d89967d25fe784a3224caaccedab..6feb16d5e1b88ef00db380b2c8c55180a40a49df 100644 (file)
@@ -2298,7 +2298,12 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                        goto out;
        }
 
+       /* It's a really convoluted way for userland to ask for mmaped
+        * sendmsg(), but that's what we've got...
+        */
        if (netlink_tx_is_mmaped(sk) &&
+           msg->msg_iter.type == ITER_IOVEC &&
+           msg->msg_iter.nr_segs == 1 &&
            msg->msg_iter.iov->iov_base == NULL) {
                err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
                                           &scm);
@@ -2886,99 +2891,97 @@ EXPORT_SYMBOL(nlmsg_notify);
 #ifdef CONFIG_PROC_FS
 struct nl_seq_iter {
        struct seq_net_private p;
+       struct rhashtable_iter hti;
        int link;
-       int hash_idx;
 };
 
-static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
+static int netlink_walk_start(struct nl_seq_iter *iter)
 {
-       struct nl_seq_iter *iter = seq->private;
-       int i, j;
-       struct netlink_sock *nlk;
-       struct sock *s;
-       loff_t off = 0;
-
-       for (i = 0; i < MAX_LINKS; i++) {
-               struct rhashtable *ht = &nl_table[i].hash;
-               const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-
-               for (j = 0; j < tbl->size; j++) {
-                       struct rhash_head *node;
-
-                       rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
-                               s = (struct sock *)nlk;
+       int err;
 
-                               if (sock_net(s) != seq_file_net(seq))
-                                       continue;
-                               if (off == pos) {
-                                       iter->link = i;
-                                       iter->hash_idx = j;
-                                       return s;
-                               }
-                               ++off;
-                       }
-               }
+       err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
+       if (err) {
+               iter->link = MAX_LINKS;
+               return err;
        }
-       return NULL;
+
+       err = rhashtable_walk_start(&iter->hti);
+       return err == -EAGAIN ? 0 : err;
 }
 
-static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU)
+static void netlink_walk_stop(struct nl_seq_iter *iter)
 {
-       rcu_read_lock();
-       return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+       rhashtable_walk_stop(&iter->hti);
+       rhashtable_walk_exit(&iter->hti);
 }
 
-static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *__netlink_seq_next(struct seq_file *seq)
 {
-       struct rhashtable *ht;
-       const struct bucket_table *tbl;
-       struct rhash_head *node;
+       struct nl_seq_iter *iter = seq->private;
        struct netlink_sock *nlk;
-       struct nl_seq_iter *iter;
-       struct net *net;
-       int i, j;
 
-       ++*pos;
+       do {
+               for (;;) {
+                       int err;
 
-       if (v == SEQ_START_TOKEN)
-               return netlink_seq_socket_idx(seq, 0);
+                       nlk = rhashtable_walk_next(&iter->hti);
 
-       net = seq_file_net(seq);
-       iter = seq->private;
-       nlk = v;
+                       if (IS_ERR(nlk)) {
+                               if (PTR_ERR(nlk) == -EAGAIN)
+                                       continue;
 
-       i = iter->link;
-       ht = &nl_table[i].hash;
-       tbl = rht_dereference_rcu(ht->tbl, ht);
-       rht_for_each_entry_rcu_continue(nlk, node, nlk->node.next, tbl, iter->hash_idx, node)
-               if (net_eq(sock_net((struct sock *)nlk), net))
-                       return nlk;
+                               return nlk;
+                       }
 
-       j = iter->hash_idx + 1;
+                       if (nlk)
+                               break;
 
-       do {
+                       netlink_walk_stop(iter);
+                       if (++iter->link >= MAX_LINKS)
+                               return NULL;
 
-               for (; j < tbl->size; j++) {
-                       rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
-                               if (net_eq(sock_net((struct sock *)nlk), net)) {
-                                       iter->link = i;
-                                       iter->hash_idx = j;
-                                       return nlk;
-                               }
-                       }
+                       err = netlink_walk_start(iter);
+                       if (err)
+                               return ERR_PTR(err);
                }
+       } while (sock_net(&nlk->sk) != seq_file_net(seq));
 
-               j = 0;
-       } while (++i < MAX_LINKS);
+       return nlk;
+}
 
-       return NULL;
+static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
+{
+       struct nl_seq_iter *iter = seq->private;
+       void *obj = SEQ_START_TOKEN;
+       loff_t pos;
+       int err;
+
+       iter->link = 0;
+
+       err = netlink_walk_start(iter);
+       if (err)
+               return ERR_PTR(err);
+
+       for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
+               obj = __netlink_seq_next(seq);
+
+       return obj;
+}
+
+static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       ++*pos;
+       return __netlink_seq_next(seq);
 }
 
 static void netlink_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU)
 {
-       rcu_read_unlock();
+       struct nl_seq_iter *iter = seq->private;
+
+       if (iter->link >= MAX_LINKS)
+               return;
+
+       netlink_walk_stop(iter);
 }
 
 
index 3f4a0bbeed3da2bfe2863153dc44767f47e43686..d978f2f46ff35e0181e2a833e3bfab2b6d58221e 100644 (file)
@@ -170,6 +170,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
        { "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
+       { "BCM2E40", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E64", RFKILL_TYPE_BLUETOOTH },
        { "BCM4752", RFKILL_TYPE_GPS },
        { "LNV4752", RFKILL_TYPE_GPS },
index e1a9373e59799fd2a9cd998fbdc4399d2d021f6a..8331c95e152283d437b3dee9205cf37c9ce06271 100644 (file)
@@ -232,10 +232,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
                   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
                ret = -EPROTO; /* request phase complete for this client call */
        } else {
-               mm_segment_t oldfs = get_fs();
-               set_fs(KERNEL_DS);
                ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
-               set_fs(oldfs);
        }
 
        release_sock(&call->socket->sk);
@@ -529,13 +526,11 @@ static int rxrpc_send_data(struct kiocb *iocb,
                           struct msghdr *msg, size_t len)
 {
        struct rxrpc_skb_priv *sp;
-       unsigned char __user *from;
        struct sk_buff *skb;
-       const struct iovec *iov;
        struct sock *sk = &rx->sk;
        long timeo;
        bool more;
-       int ret, ioc, segment, copied;
+       int ret, copied;
 
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
@@ -545,25 +540,17 @@ static int rxrpc_send_data(struct kiocb *iocb,
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                return -EPIPE;
 
-       iov = msg->msg_iter.iov;
-       ioc = msg->msg_iter.nr_segs - 1;
-       from = iov->iov_base;
-       segment = iov->iov_len;
-       iov++;
        more = msg->msg_flags & MSG_MORE;
 
        skb = call->tx_pending;
        call->tx_pending = NULL;
 
        copied = 0;
-       do {
+       if (len > iov_iter_count(&msg->msg_iter))
+               len = iov_iter_count(&msg->msg_iter);
+       while (len) {
                int copy;
 
-               if (segment > len)
-                       segment = len;
-
-               _debug("SEGMENT %d @%p", segment, from);
-
                if (!skb) {
                        size_t size, chunk, max, space;
 
@@ -631,13 +618,13 @@ static int rxrpc_send_data(struct kiocb *iocb,
                /* append next segment of data to the current buffer */
                copy = skb_tailroom(skb);
                ASSERTCMP(copy, >, 0);
-               if (copy > segment)
-                       copy = segment;
+               if (copy > len)
+                       copy = len;
                if (copy > sp->remain)
                        copy = sp->remain;
 
                _debug("add");
-               ret = skb_add_data(skb, from, copy);
+               ret = skb_add_data(skb, &msg->msg_iter, copy);
                _debug("added");
                if (ret < 0)
                        goto efault;
@@ -646,18 +633,6 @@ static int rxrpc_send_data(struct kiocb *iocb,
                copied += copy;
 
                len -= copy;
-               segment -= copy;
-               from += copy;
-               while (segment == 0 && ioc > 0) {
-                       from = iov->iov_base;
-                       segment = iov->iov_len;
-                       iov++;
-                       ioc--;
-               }
-               if (len == 0) {
-                       segment = 0;
-                       ioc = 0;
-               }
 
                /* check for the far side aborting the call or a network error
                 * occurring */
@@ -665,7 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
                        goto call_aborted;
 
                /* add the packet to the send queue if it's now full */
-               if (sp->remain <= 0 || (segment == 0 && !more)) {
+               if (sp->remain <= 0 || (!len && !more)) {
                        struct rxrpc_connection *conn = call->conn;
                        uint32_t seq;
                        size_t pad;
@@ -711,11 +686,10 @@ static int rxrpc_send_data(struct kiocb *iocb,
 
                        memcpy(skb->head, &sp->hdr,
                               sizeof(struct rxrpc_header));
-                       rxrpc_queue_packet(call, skb, segment == 0 && !more);
+                       rxrpc_queue_packet(call, skb, !iov_iter_count(&msg->msg_iter) && !more);
                        skb = NULL;
                }
-
-       } while (segment > 0);
+       }
 
 success:
        ret = copied;
index 2a50f5c62070a81ae37d871aac2626555128fd38..a00c4304300101a093e834c73fdd3bdb2c2c38a3 100644 (file)
@@ -52,6 +52,7 @@
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
+#include <net/tcp.h>
 
 /*
  * Per flow structure, dynamically allocated
@@ -92,6 +93,7 @@ struct fq_sched_data {
        u32             flow_refill_delay;
        u32             flow_max_rate;  /* optional max rate per flow */
        u32             flow_plimit;    /* max packets per flow */
+       u32             orphan_mask;    /* mask for orphaned skb */
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;
@@ -222,11 +224,20 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;
 
-       if (unlikely(!sk)) {
+       /* SYNACK messages are attached to a listener socket.
+        * 1) They are not part of a 'flow' yet
+        * 2) We do not want to rate limit them (eg SYNFLOOD attack),
+        *    especially if the listener set SO_MAX_PACING_RATE
+        * 3) We pretend they are orphaned
+        */
+       if (!sk || sk->sk_state == TCP_LISTEN) {
+               unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
+
                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
-               sk = (struct sock *)(skb_get_hash(skb) | 1L);
+               sk = (struct sock *)((hash << 1) | 1UL);
+               skb_orphan(skb);
        }
 
        root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];
@@ -445,7 +456,9 @@ begin:
                goto begin;
        }
 
-       if (unlikely(f->head && now < f->time_next_packet)) {
+       skb = f->head;
+       if (unlikely(skb && now < f->time_next_packet &&
+                    !skb_is_tcp_pure_ack(skb))) {
                head->first = f->next;
                fq_flow_set_throttled(q, f);
                goto begin;
@@ -464,12 +477,15 @@ begin:
                goto begin;
        }
        prefetch(&skb->end);
-       f->time_next_packet = now;
        f->credit -= qdisc_pkt_len(skb);
 
        if (f->credit > 0 || !q->rate_enable)
                goto out;
 
+       /* Do not pace locally generated ack packets */
+       if (skb_is_tcp_pure_ack(skb))
+               goto out;
+
        rate = q->flow_max_rate;
        if (skb->sk)
                rate = min(skb->sk->sk_pacing_rate, rate);
@@ -698,6 +714,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }
 
+       if (tb[TCA_FQ_ORPHAN_MASK])
+               q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
+
        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
@@ -743,6 +762,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
+       q->orphan_mask          = 1024 - 1;
        qdisc_watchdog_init(&q->watchdog, sch);
 
        if (opt)
@@ -772,6 +792,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
+           nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;
 
index 3326d67482acbacfd35bb2dcf7916ab2057e160d..bbedbfcb42c2505fceb57fa058f262d90e1670ed 100644 (file)
@@ -113,10 +113,8 @@ unsigned int sysctl_net_busy_read __read_mostly;
 unsigned int sysctl_net_busy_poll __read_mostly;
 #endif
 
-static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                        unsigned long nr_segs, loff_t pos);
-static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                         unsigned long nr_segs, loff_t pos);
+static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
+static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
@@ -142,8 +140,10 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 static const struct file_operations socket_file_ops = {
        .owner =        THIS_MODULE,
        .llseek =       no_llseek,
-       .aio_read =     sock_aio_read,
-       .aio_write =    sock_aio_write,
+       .read =         new_sync_read,
+       .write =        new_sync_write,
+       .read_iter =    sock_read_iter,
+       .write_iter =   sock_write_iter,
        .poll =         sock_poll,
        .unlocked_ioctl = sock_ioctl,
 #ifdef CONFIG_COMPAT
@@ -845,63 +845,47 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
        return sock->ops->splice_read(sock, ppos, pipe, len, flags);
 }
 
-static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
-               struct file *file, const struct iovec *iov,
-               unsigned long nr_segs)
+static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+       struct file *file = iocb->ki_filp;
        struct socket *sock = file->private_data;
+       struct msghdr msg = {.msg_iter = *to};
+       ssize_t res;
 
-       msg->msg_name = NULL;
-       msg->msg_namelen = 0;
-       msg->msg_control = NULL;
-       msg->msg_controllen = 0;
-       iov_iter_init(&msg->msg_iter, READ, iov, nr_segs, iocb->ki_nbytes);
-       msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+       if (file->f_flags & O_NONBLOCK)
+               msg.msg_flags = MSG_DONTWAIT;
 
-       return __sock_recvmsg(iocb, sock, msg, iocb->ki_nbytes, msg->msg_flags);
-}
-
-static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos)
-{
-       struct msghdr msg;
-
-       if (pos != 0)
+       if (iocb->ki_pos != 0)
                return -ESPIPE;
 
        if (iocb->ki_nbytes == 0)       /* Match SYS5 behaviour */
                return 0;
 
-       return do_sock_read(&msg, iocb, iocb->ki_filp, iov, nr_segs);
+       res = __sock_recvmsg(iocb, sock, &msg,
+                            iocb->ki_nbytes, msg.msg_flags);
+       *to = msg.msg_iter;
+       return res;
 }
 
-static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
-                       struct file *file, const struct iovec *iov,
-                       unsigned long nr_segs)
+static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
        struct socket *sock = file->private_data;
+       struct msghdr msg = {.msg_iter = *from};
+       ssize_t res;
 
-       msg->msg_name = NULL;
-       msg->msg_namelen = 0;
-       msg->msg_control = NULL;
-       msg->msg_controllen = 0;
-       iov_iter_init(&msg->msg_iter, WRITE, iov, nr_segs, iocb->ki_nbytes);
-       msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
-       if (sock->type == SOCK_SEQPACKET)
-               msg->msg_flags |= MSG_EOR;
-
-       return __sock_sendmsg(iocb, sock, msg, iocb->ki_nbytes);
-}
+       if (iocb->ki_pos != 0)
+               return -ESPIPE;
 
-static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                         unsigned long nr_segs, loff_t pos)
-{
-       struct msghdr msg;
+       if (file->f_flags & O_NONBLOCK)
+               msg.msg_flags = MSG_DONTWAIT;
 
-       if (pos != 0)
-               return -ESPIPE;
+       if (sock->type == SOCK_SEQPACKET)
+               msg.msg_flags |= MSG_EOR;
 
-       return do_sock_write(&msg, iocb, iocb->ki_filp, iov, nr_segs);
+       res = __sock_sendmsg(iocb, sock, &msg, iocb->ki_nbytes);
+       *from = msg.msg_iter;
+       return res;
 }
 
 /*
index 5b40cb89ff0aaa5492ba5c51b38df6bdc3bc85a8..a580a40d0208a7c9a46dd1246440209e52434d83 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/discover.c
  *
- * Copyright (c) 2003-2006, 2014, Ericsson AB
+ * Copyright (c) 2003-2006, 2014-2015, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -47,7 +47,6 @@
 /* indicates no timer in use */
 #define TIPC_LINK_REQ_INACTIVE 0xffffffff
 
-
 /**
  * struct tipc_link_req - information about an ongoing link setup request
  * @bearer_id: identity of bearer issuing requests
@@ -163,13 +162,9 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
        if (!tipc_in_scope(bearer->domain, onode))
                return;
 
-       /* Locate, or if necessary, create, node: */
-       node = tipc_node_find(net, onode);
-       if (!node)
-               node = tipc_node_create(net, onode);
+       node = tipc_node_create(net, onode);
        if (!node)
                return;
-
        tipc_node_lock(node);
        link = node->links[bearer->identity];
 
index 2846ad802e43f3361250b1ebe2b8e57109ae55c2..77c7ccd492b54c1d662be3d326d64e6faf2a679d 100644 (file)
@@ -127,6 +127,21 @@ static unsigned int align(unsigned int i)
        return (i + 3) & ~3u;
 }
 
+static void tipc_link_release(struct kref *kref)
+{
+       kfree(container_of(kref, struct tipc_link, ref));
+}
+
+static void tipc_link_get(struct tipc_link *l_ptr)
+{
+       kref_get(&l_ptr->ref);
+}
+
+static void tipc_link_put(struct tipc_link *l_ptr)
+{
+       kref_put(&l_ptr->ref, tipc_link_release);
+}
+
 static void link_init_max_pkt(struct tipc_link *l_ptr)
 {
        struct tipc_node *node = l_ptr->owner;
@@ -222,11 +237,13 @@ static void link_timeout(unsigned long data)
                tipc_link_push_packets(l_ptr);
 
        tipc_node_unlock(l_ptr->owner);
+       tipc_link_put(l_ptr);
 }
 
 static void link_set_timer(struct tipc_link *link, unsigned long time)
 {
-       mod_timer(&link->timer, jiffies + time);
+       if (!mod_timer(&link->timer, jiffies + time))
+               tipc_link_get(link);
 }
 
 /**
@@ -267,7 +284,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                pr_warn("Link creation failed, no memory\n");
                return NULL;
        }
-
+       kref_init(&l_ptr->ref);
        l_ptr->addr = peer;
        if_name = strchr(b_ptr->name, ':') + 1;
        sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
@@ -305,46 +322,48 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
        skb_queue_head_init(&l_ptr->waiting_sks);
 
        link_reset_statistics(l_ptr);
-
        tipc_node_attach_link(n_ptr, l_ptr);
-
        setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
-
        link_state_event(l_ptr, STARTING_EVT);
 
        return l_ptr;
 }
 
+/**
+ * link_delete - Conditional deletion of link.
+ *               If timer still running, real delete is done when it expires
+ * @link: link to be deleted
+ */
+void tipc_link_delete(struct tipc_link *link)
+{
+       tipc_link_reset_fragments(link);
+       tipc_node_detach_link(link->owner, link);
+       tipc_link_put(link);
+}
+
 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
                           bool shutting_down)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *l_ptr;
-       struct tipc_node *n_ptr;
+       struct tipc_link *link;
+       struct tipc_node *node;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->links[bearer_id];
-               if (l_ptr) {
-                       tipc_link_reset(l_ptr);
-                       if (shutting_down || !tipc_node_is_up(n_ptr)) {
-                               tipc_node_detach_link(l_ptr->owner, l_ptr);
-                               tipc_link_reset_fragments(l_ptr);
-                               tipc_node_unlock(n_ptr);
-
-                               /* Nobody else can access this link now: */
-                               del_timer_sync(&l_ptr->timer);
-                               kfree(l_ptr);
-                       } else {
-                               /* Detach/delete when failover is finished: */
-                               l_ptr->flags |= LINK_STOPPED;
-                               tipc_node_unlock(n_ptr);
-                               del_timer_sync(&l_ptr->timer);
-                       }
+       list_for_each_entry_rcu(node, &tn->node_list, list) {
+               tipc_node_lock(node);
+               link = node->links[bearer_id];
+               if (!link) {
+                       tipc_node_unlock(node);
                        continue;
                }
-               tipc_node_unlock(n_ptr);
+               tipc_link_reset(link);
+               if (del_timer(&link->timer))
+                       tipc_link_put(link);
+               link->flags |= LINK_STOPPED;
+               /* Delete link now, or when failover is finished: */
+               if (shutting_down || !tipc_node_is_up(node))
+                       tipc_link_delete(link);
+               tipc_node_unlock(node);
        }
        rcu_read_unlock();
 }
@@ -630,7 +649,9 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        break;
                case STARTING_EVT:
                        l_ptr->flags |= LINK_STARTED;
-                       /* fall through */
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
                case TIMEOUT_EVT:
                        tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
@@ -1837,10 +1858,8 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
                }
        }
 exit:
-       if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
-               tipc_node_detach_link(l_ptr->owner, l_ptr);
-               kfree(l_ptr);
-       }
+       if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
+               tipc_link_delete(l_ptr);
        return buf;
 }
 
index 9df7fa4d3bdd3f7a38e1f5ff6225f79ac74a6534..3e3432b3044e01c867feb2d0e04af9b102531082 100644 (file)
@@ -103,6 +103,7 @@ struct tipc_stats {
  * @media_addr: media address to use when sending messages over link
  * @timer: link timer
  * @owner: pointer to peer node
+ * @refcnt: reference counter for permanent references (owner node & timer)
  * @flags: execution state flags for link endpoint instance
  * @checkpoint: reference point for triggering link continuity checking
  * @peer_session: link session # being used by peer end of link
@@ -142,6 +143,7 @@ struct tipc_link {
        struct tipc_media_addr media_addr;
        struct timer_list timer;
        struct tipc_node *owner;
+       struct kref ref;
 
        /* Management and link supervision data */
        unsigned int flags;
@@ -200,6 +202,7 @@ struct tipc_port;
 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                              struct tipc_bearer *b_ptr,
                              const struct tipc_media_addr *media_addr);
+void tipc_link_delete(struct tipc_link *link);
 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
                           bool shutting_down);
 void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
index 18aba9e9934550318c8100a72c2c6dade4f0f088..da67c8d3edc67df0303f9d302ab4330a351b89a8 100644 (file)
@@ -189,7 +189,6 @@ err:
  * tipc_msg_build - create buffer chain containing specified header and data
  * @mhdr: Message header, to be prepended to data
  * @m: User message
- * @offset: Posision in iov to start copying from
  * @dsz: Total length of user data
  * @pktmax: Max packet size that can be used
  * @list: Buffer or chain of buffers to be returned to caller
@@ -221,8 +220,7 @@ int tipc_msg_build(struct net *net, struct tipc_msg *mhdr, struct msghdr *m,
                __skb_queue_tail(list, skb);
                skb_copy_to_linear_data(skb, mhdr, mhsz);
                pktpos = skb->data + mhsz;
-               if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
-                                                dsz))
+               if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
                        return dsz;
                rc = -EFAULT;
                goto error;
@@ -252,12 +250,11 @@ int tipc_msg_build(struct net *net, struct tipc_msg *mhdr, struct msghdr *m,
                if (drem < pktrem)
                        pktrem = drem;
 
-               if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
+               if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
                        rc = -EFAULT;
                        goto error;
                }
                drem -= pktrem;
-               offset += pktrem;
 
                if (!drem)
                        break;
index ee5d33cfcf8032ffc40b3c029471ecf75b34015a..842bd7ad4b171bbe9d7ee6592ad16ea39f9bf28f 100644 (file)
@@ -96,14 +96,14 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
        struct tipc_node *n_ptr, *temp_node;
 
        spin_lock_bh(&tn->node_list_lock);
-
+       n_ptr = tipc_node_find(net, addr);
+       if (n_ptr)
+               goto exit;
        n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
        if (!n_ptr) {
-               spin_unlock_bh(&tn->node_list_lock);
                pr_warn("Node creation failed, no memory\n");
-               return NULL;
+               goto exit;
        }
-
        n_ptr->addr = addr;
        n_ptr->net = net;
        spin_lock_init(&n_ptr->lock);
@@ -123,9 +123,8 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
        list_add_tail_rcu(&n_ptr->list, &temp_node->list);
        n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
        n_ptr->signature = INVALID_NODE_SIG;
-
        tn->num_nodes++;
-
+exit:
        spin_unlock_bh(&tn->node_list_lock);
        return n_ptr;
 }
@@ -406,6 +405,10 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                l_ptr->reset_checkpoint = l_ptr->next_in_no;
                l_ptr->exp_msg_count = 0;
                tipc_link_reset_fragments(l_ptr);
+
+               /* Link marked for deletion after failover? => do it now */
+               if (l_ptr->flags & LINK_STOPPED)
+                       tipc_link_delete(l_ptr);
        }
 
        n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
index 679a22082fcbc73480df514fe2b0dafc7aa5844c..caa4d663fd901c56edd661aa7c450ecd3d408598 100644 (file)
@@ -733,6 +733,7 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
        struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
        struct sk_buff_head head;
+       struct iov_iter save = msg->msg_iter;
        uint mtu;
        int rc;
 
@@ -758,8 +759,10 @@ new_mtu:
                        rc = dsz;
                        break;
                }
-               if (rc == -EMSGSIZE)
+               if (rc == -EMSGSIZE) {
+                       msg->msg_iter = save;
                        goto new_mtu;
+               }
                if (rc != -ELINKCONG)
                        break;
                tipc_sk(sk)->link_cong = 1;
@@ -895,6 +898,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct sk_buff_head head;
        struct sk_buff *skb;
        struct tipc_name_seq *seq = &dest->addr.nameseq;
+       struct iov_iter save;
        u32 mtu;
        long timeo;
        int rc;
@@ -963,6 +967,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
                msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
        }
 
+       save = m->msg_iter;
 new_mtu:
        mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
        __skb_queue_head_init(&head);
@@ -980,8 +985,10 @@ new_mtu:
                        rc = dsz;
                        break;
                }
-               if (rc == -EMSGSIZE)
+               if (rc == -EMSGSIZE) {
+                       m->msg_iter = save;
                        goto new_mtu;
+               }
                if (rc != -ELINKCONG)
                        break;
                tsk->link_cong = 1;
@@ -1052,6 +1059,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
        long timeo;
        u32 dnode;
        uint mtu, send, sent = 0;
+       struct iov_iter save;
 
        /* Handle implied connection establishment */
        if (unlikely(dest)) {
@@ -1078,6 +1086,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
        dnode = tsk_peer_node(tsk);
 
 next:
+       save = m->msg_iter;
        mtu = tsk->max_pkt;
        send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
        __skb_queue_head_init(&head);
@@ -1097,6 +1106,7 @@ next:
                        if (rc == -EMSGSIZE) {
                                tsk->max_pkt = tipc_node_get_mtu(net, dnode,
                                                                 portid);
+                               m->msg_iter = save;
                                goto next;
                        }
                        if (rc != -ELINKCONG)
index 02d2e5229240dd635dfc3eb7b6f81e8aa970bf01..7f3255084a6c036074664240f4da6fb3cdd231ce 100644 (file)
@@ -1850,8 +1850,7 @@ static ssize_t vmci_transport_stream_enqueue(
        struct msghdr *msg,
        size_t len)
 {
-       /* XXX: stripping const */
-       return vmci_qpair_enquev(vmci_trans(vsk)->qpair, (struct iovec *)msg->msg_iter.iov, len, 0);
+       return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
 }
 
 static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
index 7d60f4bf95d2ff0e3742906121684df6c5db55e0..d78fd8b54515e630b67bf38d710b2b698f703c4c 100644 (file)
@@ -397,6 +397,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
        [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
        [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
+       [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
+       [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
 };
 
 /* policy for the key attributes */
@@ -5778,7 +5780,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                request->ssids = (void *)&request->channels[n_channels];
        request->n_ssids = n_ssids;
        if (ie_len) {
-               if (request->ssids)
+               if (n_ssids)
                        request->ie = (void *)(request->ssids + n_ssids);
                else
                        request->ie = (void *)(request->channels + n_channels);
@@ -5834,7 +5836,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
        request->n_channels = i;
 
        i = 0;
-       if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
+       if (n_ssids) {
                nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
                        if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
                                err = -EINVAL;
@@ -6032,7 +6034,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
                request->ssids = (void *)&request->channels[n_channels];
        request->n_ssids = n_ssids;
        if (ie_len) {
-               if (request->ssids)
+               if (n_ssids)
                        request->ie = (void *)(request->ssids + n_ssids);
                else
                        request->ie = (void *)(request->channels + n_channels);
@@ -6041,7 +6043,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
        if (n_match_sets) {
                if (request->ie)
                        request->match_sets = (void *)(request->ie + ie_len);
-               else if (request->ssids)
+               else if (n_ssids)
                        request->match_sets =
                                (void *)(request->ssids + n_ssids);
                else
@@ -6100,7 +6102,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
        request->n_channels = i;
 
        i = 0;
-       if (attrs[NL80211_ATTR_SCAN_SSIDS]) {
+       if (n_ssids) {
                nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS],
                                    tmp) {
                        if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
@@ -6208,6 +6210,10 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
                }
        }
 
+       if (attrs[NL80211_ATTR_SCHED_SCAN_DELAY])
+               request->delay =
+                       nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
+
        request->interval = interval;
        request->scan_start = jiffies;
 
@@ -7768,14 +7774,19 @@ static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net *net;
        int err;
-       u32 pid;
 
-       if (!info->attrs[NL80211_ATTR_PID])
-               return -EINVAL;
+       if (info->attrs[NL80211_ATTR_PID]) {
+               u32 pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]);
+
+               net = get_net_ns_by_pid(pid);
+       } else if (info->attrs[NL80211_ATTR_NETNS_FD]) {
+               u32 fd = nla_get_u32(info->attrs[NL80211_ATTR_NETNS_FD]);
 
-       pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]);
+               net = get_net_ns_by_fd(fd);
+       } else {
+               return -EINVAL;
+       }
 
-       net = get_net_ns_by_pid(pid);
        if (IS_ERR(net))
                return PTR_ERR(net);
 
index 0d1966d54aaa70af912b678bfffdc39e8ad2fbd0..6903dbdcb8c1f03bcef684ad1074e3dea9f18e18 100644 (file)
@@ -227,18 +227,32 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
        if (pairwise && !mac_addr)
                return -EINVAL;
 
-       /*
-        * Disallow pairwise keys with non-zero index unless it's WEP
-        * or a vendor specific cipher (because current deployments use
-        * pairwise WEP keys with non-zero indices and for vendor specific
-        * ciphers this should be validated in the driver or hardware level
-        * - but 802.11i clearly specifies to use zero)
-        */
-       if (pairwise && key_idx &&
-           ((params->cipher == WLAN_CIPHER_SUITE_TKIP) ||
-            (params->cipher == WLAN_CIPHER_SUITE_CCMP) ||
-            (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC)))
-               return -EINVAL;
+       switch (params->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+       case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               /* Disallow pairwise keys with non-zero index unless it's WEP
+                * or a vendor specific cipher (because current deployments use
+                * pairwise WEP keys with non-zero indices and for vendor
+                * specific ciphers this should be validated in the driver or
+                * hardware level - but 802.11i clearly specifies to use zero)
+                */
+               if (pairwise && key_idx)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               /* Disallow BIP (group-only) cipher as pairwise cipher */
+               if (pairwise)
+                       return -EINVAL;
+               break;
+       default:
+               break;
+       }
 
        switch (params->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
@@ -253,6 +267,18 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
                if (params->key_len != WLAN_KEY_LEN_CCMP)
                        return -EINVAL;
                break;
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               if (params->key_len != WLAN_KEY_LEN_CCMP_256)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+               if (params->key_len != WLAN_KEY_LEN_GCMP)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               if (params->key_len != WLAN_KEY_LEN_GCMP_256)
+                       return -EINVAL;
+               break;
        case WLAN_CIPHER_SUITE_WEP104:
                if (params->key_len != WLAN_KEY_LEN_WEP104)
                        return -EINVAL;
@@ -261,6 +287,18 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
                if (params->key_len != WLAN_KEY_LEN_AES_CMAC)
                        return -EINVAL;
                break;
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               if (params->key_len != WLAN_KEY_LEN_BIP_CMAC_256)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_128)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_256)
+                       return -EINVAL;
+               break;
        default:
                /*
                 * We don't know anything about this algorithm,
@@ -280,7 +318,13 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
                        return -EINVAL;
                case WLAN_CIPHER_SUITE_TKIP:
                case WLAN_CIPHER_SUITE_CCMP:
+               case WLAN_CIPHER_SUITE_CCMP_256:
+               case WLAN_CIPHER_SUITE_GCMP:
+               case WLAN_CIPHER_SUITE_GCMP_256:
                case WLAN_CIPHER_SUITE_AES_CMAC:
+               case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_256:
                        if (params->seq_len != 6)
                                return -EINVAL;
                        break;