ath10k: mac80211 driver for Qualcomm Atheros 802.11ac QCA98xx devices
author     Kalle Valo <kvalo@qca.qualcomm.com>
           Wed, 12 Jun 2013 17:52:10 +0000 (20:52 +0300)
committer  Kalle Valo <kvalo@qca.qualcomm.com>
           Wed, 12 Jun 2013 17:52:10 +0000 (20:52 +0300)
Here's a new mac80211 driver for Qualcomm Atheros 802.11ac QCA98xx devices.
A major difference from ath9k is that the hardware now runs a firmware,
which is why we had to implement a new driver.

The wiki page for the driver is:

http://wireless.kernel.org/en/users/Drivers/ath10k

The driver has had many authors; they are listed here alphabetically:

Bartosz Markowski <bartosz.markowski@tieto.com>
Janusz Dziedzic <janusz.dziedzic@tieto.com>
Kalle Valo <kvalo@qca.qualcomm.com>
Marek Kwaczynski <marek.kwaczynski@tieto.com>
Marek Puzyniak <marek.puzyniak@tieto.com>
Michal Kazior <michal.kazior@tieto.com>
Sujith Manoharan <c_manoha@qca.qualcomm.com>

Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
32 files changed:
drivers/net/wireless/ath/Kconfig
drivers/net/wireless/ath/Makefile
drivers/net/wireless/ath/ath10k/Kconfig [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/Makefile [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/bmi.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/bmi.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/ce.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/ce.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/core.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/core.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/debug.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/debug.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/hif.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htc.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htc.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htt.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htt.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htt_rx.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htt_tx.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/hw.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/mac.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/mac.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/pci.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/pci.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/rx_desc.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/targaddrs.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/trace.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/trace.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/txrx.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/txrx.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/wmi.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/wmi.h [new file with mode: 0644]

index 2c02b4e84094d32fc5605deaebb0ed148087fbf5..1abf1d421173085e72a4bfa549fd147f83a87ca5 100644 (file)
@@ -31,5 +31,6 @@ source "drivers/net/wireless/ath/carl9170/Kconfig"
 source "drivers/net/wireless/ath/ath6kl/Kconfig"
 source "drivers/net/wireless/ath/ar5523/Kconfig"
 source "drivers/net/wireless/ath/wil6210/Kconfig"
+source "drivers/net/wireless/ath/ath10k/Kconfig"
 
 endif
index 97b964ded2bef25e4218cc3843377ada32732077..fb05cfd193616ea65cecbfc489653409b173e617 100644 (file)
@@ -4,6 +4,7 @@ obj-$(CONFIG_CARL9170)          += carl9170/
 obj-$(CONFIG_ATH6KL)           += ath6kl/
 obj-$(CONFIG_AR5523)           += ar5523/
 obj-$(CONFIG_WIL6210)          += wil6210/
+obj-$(CONFIG_ATH10K)           += ath10k/
 
 obj-$(CONFIG_ATH_COMMON)       += ath.o
 
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
new file mode 100644 (file)
index 0000000..cde58fe
--- /dev/null
@@ -0,0 +1,39 @@
+config ATH10K
+       tristate "Atheros 802.11ac wireless cards support"
+       depends on MAC80211
+       select ATH_COMMON
+       ---help---
+         This module adds support for wireless adapters based on the
+         Atheros IEEE 802.11ac family of chipsets.
+
+         If you choose to build a module, it'll be called ath10k.
+
+config ATH10K_PCI
+       tristate "Atheros ath10k PCI support"
+       depends on ATH10K && PCI
+       ---help---
+         This module adds support for the PCIe bus.
+
+config ATH10K_DEBUG
+       bool "Atheros ath10k debugging"
+       depends on ATH10K
+       ---help---
+         Enables debug support.
+
+         If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_DEBUGFS
+       bool "Atheros ath10k debugfs support"
+       depends on ATH10K
+       ---help---
+         Enables debugfs support.
+
+         If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_TRACING
+       bool "Atheros ath10k tracing support"
+       depends on ATH10K
+       depends on EVENT_TRACING
+       ---help---
+         Select this to make ath10k use the tracing infrastructure.
+
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
new file mode 100644 (file)
index 0000000..a4179f4
--- /dev/null
@@ -0,0 +1,20 @@
+obj-$(CONFIG_ATH10K) += ath10k_core.o
+ath10k_core-y += mac.o \
+                debug.o \
+                core.o \
+                htc.o \
+                htt.o \
+                htt_rx.o \
+                htt_tx.o \
+                txrx.o \
+                wmi.o \
+                bmi.o
+
+ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
+
+obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
+ath10k_pci-y += pci.o \
+               ce.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
new file mode 100644 (file)
index 0000000..1a2ef51
--- /dev/null
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "bmi.h"
+#include "hif.h"
+#include "debug.h"
+#include "htc.h"
+
+int ath10k_bmi_done(struct ath10k *ar)
+{
+       struct bmi_cmd cmd;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+               return 0;
+       }
+
+       ar->bmi.done_sent = true;
+       cmd.id = __cpu_to_le32(BMI_DONE);
+
+       ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+       if (ret) {
+               ath10k_warn("unable to write to the device: %d\n", ret);
+               return ret;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
+       return 0;
+}
+
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+                              struct bmi_target_info *target_info)
+{
+       struct bmi_cmd cmd;
+       union bmi_resp resp;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+       u32 resplen = sizeof(resp.get_target_info);
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("BMI Get Target Info Command disallowed\n");
+               return -EBUSY;
+       }
+
+       cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+       ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+       if (ret) {
+               ath10k_warn("unable to get target info from device\n");
+               return ret;
+       }
+
+       if (resplen < sizeof(resp.get_target_info)) {
+               ath10k_warn("invalid get_target_info response length (%d)\n",
+                           resplen);
+               return -EIO;
+       }
+
+       target_info->version = __le32_to_cpu(resp.get_target_info.version);
+       target_info->type    = __le32_to_cpu(resp.get_target_info.type);
+       return 0;
+}
+
+int ath10k_bmi_read_memory(struct ath10k *ar,
+                          u32 address, void *buffer, u32 length)
+{
+       struct bmi_cmd cmd;
+       union bmi_resp resp;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
+       u32 rxlen;
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE,
+                  "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
+                  __func__, ar, address, length);
+
+       while (length) {
+               rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
+
+               cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
+               cmd.read_mem.addr = __cpu_to_le32(address);
+               cmd.read_mem.len  = __cpu_to_le32(rxlen);
+
+               ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
+                                                 &resp, &rxlen);
+               if (ret) {
+                       ath10k_warn("unable to read from the device\n");
+                       return ret;
+               }
+
+               memcpy(buffer, resp.read_mem.payload, rxlen);
+               address += rxlen;
+               buffer  += rxlen;
+               length  -= rxlen;
+       }
+
+       return 0;
+}
+
+int ath10k_bmi_write_memory(struct ath10k *ar,
+                           u32 address, const void *buffer, u32 length)
+{
+       struct bmi_cmd cmd;
+       u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
+       u32 txlen;
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE,
+                  "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
+                  __func__, ar, address, length);
+
+       while (length) {
+               txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+               /* copy before roundup to avoid reading beyond buffer */
+               memcpy(cmd.write_mem.payload, buffer, txlen);
+               txlen = roundup(txlen, 4);
+
+               cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
+               cmd.write_mem.addr = __cpu_to_le32(address);
+               cmd.write_mem.len  = __cpu_to_le32(txlen);
+
+               ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+                                                 NULL, NULL);
+               if (ret) {
+                       ath10k_warn("unable to write to the device\n");
+                       return ret;
+               }
+
+               /* fixup roundup() so `length` zeroes out for last chunk */
+               txlen = min(txlen, length);
+
+               address += txlen;
+               buffer  += txlen;
+               length  -= txlen;
+       }
+
+       return 0;
+}
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
+{
+       struct bmi_cmd cmd;
+       union bmi_resp resp;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
+       u32 resplen = sizeof(resp.execute);
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE,
+                  "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
+                  __func__, ar, address, *param);
+
+       cmd.id            = __cpu_to_le32(BMI_EXECUTE);
+       cmd.execute.addr  = __cpu_to_le32(address);
+       cmd.execute.param = __cpu_to_le32(*param);
+
+       ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+       if (ret) {
+               ath10k_warn("unable to read from the device\n");
+               return ret;
+       }
+
+       if (resplen < sizeof(resp.execute)) {
+               ath10k_warn("invalid execute response length (%d)\n",
+                           resplen);
+               return -EIO;
+       }
+
+       *param = __le32_to_cpu(resp.execute.result);
+       return 0;
+}
+
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
+{
+       struct bmi_cmd cmd;
+       u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
+       u32 txlen;
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       while (length) {
+               txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+               WARN_ON_ONCE(txlen & 3);
+
+               cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
+               cmd.lz_data.len = __cpu_to_le32(txlen);
+               memcpy(cmd.lz_data.payload, buffer, txlen);
+
+               ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+                                                 NULL, NULL);
+               if (ret) {
+                       ath10k_warn("unable to write to the device\n");
+                       return ret;
+               }
+
+               buffer += txlen;
+               length -= txlen;
+       }
+
+       return 0;
+}
+
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
+{
+       struct bmi_cmd cmd;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
+       cmd.lz_start.addr = __cpu_to_le32(address);
+
+       ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+       if (ret) {
+               ath10k_warn("unable to Start LZ Stream to the device\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+int ath10k_bmi_fast_download(struct ath10k *ar,
+                            u32 address, const void *buffer, u32 length)
+{
+       u8 trailer[4] = {};
+       u32 head_len = rounddown(length, 4);
+       u32 trailer_len = length - head_len;
+       int ret;
+
+       ret = ath10k_bmi_lz_stream_start(ar, address);
+       if (ret)
+               return ret;
+
+       /* copy the last word into a zero-padded buffer */
+       if (trailer_len > 0)
+               memcpy(trailer, buffer + head_len, trailer_len);
+
+       ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+       if (ret)
+               return ret;
+
+       if (trailer_len > 0)
+               ret = ath10k_bmi_lz_data(ar, trailer, 4);
+
+       if (ret != 0)
+               return ret;
+
+       /*
+        * Close compressed stream and open a new (fake) one.
+        * This serves mainly to flush Target caches.
+        */
+       ret = ath10k_bmi_lz_stream_start(ar, 0x00);
+
+       return ret;
+}
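
For illustration only (not part of this patch): the BMI calls above compose into a short boot-time sequence. This sketch assumes a probed struct ath10k *ar and a firmware image plus load address supplied by the caller, and condenses the real flow in core.c down to the bare API:

static int example_bmi_boot(struct ath10k *ar, const void *fw_data,
			    u32 fw_len, u32 load_addr)
{
	struct bmi_target_info target_info;
	int ret;

	/* Must be issued before BMI_DONE; disallowed afterwards. */
	ret = ath10k_bmi_get_target_info(ar, &target_info);
	if (ret)
		return ret;

	/* Streams the image via BMI_LZ_STREAM_START/BMI_LZ_DATA. */
	ret = ath10k_bmi_fast_download(ar, load_addr, fw_data, fw_len);
	if (ret)
		return ret;

	/* Ends the BMI phase; no further BMI commands are accepted. */
	return ath10k_bmi_done(ar);
}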
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
new file mode 100644 (file)
index 0000000..32c56aa
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BMI_H_
+#define _BMI_H_
+
+#include "core.h"
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to QCA988x, to provide
+ * patches to code that is already resident on QCA988x, and generally
+ * to examine and modify state.  The Host has an opportunity to use
+ * BMI only once during bootup.  Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0.   BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using AR8K Counter #4.  As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
+
+/* Maximum data size used for BMI transfers */
+#define BMI_MAX_DATA_SIZE      256
+
+/* len = cmd + addr + length */
+#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
+                       sizeof(u32) + \
+                       sizeof(u32) + \
+                       sizeof(u32))
+
+/* BMI Commands */
+
+enum bmi_cmd_id {
+       BMI_NO_COMMAND          = 0,
+       BMI_DONE                = 1,
+       BMI_READ_MEMORY         = 2,
+       BMI_WRITE_MEMORY        = 3,
+       BMI_EXECUTE             = 4,
+       BMI_SET_APP_START       = 5,
+       BMI_READ_SOC_REGISTER   = 6,
+       BMI_READ_SOC_WORD       = 6,
+       BMI_WRITE_SOC_REGISTER  = 7,
+       BMI_WRITE_SOC_WORD      = 7,
+       BMI_GET_TARGET_ID       = 8,
+       BMI_GET_TARGET_INFO     = 8,
+       BMI_ROMPATCH_INSTALL    = 9,
+       BMI_ROMPATCH_UNINSTALL  = 10,
+       BMI_ROMPATCH_ACTIVATE   = 11,
+       BMI_ROMPATCH_DEACTIVATE = 12,
+       BMI_LZ_STREAM_START     = 13, /* should be followed by LZ_DATA */
+       BMI_LZ_DATA             = 14,
+       BMI_NVRAM_PROCESS       = 15,
+};
+
+#define BMI_NVRAM_SEG_NAME_SZ 16
+
+struct bmi_cmd {
+       __le32 id; /* enum bmi_cmd_id */
+       union {
+               struct {
+               } done;
+               struct {
+                       __le32 addr;
+                       __le32 len;
+               } read_mem;
+               struct {
+                       __le32 addr;
+                       __le32 len;
+                       u8 payload[0];
+               } write_mem;
+               struct {
+                       __le32 addr;
+                       __le32 param;
+               } execute;
+               struct {
+                       __le32 addr;
+               } set_app_start;
+               struct {
+                       __le32 addr;
+               } read_soc_reg;
+               struct {
+                       __le32 addr;
+                       __le32 value;
+               } write_soc_reg;
+               struct {
+               } get_target_info;
+               struct {
+                       __le32 rom_addr;
+                       __le32 ram_addr; /* or value */
+                       __le32 size;
+                       __le32 activate; /* 0 = install, but don't activate */
+               } rompatch_install;
+               struct {
+                       __le32 patch_id;
+               } rompatch_uninstall;
+               struct {
+                       __le32 count;
+                       __le32 patch_ids[0]; /* @count entries */
+               } rompatch_activate;
+               struct {
+                       __le32 count;
+                       __le32 patch_ids[0]; /* @count entries */
+               } rompatch_deactivate;
+               struct {
+                       __le32 addr;
+               } lz_start;
+               struct {
+                       __le32 len; /* max BMI_MAX_DATA_SIZE */
+                       u8 payload[0]; /* @len bytes */
+               } lz_data;
+               struct {
+                       u8 name[BMI_NVRAM_SEG_NAME_SZ];
+               } nvram_process;
+               u8 payload[BMI_MAX_CMDBUF_SIZE];
+       };
+} __packed;
+
+union bmi_resp {
+       struct {
+               u8 payload[0];
+       } read_mem;
+       struct {
+               __le32 result;
+       } execute;
+       struct {
+               __le32 value;
+       } read_soc_reg;
+       struct {
+               __le32 len;
+               __le32 version;
+               __le32 type;
+       } get_target_info;
+       struct {
+               __le32 patch_id;
+       } rompatch_install;
+       struct {
+               __le32 patch_id;
+       } rompatch_uninstall;
+       struct {
+               /* 0 = nothing executed
+                * otherwise = NVRAM segment return value */
+               __le32 result;
+       } nvram_process;
+       u8 payload[BMI_MAX_CMDBUF_SIZE];
+} __packed;
+
+struct bmi_target_info {
+       u32 version;
+       u32 type;
+};
+
+
+/* in jiffies */
+#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+
+#define BMI_CE_NUM_TO_TARG 0
+#define BMI_CE_NUM_TO_HOST 1
+
+int ath10k_bmi_done(struct ath10k *ar);
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+                              struct bmi_target_info *target_info);
+int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
+                          void *buffer, u32 length);
+int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
+                           const void *buffer, u32 length);
+
+#define ath10k_bmi_read32(ar, item, val)                               \
+       ({                                                              \
+               int ret;                                                \
+               u32 addr;                                               \
+               __le32 tmp;                                             \
+                                                                       \
+               addr = host_interest_item_address(HI_ITEM(item));       \
+               ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
+               *val = __le32_to_cpu(tmp);                              \
+               ret;                                                    \
+        })
+
+#define ath10k_bmi_write32(ar, item, val)                              \
+       ({                                                              \
+               int ret;                                                \
+               u32 address;                                            \
+               __le32 v = __cpu_to_le32(val);                          \
+                                                                       \
+               address = host_interest_item_address(HI_ITEM(item));    \
+               ret = ath10k_bmi_write_memory(ar, address,              \
+                                             (u8 *)&v, sizeof(v));     \
+               ret;                                                    \
+       })
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
+                            const void *buffer, u32 length);
+#endif /* _BMI_H_ */
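
As a usage note on the ath10k_bmi_read32()/ath10k_bmi_write32() helpers above: they wrap the memory accessors for single host-interest words. The following sketch is an assumption-laden illustration (hi_board_data and hi_board_data_initialized are host_interest fields from targaddrs.h, which this commit adds but is not shown here):

static int example_hi_access(struct ath10k *ar)
{
	u32 board_data_addr;
	int ret;

	/* Fetch the target's board-data pointer from host interest. */
	ret = ath10k_bmi_read32(ar, hi_board_data, &board_data_addr);
	if (ret)
		return ret;

	/* Tell the target its board data has been provisioned. */
	return ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
}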
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
new file mode 100644 (file)
index 0000000..61a8ac7
--- /dev/null
@@ -0,0 +1,1189 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hif.h"
+#include "pci.h"
+#include "ce.h"
+#include "debug.h"
+
+/*
+ * Support for Copy Engine hardware, which is mainly used for
+ * communication between Host and Target over a PCIe interconnect.
+ */
+
+/*
+ * A single CopyEngine (CE) comprises two "rings":
+ *   a source ring
+ *   a destination ring
+ *
+ * Each ring consists of a number of descriptors which specify
+ * an address, length, and meta-data.
+ *
+ * Typically, one side of the PCIe interconnect (Host or Target)
+ * controls one ring and the other side controls the other ring.
+ * The source side chooses when to initiate a transfer and it
+ * chooses what to send (buffer address, length). The destination
+ * side keeps a supply of "anonymous receive buffers" available and
+ * it handles incoming data as it arrives (when the destination
+ * receives an interrupt).
+ *
+ * The sender may send a simple buffer (address/length) or it may
+ * send a small list of buffers.  When a small list is sent, hardware
+ * "gathers" these and they end up in a single destination buffer
+ * with a single interrupt.
+ *
+ * There are several "contexts" managed by this layer -- more, it
+ * may seem, than should be needed. These are provided mainly for
+ * maximum flexibility and especially to facilitate a simpler HIF
+ * implementation. There are per-CopyEngine recv, send, and watermark
+ * contexts. These are supplied by the caller when a recv, send,
+ * or watermark handler is established and they are echoed back to
+ * the caller when the respective callbacks are invoked. There is
+ * also a per-transfer context supplied by the caller when a buffer
+ * (or sendlist) is sent and when a buffer is enqueued for recv.
+ * These per-transfer contexts are echoed back to the caller when
+ * the buffer is sent/received.
+ */
+
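
To make the ring bookkeeping below easier to follow: each ring is power-of-two sized and indexed modulo its size, so the CE_RING_IDX_INCR and CE_RING_DELTA macros used throughout this file (defined in ce.h, not shown in this hunk) behave as in this hypothetical sketch:

/* With nentries a power of two and mask = nentries - 1: */
#define EXAMPLE_RING_IDX_INCR(mask, idx)	(((idx) + 1) & (mask))
#define EXAMPLE_RING_DELTA(mask, from, to)	(((to) - (from)) & (mask))

/* Example: 8 entries (mask = 7), write_index = 6, sw_index = 1.
 * Free slots = EXAMPLE_RING_DELTA(7, 6, 1 - 1) = (0 - 6) & 7 = 2.
 */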
+static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
+                                                      u32 ce_ctrl_addr,
+                                                      unsigned int n)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
+}
+
+static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
+                                                     u32 ce_ctrl_addr)
+{
+       return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
+                                                     u32 ce_ctrl_addr,
+                                                     unsigned int n)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       void __iomem *indicator_addr;
+
+       if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
+               ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+               return;
+       }
+
+       /* workaround for QCA988x_1.0 HW CE */
+       indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
+
+       if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
+               iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
+       } else {
+               unsigned long irq_flags;
+               local_irq_save(irq_flags);
+               iowrite32(1, indicator_addr);
+
+               /*
+                * A PCIe write waits for the ACK in IPQ8K, so there
+                * is no need to read back the value.
+                */
+               (void)ioread32(indicator_addr);
+               (void)ioread32(indicator_addr); /* conservative */
+
+               ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+
+               iowrite32(0, indicator_addr);
+               local_irq_restore(irq_flags);
+       }
+}
+
+static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr)
+{
+       return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr)
+{
+       return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr,
+                                                   unsigned int addr)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
+                                              u32 ce_ctrl_addr,
+                                              unsigned int n)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
+                                              u32 ce_ctrl_addr,
+                                              unsigned int n)
+{
+       u32 ctrl1_addr = ath10k_pci_read32(ar,
+                                          ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+                          (ctrl1_addr &  ~CE_CTRL1_DMAX_LENGTH_MASK) |
+                          CE_CTRL1_DMAX_LENGTH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr,
+                                                   unsigned int n)
+{
+       u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+                          (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
+                          CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr,
+                                                    unsigned int n)
+{
+       u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+                          (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
+                          CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr)
+{
+       return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr,
+                                                    u32 addr)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
+                                               u32 ce_ctrl_addr,
+                                               unsigned int n)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
+                                                  u32 ce_ctrl_addr,
+                                                  unsigned int n)
+{
+       u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+                          (addr & ~SRC_WATERMARK_HIGH_MASK) |
+                          SRC_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
+                                                 u32 ce_ctrl_addr,
+                                                 unsigned int n)
+{
+       u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+                          (addr & ~SRC_WATERMARK_LOW_MASK) |
+                          SRC_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr,
+                                                   unsigned int n)
+{
+       u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+                          (addr & ~DST_WATERMARK_HIGH_MASK) |
+                          DST_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
+                                                  u32 ce_ctrl_addr,
+                                                  unsigned int n)
+{
+       u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+                          (addr & ~DST_WATERMARK_LOW_MASK) |
+                          DST_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
+                                                      u32 ce_ctrl_addr)
+{
+       u32 host_ie_addr = ath10k_pci_read32(ar,
+                                            ce_ctrl_addr + HOST_IE_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+                          host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
+                                                       u32 ce_ctrl_addr)
+{
+       u32 host_ie_addr = ath10k_pci_read32(ar,
+                                            ce_ctrl_addr + HOST_IE_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+                          host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr)
+{
+       u32 host_ie_addr = ath10k_pci_read32(ar,
+                                            ce_ctrl_addr + HOST_IE_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+                          host_ie_addr & ~CE_WATERMARK_MASK);
+}
+
+static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
+                                              u32 ce_ctrl_addr)
+{
+       u32 misc_ie_addr = ath10k_pci_read32(ar,
+                                            ce_ctrl_addr + MISC_IE_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+                          misc_ie_addr | CE_ERROR_MASK);
+}
+
+static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr,
+                                                    unsigned int mask)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
+}
+
+
+/*
+ * Guts of ath10k_ce_send, used by both ath10k_ce_send and
+ * ath10k_ce_sendlist_send.
+ * The caller takes responsibility for any needed locking.
+ */
+static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+                                void *per_transfer_context,
+                                u32 buffer,
+                                unsigned int nbytes,
+                                unsigned int transfer_id,
+                                unsigned int flags)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ce_ring_state *src_ring = ce_state->src_ring;
+       struct ce_desc *desc, *sdesc;
+       unsigned int nentries_mask = src_ring->nentries_mask;
+       unsigned int sw_index = src_ring->sw_index;
+       unsigned int write_index = src_ring->write_index;
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       u32 desc_flags = 0;
+       int ret = 0;
+
+       if (nbytes > ce_state->src_sz_max)
+               ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
+                           __func__, nbytes, ce_state->src_sz_max);
+
+       ath10k_pci_wake(ar);
+
+       if (unlikely(CE_RING_DELTA(nentries_mask,
+                                  write_index, sw_index - 1) <= 0)) {
+               ret = -EIO;
+               goto exit;
+       }
+
+       desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+                                  write_index);
+       sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);
+
+       desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+       if (flags & CE_SEND_FLAG_GATHER)
+               desc_flags |= CE_DESC_FLAGS_GATHER;
+       if (flags & CE_SEND_FLAG_BYTE_SWAP)
+               desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+       sdesc->addr   = __cpu_to_le32(buffer);
+       sdesc->nbytes = __cpu_to_le16(nbytes);
+       sdesc->flags  = __cpu_to_le16(desc_flags);
+
+       *desc = *sdesc;
+
+       src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+       /* Update Source Ring Write Index */
+       write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+       /* WORKAROUND */
+       if (!(flags & CE_SEND_FLAG_GATHER))
+               ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+       src_ring->write_index = write_index;
+exit:
+       ath10k_pci_sleep(ar);
+       return ret;
+}
+
+int ath10k_ce_send(struct ce_state *ce_state,
+                  void *per_transfer_context,
+                  u32 buffer,
+                  unsigned int nbytes,
+                  unsigned int transfer_id,
+                  unsigned int flags)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+                                   buffer, nbytes, transfer_id, flags);
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
+                               unsigned int nbytes, u32 flags)
+{
+       unsigned int num_items = sendlist->num_items;
+       struct ce_sendlist_item *item;
+
+       item = &sendlist->item[num_items];
+       item->data = buffer;
+       item->u.nbytes = nbytes;
+       item->flags = flags;
+       sendlist->num_items++;
+}
+
+int ath10k_ce_sendlist_send(struct ce_state *ce_state,
+                           void *per_transfer_context,
+                           struct ce_sendlist *sendlist,
+                           unsigned int transfer_id)
+{
+       struct ce_ring_state *src_ring = ce_state->src_ring;
+       struct ce_sendlist_item *item;
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned int nentries_mask = src_ring->nentries_mask;
+       unsigned int num_items = sendlist->num_items;
+       unsigned int sw_index;
+       unsigned int write_index;
+       int i, delta, ret = -ENOMEM;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       sw_index = src_ring->sw_index;
+       write_index = src_ring->write_index;
+
+       delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
+
+       if (delta >= num_items) {
+               /*
+                * Handle all but the last item uniformly.
+                */
+               for (i = 0; i < num_items - 1; i++) {
+                       item = &sendlist->item[i];
+                       ret = ath10k_ce_send_nolock(ce_state,
+                                                   CE_SENDLIST_ITEM_CTXT,
+                                                   (u32) item->data,
+                                                   item->u.nbytes, transfer_id,
+                                                   item->flags |
+                                                   CE_SEND_FLAG_GATHER);
+                       if (ret)
+                               ath10k_warn("CE send failed for item: %d\n", i);
+               }
+               /*
+                * Provide valid context pointer for final item.
+                */
+               item = &sendlist->item[i];
+               ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+                                           (u32) item->data, item->u.nbytes,
+                                           transfer_id, item->flags);
+               if (ret)
+                       ath10k_warn("CE send failed for last item: %d\n", i);
+       }
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
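
A hypothetical caller of the sendlist API above could gather a header fragment and a body fragment into a single target-side buffer. The CE state, DMA addresses and lengths below are assumed to come from the HIF layer; this is a sketch, not patch code:

static int example_gather_send(struct ce_state *ce_state,
			       u32 hdr_paddr, unsigned int hdr_len,
			       u32 body_paddr, unsigned int body_len,
			       unsigned int transfer_id)
{
	struct ce_sendlist sendlist;

	memset(&sendlist, 0, sizeof(sendlist));
	ath10k_ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0);
	ath10k_ce_sendlist_buf_add(&sendlist, body_paddr, body_len, 0);

	/* All but the last item go out with CE_SEND_FLAG_GATHER set,
	 * so hardware delivers both fragments as one buffer. */
	return ath10k_ce_sendlist_send(ce_state, NULL, &sendlist,
				       transfer_id);
}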
+
+int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+                              void *per_recv_context,
+                              u32 buffer)
+{
+       struct ce_ring_state *dest_ring = ce_state->dest_ring;
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned int nentries_mask = dest_ring->nentries_mask;
+       unsigned int write_index;
+       unsigned int sw_index;
+       int ret;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       write_index = dest_ring->write_index;
+       sw_index = dest_ring->sw_index;
+
+       ath10k_pci_wake(ar);
+
+       if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
+               struct ce_desc *base = dest_ring->base_addr_owner_space;
+               struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+
+               /* Update destination descriptor */
+               desc->addr   = __cpu_to_le32(buffer);
+               desc->nbytes = 0;
+
+               dest_ring->per_transfer_context[write_index] =
+                                                       per_recv_context;
+
+               /* Update Destination Ring Write Index */
+               write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+               ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+               dest_ring->write_index = write_index;
+               ret = 0;
+       } else {
+               ret = -EIO;
+       }
+       ath10k_pci_sleep(ar);
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+/*
+ * Guts of ath10k_ce_completed_recv_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+                                               void **per_transfer_contextp,
+                                               u32 *bufferp,
+                                               unsigned int *nbytesp,
+                                               unsigned int *transfer_idp,
+                                               unsigned int *flagsp)
+{
+       struct ce_ring_state *dest_ring = ce_state->dest_ring;
+       unsigned int nentries_mask = dest_ring->nentries_mask;
+       unsigned int sw_index = dest_ring->sw_index;
+
+       struct ce_desc *base = dest_ring->base_addr_owner_space;
+       struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+       struct ce_desc sdesc;
+       u16 nbytes;
+
+       /* Copy in one go for performance reasons */
+       sdesc = *desc;
+
+       nbytes = __le16_to_cpu(sdesc.nbytes);
+       if (nbytes == 0) {
+               /*
+                * This closes a relatively unusual race where the Host
+                * sees the updated DRRI before the update to the
+                * corresponding descriptor has completed. We treat this
+                * as a descriptor that is not yet done.
+                */
+               return -EIO;
+       }
+
+       desc->nbytes = 0;
+
+       /* Return data from completed destination descriptor */
+       *bufferp = __le32_to_cpu(sdesc.addr);
+       *nbytesp = nbytes;
+       *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
+
+       if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
+               *flagsp = CE_RECV_FLAG_SWAPPED;
+       else
+               *flagsp = 0;
+
+       if (per_transfer_contextp)
+               *per_transfer_contextp =
+                       dest_ring->per_transfer_context[sw_index];
+
+       /* sanity */
+       dest_ring->per_transfer_context[sw_index] = NULL;
+
+       /* Update sw_index */
+       sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+       dest_ring->sw_index = sw_index;
+
+       return 0;
+}
+
+int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+                                 void **per_transfer_contextp,
+                                 u32 *bufferp,
+                                 unsigned int *nbytesp,
+                                 unsigned int *transfer_idp,
+                                 unsigned int *flagsp)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+                                                  per_transfer_contextp,
+                                                  bufferp, nbytesp,
+                                                  transfer_idp, flagsp);
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+                              void **per_transfer_contextp,
+                              u32 *bufferp)
+{
+       struct ce_ring_state *dest_ring;
+       unsigned int nentries_mask;
+       unsigned int sw_index;
+       unsigned int write_index;
+       int ret;
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+
+       dest_ring = ce_state->dest_ring;
+
+       if (!dest_ring)
+               return -EIO;
+
+       ar = ce_state->ar;
+       ar_pci = ath10k_pci_priv(ar);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       nentries_mask = dest_ring->nentries_mask;
+       sw_index = dest_ring->sw_index;
+       write_index = dest_ring->write_index;
+       if (write_index != sw_index) {
+               struct ce_desc *base = dest_ring->base_addr_owner_space;
+               struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+
+               /* Return data from completed destination descriptor */
+               *bufferp = __le32_to_cpu(desc->addr);
+
+               if (per_transfer_contextp)
+                       *per_transfer_contextp =
+                               dest_ring->per_transfer_context[sw_index];
+
+               /* sanity */
+               dest_ring->per_transfer_context[sw_index] = NULL;
+
+               /* Update sw_index */
+               sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+               dest_ring->sw_index = sw_index;
+               ret = 0;
+       } else {
+               ret = -EIO;
+       }
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+/*
+ * Guts of ath10k_ce_completed_send_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+                                               void **per_transfer_contextp,
+                                               u32 *bufferp,
+                                               unsigned int *nbytesp,
+                                               unsigned int *transfer_idp)
+{
+       struct ce_ring_state *src_ring = ce_state->src_ring;
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       struct ath10k *ar = ce_state->ar;
+       unsigned int nentries_mask = src_ring->nentries_mask;
+       unsigned int sw_index = src_ring->sw_index;
+       unsigned int read_index;
+       int ret = -EIO;
+
+       if (src_ring->hw_index == sw_index) {
+               /*
+                * The SW completion index has caught up with the cached
+                * version of the HW completion index.
+                * Update the cached HW completion index to see whether
+                * the SW has really caught up to the HW, or if the cached
+                * value of the HW index has become stale.
+                */
+               ath10k_pci_wake(ar);
+               src_ring->hw_index =
+                       ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+               ath10k_pci_sleep(ar);
+       }
+       read_index = src_ring->hw_index;
+
+       if ((read_index != sw_index) && (read_index != 0xffffffff)) {
+               struct ce_desc *sbase = src_ring->shadow_base;
+               struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+
+               /* Return data from completed source descriptor */
+               *bufferp = __le32_to_cpu(sdesc->addr);
+               *nbytesp = __le16_to_cpu(sdesc->nbytes);
+               *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+                                               CE_DESC_FLAGS_META_DATA);
+
+               if (per_transfer_contextp)
+                       *per_transfer_contextp =
+                               src_ring->per_transfer_context[sw_index];
+
+               /* sanity */
+               src_ring->per_transfer_context[sw_index] = NULL;
+
+               /* Update sw_index */
+               sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+               src_ring->sw_index = sw_index;
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/* NB: Modeled after ath10k_ce_completed_send_next */
+int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+                              void **per_transfer_contextp,
+                              u32 *bufferp,
+                              unsigned int *nbytesp,
+                              unsigned int *transfer_idp)
+{
+       struct ce_ring_state *src_ring;
+       unsigned int nentries_mask;
+       unsigned int sw_index;
+       unsigned int write_index;
+       int ret;
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+
+       src_ring = ce_state->src_ring;
+
+       if (!src_ring)
+               return -EIO;
+
+       ar = ce_state->ar;
+       ar_pci = ath10k_pci_priv(ar);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       nentries_mask = src_ring->nentries_mask;
+       sw_index = src_ring->sw_index;
+       write_index = src_ring->write_index;
+
+       if (write_index != sw_index) {
+               struct ce_desc *base = src_ring->base_addr_owner_space;
+               struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+               /* Return data from completed source descriptor */
+               *bufferp = __le32_to_cpu(desc->addr);
+               *nbytesp = __le16_to_cpu(desc->nbytes);
+               *transfer_idp = MS(__le16_to_cpu(desc->flags),
+                                               CE_DESC_FLAGS_META_DATA);
+
+               if (per_transfer_contextp)
+                       *per_transfer_contextp =
+                               src_ring->per_transfer_context[sw_index];
+
+               /* sanity */
+               src_ring->per_transfer_context[sw_index] = NULL;
+
+               /* Update sw_index */
+               sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+               src_ring->sw_index = sw_index;
+               ret = 0;
+       } else {
+               ret = -EIO;
+       }
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+                                 void **per_transfer_contextp,
+                                 u32 *bufferp,
+                                 unsigned int *nbytesp,
+                                 unsigned int *transfer_idp)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ret = ath10k_ce_completed_send_next_nolock(ce_state,
+                                                  per_transfer_contextp,
+                                                  bufferp, nbytesp,
+                                                  transfer_idp);
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+/*
+ * Guts of interrupt handler for per-engine interrupts on a particular CE.
+ *
+ * Invokes registered callbacks for recv_complete,
+ * send_complete, and watermarks.
+ */
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       void *transfer_context;
+       u32 buf;
+       unsigned int nbytes;
+       unsigned int id;
+       unsigned int flags;
+
+       ath10k_pci_wake(ar);
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       /* Clear the copy-complete interrupts that will be handled here. */
+       ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
+                                         HOST_IS_COPY_COMPLETE_MASK);
+
+       if (ce_state->recv_cb) {
+               /*
+                * Pop completed recv buffers and call the registered
+                * recv callback for each
+                */
+               while (ath10k_ce_completed_recv_next_nolock(ce_state,
+                                                           &transfer_context,
+                                                           &buf, &nbytes,
+                                                           &id, &flags) == 0) {
+                       spin_unlock_bh(&ar_pci->ce_lock);
+                       ce_state->recv_cb(ce_state, transfer_context, buf,
+                                         nbytes, id, flags);
+                       spin_lock_bh(&ar_pci->ce_lock);
+               }
+       }
+
+       if (ce_state->send_cb) {
+               /*
+                * Pop completed send buffers and call the registered
+                * send callback for each
+                */
+               while (ath10k_ce_completed_send_next_nolock(ce_state,
+                                                           &transfer_context,
+                                                           &buf,
+                                                           &nbytes,
+                                                           &id) == 0) {
+                       spin_unlock_bh(&ar_pci->ce_lock);
+                       ce_state->send_cb(ce_state, transfer_context,
+                                         buf, nbytes, id);
+                       spin_lock_bh(&ar_pci->ce_lock);
+               }
+       }
+
+       /*
+        * Misc CE interrupts are not being handled, but still need
+        * to be cleared.
+        */
+       ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+       ath10k_pci_sleep(ar);
+}
+
+/*
+ * Handler for per-engine interrupts on ALL active CEs.
+ * This is used in cases where the system is sharing a
+ * single interrupt for all CEs.
+ */
+
+void ath10k_ce_per_engine_service_any(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ce_id;
+       u32 intr_summary;
+
+       ath10k_pci_wake(ar);
+       intr_summary = CE_INTERRUPT_SUMMARY(ar);
+
+       for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
+               if (intr_summary & (1 << ce_id))
+                       intr_summary &= ~(1 << ce_id);
+               else
+                       /* no intr pending on this CE */
+                       continue;
+
+               ath10k_ce_per_engine_service(ar, ce_id);
+       }
+
+       ath10k_pci_sleep(ar);
+}
+
+/*
+ * Adjust interrupts for the copy complete handler.
+ * If it's needed for either send or recv, then unmask
+ * this interrupt; otherwise, mask it.
+ *
+ * Called with ce_lock held.
+ */
+static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+                                               int disable_copy_compl_intr)
+{
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       struct ath10k *ar = ce_state->ar;
+
+       ath10k_pci_wake(ar);
+
+       if ((!disable_copy_compl_intr) &&
+           (ce_state->send_cb || ce_state->recv_cb))
+               ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
+       else
+               ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+
+       ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+
+       ath10k_pci_sleep(ar);
+}
+
+void ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ce_id;
+
+       ath10k_pci_wake(ar);
+       for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
+               struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+               u32 ctrl_addr = ce_state->ctrl_addr;
+
+               ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+       }
+       ath10k_pci_sleep(ar);
+}
+
+void ath10k_ce_send_cb_register(struct ce_state *ce_state,
+                               void (*send_cb) (struct ce_state *ce_state,
+                                                void *transfer_context,
+                                                u32 buffer,
+                                                unsigned int nbytes,
+                                                unsigned int transfer_id),
+                               int disable_interrupts)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ce_state->send_cb = send_cb;
+       ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
+       spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
+                               void (*recv_cb) (struct ce_state *ce_state,
+                                                void *transfer_context,
+                                                u32 buffer,
+                                                unsigned int nbytes,
+                                                unsigned int transfer_id,
+                                                unsigned int flags))
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ce_state->recv_cb = recv_cb;
+       ath10k_ce_per_engine_handler_adjust(ce_state, 0);
+       spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+static int ath10k_ce_init_src_ring(struct ath10k *ar,
+                                  unsigned int ce_id,
+                                  struct ce_state *ce_state,
+                                  const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_ring_state *src_ring;
+       unsigned int nentries = attr->src_nentries;
+       unsigned int ce_nbytes;
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+       dma_addr_t base_addr;
+       char *ptr;
+
+       nentries = roundup_pow_of_two(nentries);
+
+       if (ce_state->src_ring) {
+               WARN_ON(ce_state->src_ring->nentries != nentries);
+               return 0;
+       }
+
+       ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+       ptr = kzalloc(ce_nbytes, GFP_KERNEL);
+       if (ptr == NULL)
+               return -ENOMEM;
+
+       ce_state->src_ring = (struct ce_ring_state *)ptr;
+       src_ring = ce_state->src_ring;
+
+       ptr += sizeof(struct ce_ring_state);
+       src_ring->nentries = nentries;
+       src_ring->nentries_mask = nentries - 1;
+
+       ath10k_pci_wake(ar);
+       src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+       src_ring->hw_index = src_ring->sw_index;
+
+       src_ring->write_index =
+               ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+       ath10k_pci_sleep(ar);
+
+       src_ring->per_transfer_context = (void **)ptr;
+
+       /*
+        * Legacy platforms that do not support cache
+        * coherent DMA are unsupported
+        */
+       src_ring->base_addr_owner_space_unaligned =
+               pci_alloc_consistent(ar_pci->pdev,
+                                    (nentries * sizeof(struct ce_desc) +
+                                     CE_DESC_RING_ALIGN),
+                                    &base_addr);
+       if (!src_ring->base_addr_owner_space_unaligned) {
+               kfree(ce_state->src_ring);
+               ce_state->src_ring = NULL;
+               return -ENOMEM;
+       }
+       src_ring->base_addr_ce_space_unaligned = base_addr;
+
+       src_ring->base_addr_owner_space = PTR_ALIGN(
+                       src_ring->base_addr_owner_space_unaligned,
+                       CE_DESC_RING_ALIGN);
+       src_ring->base_addr_ce_space = ALIGN(
+                       src_ring->base_addr_ce_space_unaligned,
+                       CE_DESC_RING_ALIGN);
+
+       /*
+        * Also allocate a shadow src ring in regular
+        * mem to use for faster access.
+        */
+       src_ring->shadow_base_unaligned =
+               kmalloc((nentries * sizeof(struct ce_desc) +
+                        CE_DESC_RING_ALIGN), GFP_KERNEL);
+       if (!src_ring->shadow_base_unaligned) {
+               pci_free_consistent(ar_pci->pdev,
+                                   (nentries * sizeof(struct ce_desc) +
+                                    CE_DESC_RING_ALIGN),
+                                   src_ring->base_addr_owner_space_unaligned,
+                                   src_ring->base_addr_ce_space_unaligned);
+               kfree(ce_state->src_ring);
+               ce_state->src_ring = NULL;
+               return -ENOMEM;
+       }
+
+       src_ring->shadow_base = PTR_ALIGN(
+                       src_ring->shadow_base_unaligned,
+                       CE_DESC_RING_ALIGN);
+
+       ath10k_pci_wake(ar);
+       ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+                                        src_ring->base_addr_ce_space);
+       ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+       ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+       ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+       ath10k_pci_sleep(ar);
+
+       return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+                                   unsigned int ce_id,
+                                   struct ce_state *ce_state,
+                                   const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_ring_state *dest_ring;
+       unsigned int nentries = attr->dest_nentries;
+       unsigned int ce_nbytes;
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+       dma_addr_t base_addr;
+       char *ptr;
+
+       nentries = roundup_pow_of_two(nentries);
+
+       if (ce_state->dest_ring) {
+               WARN_ON(ce_state->dest_ring->nentries != nentries);
+               return 0;
+       }
+
+       ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+       ptr = kzalloc(ce_nbytes, GFP_KERNEL);
+       if (ptr == NULL)
+               return -ENOMEM;
+
+       ce_state->dest_ring = (struct ce_ring_state *)ptr;
+       dest_ring = ce_state->dest_ring;
+
+       ptr += sizeof(struct ce_ring_state);
+       dest_ring->nentries = nentries;
+       dest_ring->nentries_mask = nentries - 1;
+
+       ath10k_pci_wake(ar);
+       dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+       dest_ring->write_index =
+               ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+       ath10k_pci_sleep(ar);
+
+       dest_ring->per_transfer_context = (void **)ptr;
+
+       /*
+        * Legacy platforms that do not support cache
+        * coherent DMA are unsupported
+        */
+       dest_ring->base_addr_owner_space_unaligned =
+               pci_alloc_consistent(ar_pci->pdev,
+                                    (nentries * sizeof(struct ce_desc) +
+                                     CE_DESC_RING_ALIGN),
+                                    &base_addr);
+       if (!dest_ring->base_addr_owner_space_unaligned) {
+               kfree(ce_state->dest_ring);
+               ce_state->dest_ring = NULL;
+               return -ENOMEM;
+       }
+       dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+       /*
+        * Initialize the memory to 0 to prevent garbage data from
+        * crashing the system during firmware download.
+        */
+       memset(dest_ring->base_addr_owner_space_unaligned, 0,
+              nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);
+
+       dest_ring->base_addr_owner_space = PTR_ALIGN(
+                       dest_ring->base_addr_owner_space_unaligned,
+                       CE_DESC_RING_ALIGN);
+       dest_ring->base_addr_ce_space = ALIGN(
+                       dest_ring->base_addr_ce_space_unaligned,
+                       CE_DESC_RING_ALIGN);
+
+       ath10k_pci_wake(ar);
+       ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+                                         dest_ring->base_addr_ce_space);
+       ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+       ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+       ath10k_pci_sleep(ar);
+
+       return 0;
+}
+
+static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+                                            unsigned int ce_id,
+                                            const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_state = NULL;
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       if (!ar_pci->ce_id_to_state[ce_id]) {
+               ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
+               if (ce_state == NULL) {
+                       spin_unlock_bh(&ar_pci->ce_lock);
+                       return NULL;
+               }
+
+               ar_pci->ce_id_to_state[ce_id] = ce_state;
+               ce_state->ar = ar;
+               ce_state->id = ce_id;
+               ce_state->ctrl_addr = ctrl_addr;
+               ce_state->state = CE_RUNNING;
+               /* Save attribute flags */
+               ce_state->attr_flags = attr->flags;
+               ce_state->src_sz_max = attr->src_sz_max;
+       }
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ce_state;
+}
+
+/*
+ * Initialize a Copy Engine based on caller-supplied attributes.
+ * This may be called once to initialize both source and destination
+ * rings or it may be called twice for separate source and destination
+ * initialization. It may be that only one side or the other is
+ * initialized by software/firmware.
+ */
+struct ce_state *ath10k_ce_init(struct ath10k *ar,
+                               unsigned int ce_id,
+                               const struct ce_attr *attr)
+{
+       struct ce_state *ce_state;
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       ce_state = ath10k_ce_init_state(ar, ce_id, attr);
+       if (!ce_state) {
+               ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
+               return NULL;
+       }
+
+       if (attr->src_nentries) {
+               if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
+                       ath10k_err("Failed to initialize CE src ring for ID: %d\n",
+                                  ce_id);
+                       ath10k_ce_deinit(ce_state);
+                       return NULL;
+               }
+       }
+
+       if (attr->dest_nentries) {
+               if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
+                       ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
+                                  ce_id);
+                       ath10k_ce_deinit(ce_state);
+                       return NULL;
+               }
+       }
+
+       /* Enable CE error interrupts */
+       ath10k_pci_wake(ar);
+       ath10k_ce_error_intr_enable(ar, ctrl_addr);
+       ath10k_pci_sleep(ar);
+
+       return ce_state;
+}
+
+void ath10k_ce_deinit(struct ce_state *ce_state)
+{
+       unsigned int ce_id = ce_state->id;
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       ce_state->state = CE_UNUSED;
+       ar_pci->ce_id_to_state[ce_id] = NULL;
+
+       if (ce_state->src_ring) {
+               kfree(ce_state->src_ring->shadow_base_unaligned);
+               pci_free_consistent(ar_pci->pdev,
+                                   (ce_state->src_ring->nentries *
+                                    sizeof(struct ce_desc) +
+                                    CE_DESC_RING_ALIGN),
+                                   ce_state->src_ring->base_addr_owner_space,
+                                   ce_state->src_ring->base_addr_ce_space);
+               kfree(ce_state->src_ring);
+       }
+
+       if (ce_state->dest_ring) {
+               pci_free_consistent(ar_pci->pdev,
+                                   (ce_state->dest_ring->nentries *
+                                    sizeof(struct ce_desc) +
+                                    CE_DESC_RING_ALIGN),
+                                   ce_state->dest_ring->base_addr_owner_space,
+                                   ce_state->dest_ring->base_addr_ce_space);
+               kfree(ce_state->dest_ring);
+       }
+       kfree(ce_state);
+}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
new file mode 100644 (file)
index 0000000..c17f07c
--- /dev/null
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CE_H_
+#define _CE_H_
+
+#include "hif.h"
+
+/* Maximum number of Copy Engines supported */
+#define CE_COUNT_MAX 8
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
+
+/* Descriptor rings must be aligned to this boundary */
+#define CE_DESC_RING_ALIGN     8
+#define CE_SENDLIST_ITEMS_MAX  12
+#define CE_SEND_FLAG_GATHER    0x00010000
+
+/*
+ * Copy Engine support: low-level Target-side Copy Engine API.
+ * This is a hardware access layer used by code that understands
+ * how to use copy engines.
+ */
+
+struct ce_state;
+
+/* Copy Engine operational state */
+enum ce_op_state {
+       CE_UNUSED,
+       CE_PAUSED,
+       CE_RUNNING,
+};
+
+#define CE_DESC_FLAGS_GATHER         (1 << 0)
+#define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
+#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
+#define CE_DESC_FLAGS_META_DATA_LSB  3
+
+struct ce_desc {
+       __le32 addr;
+       __le16 nbytes;
+       __le16 flags; /* %CE_DESC_FLAGS_ */
+};
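+
+/*
+ * Example: the transfer id is packed into the flags word above the
+ * gather/byte-swap bits, using the SM() field helper from core.h:
+ *
+ *     desc->flags |= __cpu_to_le16(SM(transfer_id,
+ *                                     CE_DESC_FLAGS_META_DATA));
+ */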
+
+/* Copy Engine Ring internal state */
+struct ce_ring_state {
+       /* Number of entries in this ring; must be power of 2 */
+       unsigned int nentries;
+       unsigned int nentries_mask;
+
+       /*
+        * For dest ring, this is the next index to be processed
+        * by software after it was/is received into.
+        *
+        * For src ring, this is the last descriptor that was sent
+        * and completion processed by software.
+        *
+        * Regardless of src or dest ring, this is an invariant
+        * (modulo ring size):
+        *     write index >= read index >= sw_index
+        */
+       unsigned int sw_index;
+       /* cached copy */
+       unsigned int write_index;
+       /*
+        * For src ring, this is the next index not yet processed by HW.
+        * This is a cached copy of the real HW index (read index), used
+        * for avoiding reading the HW index register more often than
+        * necessary.
+        * This extends the invariant:
+        *     write index >= read index >= hw_index >= sw_index
+        *
+        * For dest ring, this is currently unused.
+        */
+       unsigned int hw_index;
+
+       /* Start of DMA-coherent area reserved for descriptors */
+       /* Host address space */
+       void *base_addr_owner_space_unaligned;
+       /* CE address space */
+       u32 base_addr_ce_space_unaligned;
+
+       /*
+        * Actual start of descriptors.
+        * Aligned to descriptor-size boundary.
+        * Points into reserved DMA-coherent area, above.
+        */
+       /* Host address space */
+       void *base_addr_owner_space;
+
+       /* CE address space */
+       u32 base_addr_ce_space;
+       /*
+        * Start of shadow copy of descriptors, within regular memory.
+        * Aligned to descriptor-size boundary.
+        */
+       void *shadow_base_unaligned;
+       struct ce_desc *shadow_base;
+
+       void **per_transfer_context;
+};
+
+/* Copy Engine internal state */
+struct ce_state {
+       struct ath10k *ar;
+       unsigned int id;
+
+       unsigned int attr_flags;
+
+       u32 ctrl_addr;
+       enum ce_op_state state;
+
+       void (*send_cb) (struct ce_state *ce_state,
+                        void *per_transfer_send_context,
+                        u32 buffer,
+                        unsigned int nbytes,
+                        unsigned int transfer_id);
+       void (*recv_cb) (struct ce_state *ce_state,
+                        void *per_transfer_recv_context,
+                        u32 buffer,
+                        unsigned int nbytes,
+                        unsigned int transfer_id,
+                        unsigned int flags);
+
+       unsigned int src_sz_max;
+       struct ce_ring_state *src_ring;
+       struct ce_ring_state *dest_ring;
+};
+
+struct ce_sendlist_item {
+       /* e.g. buffer or desc list */
+       dma_addr_t data;
+       union {
+               /* simple buffer */
+               unsigned int nbytes;
+               /* Rx descriptor list */
+               unsigned int ndesc;
+       } u;
+       /* externally-specified flags; OR-ed with internal flags */
+       u32 flags;
+};
+
+struct ce_sendlist {
+       unsigned int num_items;
+       struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+};
+
+/* Copy Engine settable attributes */
+struct ce_attr;
+
+/*==================Send====================*/
+
+/* ath10k_ce_send flags */
+#define CE_SEND_FLAG_BYTE_SWAP 1
+
+/*
+ * Queue a source buffer to be sent to an anonymous destination buffer.
+ *   ce_state        - which copy engine to use
+ *   buffer          - address of buffer
+ *   nbytes          - number of bytes to send
+ *   transfer_id     - arbitrary ID; reflected to destination
+ *   flags           - CE_SEND_FLAG_* values
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Note: If no flags are specified, use CE's default data swap mode.
+ *
+ * Implementation note: pushes 1 buffer to Source ring
+ */
+int ath10k_ce_send(struct ce_state *ce_state,
+                  void *per_transfer_send_context,
+                  u32 buffer,
+                  unsigned int nbytes,
+                  /* 14 bits */
+                  unsigned int transfer_id,
+                  unsigned int flags);
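+
+/*
+ * Example (illustrative sketch only; "skb" and "transfer_id" are
+ * hypothetical): queue one DMA-mapped buffer and let the registered
+ * send callback reclaim it on completion.
+ *
+ *     ret = ath10k_ce_send(ce_state, skb, ATH10K_SKB_CB(skb)->paddr,
+ *                          skb->len, transfer_id, 0);
+ *     if (ret)
+ *             dev_kfree_skb_any(skb);   (e.g. source ring was full)
+ */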
+
+void ath10k_ce_send_cb_register(struct ce_state *ce_state,
+                               void (*send_cb) (struct ce_state *ce_state,
+                                                void *transfer_context,
+                                                u32 buffer,
+                                                unsigned int nbytes,
+                                                unsigned int transfer_id),
+                               int disable_interrupts);
+
+/* Append a simple buffer (address/length) to a sendlist. */
+void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
+                               u32 buffer,
+                               unsigned int nbytes,
+                               /* OR-ed with internal flags */
+                               u32 flags);
+
+/*
+ * Queue a "sendlist" of buffers to be sent using gather to a single
+ * anonymous destination buffer.
+ *   ce_state        - which copy engine to use
+ *   sendlist        - list of simple buffers to send using gather
+ *   transfer_id     - arbitrary ID; reflected to destination
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Implementation note: Pushes multiple buffers with Gather to Source ring.
+ */
+int ath10k_ce_sendlist_send(struct ce_state *ce_state,
+                           void *per_transfer_send_context,
+                           struct ce_sendlist *sendlist,
+                           /* 14 bits */
+                           unsigned int transfer_id);
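+
+/*
+ * Example (sketch; the two fragments and their DMA addresses are
+ * hypothetical): gather two buffers into one transfer.
+ *
+ *     struct ce_sendlist sl;
+ *
+ *     memset(&sl, 0, sizeof(sl));
+ *     ath10k_ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0);
+ *     ath10k_ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0);
+ *     ret = ath10k_ce_sendlist_send(ce_state, ctx, &sl, transfer_id);
+ */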
+
+/*==================Recv=======================*/
+
+/*
+ * Make a buffer available to receive. The buffer must be at least as
+ * large as this copy engine's minimum destination buffer size (the
+ * src_sz_max attribute).
+ *   ce_state                   - which copy engine to use
+ *   per_transfer_recv_context  - context passed back to caller's recv_cb
+ *   buffer                     - address of buffer in CE space
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Implementation note: Pushes a buffer to Dest ring.
+ */
+int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+                              void *per_transfer_recv_context,
+                              u32 buffer);
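+
+/*
+ * Example (illustrative sketch only; "skb" and its DMA mapping are
+ * hypothetical): post a fresh receive buffer, typically done from the
+ * recv callback once the previous buffer has been consumed.
+ *
+ *     ret = ath10k_ce_recv_buf_enqueue(ce_state, skb,
+ *                                      ATH10K_SKB_CB(skb)->paddr);
+ */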
+
+void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
+                               void (*recv_cb) (struct ce_state *ce_state,
+                                                void *transfer_context,
+                                                u32 buffer,
+                                                unsigned int nbytes,
+                                                unsigned int transfer_id,
+                                                unsigned int flags));
+
+/* recv flags */
+/* Data is byte-swapped */
+#define CE_RECV_FLAG_SWAPPED   1
+
+/*
+ * Supply data for the next completed unprocessed receive descriptor.
+ * Pops buffer from Dest ring.
+ */
+int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+                                 void **per_transfer_contextp,
+                                 u32 *bufferp,
+                                 unsigned int *nbytesp,
+                                 unsigned int *transfer_idp,
+                                 unsigned int *flagsp);
+/*
+ * Supply data for the next completed unprocessed send descriptor.
+ * Pops 1 completed send buffer from Source ring.
+ */
+int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+                                 void **per_transfer_contextp,
+                                 u32 *bufferp,
+                                 unsigned int *nbytesp,
+                                 unsigned int *transfer_idp);
+
+/*==================CE Engine Initialization=======================*/
+
+/* Initialize an instance of a CE */
+struct ce_state *ath10k_ce_init(struct ath10k *ar,
+                               unsigned int ce_id,
+                               const struct ce_attr *attr);
+
+/*==================CE Engine Shutdown=======================*/
+/*
+ * Support clean shutdown by allowing the caller to revoke
+ * receive buffers.  Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+                              void **per_transfer_contextp,
+                              u32 *bufferp);
+
+/*
+ * Support clean shutdown by allowing the caller to cancel
+ * pending sends.  Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+                              void **per_transfer_contextp,
+                              u32 *bufferp,
+                              unsigned int *nbytesp,
+                              unsigned int *transfer_idp);
+
+void ath10k_ce_deinit(struct ce_state *ce_state);
+
+/*==================CE Interrupt Handlers====================*/
+void ath10k_ce_per_engine_service_any(struct ath10k *ar);
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
+void ath10k_ce_disable_interrupts(struct ath10k *ar);
+
+/* ce_attr.flags values */
+/* Use NonSnooping PCIe accesses? */
+#define CE_ATTR_NO_SNOOP               1
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA         2
+
+/* Swizzle descriptors? */
+#define CE_ATTR_SWIZZLE_DESCRIPTORS    4
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR               8
+
+/* Attributes of an instance of a Copy Engine */
+struct ce_attr {
+       /* CE_ATTR_* values */
+       unsigned int flags;
+
+       /* currently not in use */
+       unsigned int priority;
+
+       /* #entries in source ring - Must be a power of 2 */
+       unsigned int src_nentries;
+
+       /*
+        * Max source send size for this CE.
+        * This is also the minimum size of a destination buffer.
+        */
+       unsigned int src_sz_max;
+
+       /* #entries in destination ring - Must be a power of 2 */
+       unsigned int dest_nentries;
+
+       /* Future use */
+       void *reserved;
+};
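+
+/*
+ * Example (sketch only; the entry counts and buffer size below are
+ * made up, not the driver's real CE configuration): describe a
+ * send-only engine and bring it up with ath10k_ce_init().
+ *
+ *     static const struct ce_attr example_attr = {
+ *             .flags = 0,
+ *             .src_nentries = 16,
+ *             .src_sz_max = 2048,
+ *             .dest_nentries = 0,
+ *     };
+ *
+ *     ce_state = ath10k_ce_init(ar, 1, &example_attr);
+ */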
+
+/*
+ * When using sendlist_send to transfer multiple buffer fragments, the
+ * transfer context of each fragment, except the last one, is filled
+ * with CE_SENDLIST_ITEM_CTXT. ce_completed_send returns success for
+ * each completed fragment, so the upper layer can use the
+ * CE_SENDLIST_ITEM_CTXT marker to tell intermediate fragments apart
+ * from the final completion of a send (see the sketch below).
+ */
+#define CE_SENDLIST_ITEM_CTXT  ((void *)0xcecebeef)
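+
+/*
+ * Sketch of the completion pattern described above ("complete_send" is
+ * a hypothetical upper-layer hook):
+ *
+ *     while (ath10k_ce_completed_send_next(ce_state, &ctx, &buf,
+ *                                          &nbytes, &id) == 0) {
+ *             if (ctx == CE_SENDLIST_ITEM_CTXT)
+ *                     continue;   (intermediate gather fragment)
+ *             complete_send(ctx);
+ *     }
+ */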
+
+#define SR_BA_ADDRESS          0x0000
+#define SR_SIZE_ADDRESS                0x0004
+#define DR_BA_ADDRESS          0x0008
+#define DR_SIZE_ADDRESS                0x000c
+#define CE_CMD_ADDRESS         0x0018
+
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB     17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB     17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK    0x00020000
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
+       (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
+       CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB     16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB     16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK    0x00010000
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
+       (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
+        CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
+       (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
+        CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_DMAX_LENGTH_MSB               15
+#define CE_CTRL1_DMAX_LENGTH_LSB               0
+#define CE_CTRL1_DMAX_LENGTH_MASK              0x0000ffff
+#define CE_CTRL1_DMAX_LENGTH_GET(x) \
+       (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
+#define CE_CTRL1_DMAX_LENGTH_SET(x) \
+       (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
+
+#define CE_CTRL1_ADDRESS                       0x0010
+#define CE_CTRL1_HW_MASK                       0x0007ffff
+#define CE_CTRL1_SW_MASK                       0x0007ffff
+#define CE_CTRL1_HW_WRITE_MASK                 0x00000000
+#define CE_CTRL1_SW_WRITE_MASK                 0x0007ffff
+#define CE_CTRL1_RSTMASK                       0xffffffff
+#define CE_CTRL1_RESET                         0x00000080
+
+#define CE_CMD_HALT_STATUS_MSB                 3
+#define CE_CMD_HALT_STATUS_LSB                 3
+#define CE_CMD_HALT_STATUS_MASK                        0x00000008
+#define CE_CMD_HALT_STATUS_GET(x) \
+       (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
+#define CE_CMD_HALT_STATUS_SET(x) \
+       (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
+#define CE_CMD_HALT_STATUS_RESET               0
+#define CE_CMD_HALT_MSB                                0
+#define CE_CMD_HALT_MASK                       0x00000001
+
+#define HOST_IE_COPY_COMPLETE_MSB              0
+#define HOST_IE_COPY_COMPLETE_LSB              0
+#define HOST_IE_COPY_COMPLETE_MASK             0x00000001
+#define HOST_IE_COPY_COMPLETE_GET(x) \
+       (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
+#define HOST_IE_COPY_COMPLETE_SET(x) \
+       (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
+#define HOST_IE_COPY_COMPLETE_RESET            0
+#define HOST_IE_ADDRESS                                0x002c
+
+#define HOST_IS_DST_RING_LOW_WATERMARK_MASK    0x00000010
+#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK   0x00000008
+#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK    0x00000004
+#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK   0x00000002
+#define HOST_IS_COPY_COMPLETE_MASK             0x00000001
+#define HOST_IS_ADDRESS                                0x0030
+
+#define MISC_IE_ADDRESS                                0x0034
+
+#define MISC_IS_AXI_ERR_MASK                   0x00000400
+
+#define MISC_IS_DST_ADDR_ERR_MASK              0x00000200
+#define MISC_IS_SRC_LEN_ERR_MASK               0x00000100
+#define MISC_IS_DST_MAX_LEN_VIO_MASK           0x00000080
+#define MISC_IS_DST_RING_OVERFLOW_MASK         0x00000040
+#define MISC_IS_SRC_RING_OVERFLOW_MASK         0x00000020
+
+#define MISC_IS_ADDRESS                                0x0038
+
+#define SR_WR_INDEX_ADDRESS                    0x003c
+
+#define DST_WR_INDEX_ADDRESS                   0x0040
+
+#define CURRENT_SRRI_ADDRESS                   0x0044
+
+#define CURRENT_DRRI_ADDRESS                   0x0048
+
+#define SRC_WATERMARK_LOW_MSB                  31
+#define SRC_WATERMARK_LOW_LSB                  16
+#define SRC_WATERMARK_LOW_MASK                 0xffff0000
+#define SRC_WATERMARK_LOW_GET(x) \
+       (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
+#define SRC_WATERMARK_LOW_SET(x) \
+       (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
+#define SRC_WATERMARK_LOW_RESET                        0
+#define SRC_WATERMARK_HIGH_MSB                 15
+#define SRC_WATERMARK_HIGH_LSB                 0
+#define SRC_WATERMARK_HIGH_MASK                        0x0000ffff
+#define SRC_WATERMARK_HIGH_GET(x) \
+       (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
+#define SRC_WATERMARK_HIGH_SET(x) \
+       (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
+#define SRC_WATERMARK_HIGH_RESET               0
+#define SRC_WATERMARK_ADDRESS                  0x004c
+
+#define DST_WATERMARK_LOW_LSB                  16
+#define DST_WATERMARK_LOW_MASK                 0xffff0000
+#define DST_WATERMARK_LOW_SET(x) \
+       (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
+#define DST_WATERMARK_LOW_RESET                        0
+#define DST_WATERMARK_HIGH_MSB                 15
+#define DST_WATERMARK_HIGH_LSB                 0
+#define DST_WATERMARK_HIGH_MASK                        0x0000ffff
+#define DST_WATERMARK_HIGH_GET(x) \
+       (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
+#define DST_WATERMARK_HIGH_SET(x) \
+       (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
+#define DST_WATERMARK_HIGH_RESET               0
+#define DST_WATERMARK_ADDRESS                  0x0050
+
+static inline u32 ath10k_ce_base_address(unsigned int ce_id)
+{
+       return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
+}
+
+#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK  | \
+                          HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
+                          HOST_IS_DST_RING_LOW_WATERMARK_MASK  | \
+                          HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
+
+#define CE_ERROR_MASK  (MISC_IS_AXI_ERR_MASK           | \
+                        MISC_IS_DST_ADDR_ERR_MASK      | \
+                        MISC_IS_SRC_LEN_ERR_MASK       | \
+                        MISC_IS_DST_MAX_LEN_VIO_MASK   | \
+                        MISC_IS_DST_RING_OVERFLOW_MASK | \
+                        MISC_IS_SRC_RING_OVERFLOW_MASK)
+
+#define CE_SRC_RING_TO_DESC(baddr, idx) \
+       (&(((struct ce_desc *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC(baddr, idx) \
+       (&(((struct ce_desc *)baddr)[idx]))
+
+/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
+#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
+       (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
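+
+/*
+ * Example: for an 8-entry ring (nentries_mask = 7) with write_index 6
+ * and sw_index 2, CE_RING_DELTA(7, 6, 2 - 1) = 3, i.e. three entries
+ * may still be queued; one entry is deliberately kept unused so that a
+ * full ring can be told apart from an empty one.
+ */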
+
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB              8
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK             0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
+       (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
+               CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
+#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS                   0x0000
+
+#define CE_INTERRUPT_SUMMARY(ar) \
+       CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
+               ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
+               CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
+
+#endif /* _CE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
new file mode 100644 (file)
index 0000000..2b3426b
--- /dev/null
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include "core.h"
+#include "mac.h"
+#include "htc.h"
+#include "hif.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "debug.h"
+#include "htt.h"
+
+unsigned int ath10k_debug_mask;
+static bool uart_print;
+static unsigned int ath10k_p2p;
+module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param(uart_print, bool, 0644);
+module_param_named(p2p, ath10k_p2p, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+MODULE_PARM_DESC(uart_print, "UART target debugging");
+MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
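+
+/*
+ * Example (the module name "ath10k_core" and the mask value are
+ * assumptions, not taken from this patch): the parameters above can
+ * be set at load time, e.g.
+ *
+ *     modprobe ath10k_core debug_mask=0x3 uart_print=1 p2p=1
+ */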
+
+static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+       {
+               .id = QCA988X_HW_1_0_VERSION,
+               .name = "qca988x hw1.0",
+               .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
+               .fw = {
+                       .dir = QCA988X_HW_1_0_FW_DIR,
+                       .fw = QCA988X_HW_1_0_FW_FILE,
+                       .otp = QCA988X_HW_1_0_OTP_FILE,
+                       .board = QCA988X_HW_1_0_BOARD_DATA_FILE,
+               },
+       },
+       {
+               .id = QCA988X_HW_2_0_VERSION,
+               .name = "qca988x hw2.0",
+               .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+               .fw = {
+                       .dir = QCA988X_HW_2_0_FW_DIR,
+                       .fw = QCA988X_HW_2_0_FW_FILE,
+                       .otp = QCA988X_HW_2_0_OTP_FILE,
+                       .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
+               },
+       },
+};
+
+static void ath10k_send_suspend_complete(struct ath10k *ar)
+{
+       ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
+
+       ar->is_target_paused = true;
+       wake_up(&ar->event_queue);
+}
+
+static int ath10k_check_fw_version(struct ath10k *ar)
+{
+       char version[32];
+
+       if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
+           ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
+           ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
+           ar->fw_version_build >= SUPPORTED_FW_BUILD)
+               return 0;
+
+       snprintf(version, sizeof(version), "%u.%u.%u.%u",
+                SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
+                SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
+
+       ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
+                   ar->hw->wiphy->fw_version);
+       ath10k_warn("Please upgrade to version %s (or newer)\n", version);
+
+       return 0;
+}
+
+static int ath10k_init_connect_htc(struct ath10k *ar)
+{
+       int status;
+
+       status = ath10k_wmi_connect_htc_service(ar);
+       if (status)
+               goto conn_fail;
+
+       /* Start HTC */
+       status = ath10k_htc_start(ar->htc);
+       if (status)
+               goto conn_fail;
+
+       /* Wait for WMI event to be ready */
+       status = ath10k_wmi_wait_for_service_ready(ar);
+       if (status <= 0) {
+               ath10k_warn("wmi service ready event not received\n");
+               status = -ETIMEDOUT;
+               goto timeout;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
+       return 0;
+
+timeout:
+       ath10k_htc_stop(ar->htc);
+conn_fail:
+       return status;
+}
+
+static int ath10k_init_configure_target(struct ath10k *ar)
+{
+       u32 param_host;
+       int ret;
+
+       /* tell the target which HTC version is used */
+       ret = ath10k_bmi_write32(ar, hi_app_host_interest,
+                                HTC_PROTOCOL_VERSION);
+       if (ret) {
+               ath10k_err("setting HTC version failed\n");
+               return ret;
+       }
+
+       /* set the firmware mode to STA/IBSS/AP */
+       ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
+       if (ret) {
+               ath10k_err("setting firmware mode (1/2) failed\n");
+               return ret;
+       }
+
+       /* TODO: the following parameters need to be revisited. */
+       /* num_device */
+       param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT);
+       /* Firmware mode */
+       /* FIXME: why FW_MODE_AP? */
+       param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
+       /* mac_addr_method */
+       param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+       /* firmware_bridge */
+       param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+       /* fwsubmode */
+       param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);
+
+       ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
+       if (ret) {
+               ath10k_err("setting firmware mode (2/2) failed\n");
+               return ret;
+       }
+
+       /* We do all byte-swapping on the host */
+       ret = ath10k_bmi_write32(ar, hi_be, 0);
+       if (ret) {
+               ath10k_err("setting host CPU BE mode failed\n");
+               return ret;
+       }
+
+       /* FW descriptor/Data swap flags */
+       ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
+       if (ret) {
+               ath10k_err("setting FW data/desc swap flags failed\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
+                                                  const char *dir,
+                                                  const char *file)
+{
+       char filename[100];
+       const struct firmware *fw;
+       int ret;
+
+       if (file == NULL)
+               return ERR_PTR(-ENOENT);
+
+       if (dir == NULL)
+               dir = ".";
+
+       snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+       ret = request_firmware(&fw, filename, ar->dev);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return fw;
+}
+
+static int ath10k_push_board_ext_data(struct ath10k *ar,
+                                     const struct firmware *fw)
+{
+       u32 board_data_size = QCA988X_BOARD_DATA_SZ;
+       u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
+       u32 board_ext_data_addr;
+       int ret;
+
+       ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
+       if (ret) {
+               ath10k_err("could not read board ext data addr (%d)\n", ret);
+               return ret;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE,
+                  "board extended data download addr: 0x%x\n",
+                  board_ext_data_addr);
+
+       if (board_ext_data_addr == 0)
+               return 0;
+
+       if (fw->size != (board_data_size + board_ext_data_size)) {
+               ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
+                          fw->size, board_data_size, board_ext_data_size);
+               return -EINVAL;
+       }
+
+       ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
+                                     fw->data + board_data_size,
+                                     board_ext_data_size);
+       if (ret) {
+               ath10k_err("could not write board ext data (%d)\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
+                                (board_ext_data_size << 16) | 1);
+       if (ret) {
+               ath10k_err("could not write board ext data bit (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int ath10k_download_board_data(struct ath10k *ar)
+{
+       u32 board_data_size = QCA988X_BOARD_DATA_SZ;
+       u32 address;
+       const struct firmware *fw;
+       int ret;
+
+       fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+                                 ar->hw_params.fw.board);
+       if (IS_ERR(fw)) {
+               ath10k_err("could not fetch board data fw file (%ld)\n",
+                          PTR_ERR(fw));
+               return PTR_ERR(fw);
+       }
+
+       ret = ath10k_push_board_ext_data(ar, fw);
+       if (ret) {
+               ath10k_err("could not push board ext data (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_bmi_read32(ar, hi_board_data, &address);
+       if (ret) {
+               ath10k_err("could not read board data addr (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_bmi_write_memory(ar, address, fw->data,
+                                     min_t(u32, board_data_size, fw->size));
+       if (ret) {
+               ath10k_err("could not write board data (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+       if (ret) {
+               ath10k_err("could not write board data bit (%d)\n", ret);
+               goto exit;
+       }
+
+exit:
+       release_firmware(fw);
+       return ret;
+}
+
+static int ath10k_download_and_run_otp(struct ath10k *ar)
+{
+       const struct firmware *fw;
+       u32 address;
+       u32 exec_param;
+       int ret;
+
+       /* OTP is optional */
+
+       if (ar->hw_params.fw.otp == NULL) {
+               ath10k_info("otp file not defined\n");
+               return 0;
+       }
+
+       address = ar->hw_params.patch_load_addr;
+
+       fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+                                 ar->hw_params.fw.otp);
+       if (IS_ERR(fw)) {
+               ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
+               return 0;
+       }
+
+       ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+       if (ret) {
+               ath10k_err("could not write otp (%d)\n", ret);
+               goto exit;
+       }
+
+       exec_param = 0;
+       ret = ath10k_bmi_execute(ar, address, &exec_param);
+       if (ret) {
+               ath10k_err("could not execute otp (%d)\n", ret);
+               goto exit;
+       }
+
+exit:
+       release_firmware(fw);
+       return ret;
+}
+
+static int ath10k_download_fw(struct ath10k *ar)
+{
+       const struct firmware *fw;
+       u32 address;
+       int ret;
+
+       if (ar->hw_params.fw.fw == NULL)
+               return -EINVAL;
+
+       address = ar->hw_params.patch_load_addr;
+
+       fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+                                 ar->hw_params.fw.fw);
+       if (IS_ERR(fw)) {
+               ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
+               return PTR_ERR(fw);
+       }
+
+       ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+       if (ret) {
+               ath10k_err("could not write fw (%d)\n", ret);
+               goto exit;
+       }
+
+exit:
+       release_firmware(fw);
+       return ret;
+}
+
+static int ath10k_init_download_firmware(struct ath10k *ar)
+{
+       int ret;
+
+       ret = ath10k_download_board_data(ar);
+       if (ret)
+               return ret;
+
+       ret = ath10k_download_and_run_otp(ar);
+       if (ret)
+               return ret;
+
+       ret = ath10k_download_fw(ar);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
+static int ath10k_init_uart(struct ath10k *ar)
+{
+       int ret;
+
+       /*
+        * Explicitly disable UART prints, as the target may turn them
+        * on based on scratch registers.
+        */
+       ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
+       if (ret) {
+               ath10k_warn("could not disable UART prints (%d)\n", ret);
+               return ret;
+       }
+
+       if (!uart_print) {
+               ath10k_info("UART prints disabled\n");
+               return 0;
+       }
+
+       ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
+       if (ret) {
+               ath10k_warn("could not set UART TX pin (%d)\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
+       if (ret) {
+               ath10k_warn("could not enable UART prints (%d)\n", ret);
+               return ret;
+       }
+
+       ath10k_info("UART prints enabled\n");
+       return 0;
+}
+
+static int ath10k_init_hw_params(struct ath10k *ar)
+{
+       const struct ath10k_hw_params *uninitialized_var(hw_params);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
+               hw_params = &ath10k_hw_params_list[i];
+
+               if (hw_params->id == ar->target_version)
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
+               ath10k_err("Unsupported hardware version: 0x%x\n",
+                          ar->target_version);
+               return -EINVAL;
+       }
+
+       ar->hw_params = *hw_params;
+
+       ath10k_info("Hardware name %s version 0x%x\n",
+                   ar->hw_params.name, ar->target_version);
+
+       return 0;
+}
+
+struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
+                                 enum ath10k_bus bus,
+                                 const struct ath10k_hif_ops *hif_ops)
+{
+       struct ath10k *ar;
+
+       ar = ath10k_mac_create();
+       if (!ar)
+               return NULL;
+
+       ar->ath_common.priv = ar;
+       ar->ath_common.hw = ar->hw;
+
+       ar->p2p = !!ath10k_p2p;
+       ar->dev = dev;
+
+       ar->hif.priv = hif_priv;
+       ar->hif.ops = hif_ops;
+       ar->hif.bus = bus;
+
+       ar->free_vdev_map = 0xFF; /* 8 vdevs */
+
+       init_completion(&ar->scan.started);
+       init_completion(&ar->scan.completed);
+       init_completion(&ar->scan.on_channel);
+
+       init_completion(&ar->install_key_done);
+       init_completion(&ar->vdev_setup_done);
+
+       setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
+
+       ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+       if (!ar->workqueue)
+               goto err_wq;
+
+       mutex_init(&ar->conf_mutex);
+       spin_lock_init(&ar->data_lock);
+
+       INIT_LIST_HEAD(&ar->peers);
+       init_waitqueue_head(&ar->peer_mapping_wq);
+
+       init_completion(&ar->offchan_tx_completed);
+       INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+       skb_queue_head_init(&ar->offchan_tx_queue);
+
+       init_waitqueue_head(&ar->event_queue);
+
+       return ar;
+
+err_wq:
+       ath10k_mac_destroy(ar);
+       return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+
+void ath10k_core_destroy(struct ath10k *ar)
+{
+       flush_workqueue(ar->workqueue);
+       destroy_workqueue(ar->workqueue);
+
+       ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
+
+int ath10k_core_register(struct ath10k *ar)
+{
+       struct ath10k_htc_ops htc_ops;
+       struct bmi_target_info target_info;
+       int status;
+
+       memset(&target_info, 0, sizeof(target_info));
+       status = ath10k_bmi_get_target_info(ar, &target_info);
+       if (status)
+               goto err;
+
+       ar->target_version = target_info.version;
+       ar->hw->wiphy->hw_version = target_info.version;
+
+       status = ath10k_init_hw_params(ar);
+       if (status)
+               goto err;
+
+       if (ath10k_init_configure_target(ar)) {
+               status = -EINVAL;
+               goto err;
+       }
+
+       status = ath10k_init_download_firmware(ar);
+       if (status)
+               goto err;
+
+       status = ath10k_init_uart(ar);
+       if (status)
+               goto err;
+
+       htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete;
+
+       ar->htc = ath10k_htc_create(ar, &htc_ops);
+       if (IS_ERR(ar->htc)) {
+               status = PTR_ERR(ar->htc);
+               ath10k_err("could not create HTC (%d)\n", status);
+               goto err;
+       }
+
+       status = ath10k_bmi_done(ar);
+       if (status)
+               goto err_htc_destroy;
+
+       status = ath10k_wmi_attach(ar);
+       if (status) {
+               ath10k_err("WMI attach failed: %d\n", status);
+               goto err_htc_destroy;
+       }
+
+       status = ath10k_htc_wait_target(ar->htc);
+       if (status)
+               goto err_wmi_detach;
+
+       ar->htt = ath10k_htt_attach(ar);
+       if (!ar->htt) {
+               status = -ENOMEM;
+               goto err_wmi_detach;
+       }
+
+       status = ath10k_init_connect_htc(ar);
+       if (status)
+               goto err_htt_detach;
+
+       ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
+
+       status = ath10k_check_fw_version(ar);
+       if (status)
+               goto err_disconnect_htc;
+
+       status = ath10k_wmi_cmd_init(ar);
+       if (status) {
+               ath10k_err("could not send WMI init command (%d)\n", status);
+               goto err_disconnect_htc;
+       }
+
+       status = ath10k_wmi_wait_for_unified_ready(ar);
+       if (status <= 0) {
+               ath10k_err("wmi unified ready event not received\n");
+               status = -ETIMEDOUT;
+               goto err_disconnect_htc;
+       }
+
+       status = ath10k_htt_attach_target(ar->htt);
+       if (status)
+               goto err_disconnect_htc;
+
+       status = ath10k_mac_register(ar);
+       if (status)
+               goto err_disconnect_htc;
+
+       status = ath10k_debug_create(ar);
+       if (status) {
+               ath10k_err("unable to initialize debugfs\n");
+               goto err_unregister_mac;
+       }
+
+       return 0;
+
+err_unregister_mac:
+       ath10k_mac_unregister(ar);
+err_disconnect_htc:
+       ath10k_htc_stop(ar->htc);
+err_htt_detach:
+       ath10k_htt_detach(ar->htt);
+err_wmi_detach:
+       ath10k_wmi_detach(ar);
+err_htc_destroy:
+       ath10k_htc_destroy(ar->htc);
+err:
+       return status;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+       /* We must unregister from mac80211 before we stop HTC and HIF.
+        * Otherwise we will fail to submit commands to FW and mac80211 will be
+        * unhappy about callback failures. */
+       ath10k_mac_unregister(ar);
+       ath10k_htc_stop(ar->htc);
+       ath10k_htt_detach(ar->htt);
+       ath10k_wmi_detach(ar);
+       ath10k_htc_destroy(ar->htc);
+}
+EXPORT_SYMBOL(ath10k_core_unregister);
+
+int ath10k_core_target_suspend(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ATH10K_DBG_CORE, "%s: called\n", __func__);
+
+       ret = ath10k_wmi_pdev_suspend_target(ar);
+       if (ret)
+               ath10k_warn("could not suspend target (%d)\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_core_target_suspend);
+
+int ath10k_core_target_resume(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ATH10K_DBG_CORE, "%s: called\n", __func__);
+
+       ret = ath10k_wmi_pdev_resume_target(ar);
+       if (ret)
+               ath10k_warn("could not resume target (%d)\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_core_target_resume);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
new file mode 100644 (file)
index 0000000..539336d
--- /dev/null
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "htc.h"
+#include "hw.h"
+#include "targaddrs.h"
+#include "wmi.h"
+#include "../ath.h"
+#include "../regd.h"
+
+#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+#define WO(_f)      ((_f##_OFFSET) >> 2)
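+
+/*
+ * Example: MS()/SM() pair a value with FIELD_MASK/FIELD_LSB macro
+ * pairs, e.g. CE_CTRL1_DMAX_LENGTH_{MASK,LSB} from ce.h:
+ *
+ *     reg |= SM(max_len, CE_CTRL1_DMAX_LENGTH);   (insert field)
+ *     max_len = MS(reg, CE_CTRL1_DMAX_LENGTH);    (extract field)
+ */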
+
+#define ATH10K_SCAN_ID 0
+#define WMI_READY_TIMEOUT (5 * HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+
+/* Antenna noise floor */
+#define ATH10K_DEFAULT_NOISE_FLOOR -95
+
+struct ath10k;
+
+enum ath10k_bus {
+       ATH10K_BUS_PCI,
+};
+
+struct ath10k_skb_cb {
+       dma_addr_t paddr;
+       bool is_mapped;
+       bool is_aborted;
+
+       struct {
+               u8 vdev_id;
+               u16 msdu_id;
+               u8 tid;
+               bool is_offchan;
+               bool is_conf;
+               bool discard;
+               bool no_ack;
+               u8 refcount;
+               struct sk_buff *txfrag;
+               struct sk_buff *msdu;
+       } __packed htt;
+
+       /* 4 bytes left on 64bit arch */
+} __packed;
+
+static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
+                    IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+       return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
+{
+       if (ATH10K_SKB_CB(skb)->is_mapped)
+               return -EINVAL;
+
+       ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
+                                                  DMA_TO_DEVICE);
+
+       if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
+               return -EIO;
+
+       ATH10K_SKB_CB(skb)->is_mapped = true;
+       return 0;
+}
+
+static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
+{
+       if (!ATH10K_SKB_CB(skb)->is_mapped)
+               return -EINVAL;
+
+       dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
+                        DMA_TO_DEVICE);
+       ATH10K_SKB_CB(skb)->is_mapped = false;
+       return 0;
+}
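+
+/*
+ * Example (sketch; "send to hardware" stands in for the HIF/CE
+ * submission path): the usual life cycle of a TX buffer around these
+ * helpers.
+ *
+ *     ret = ath10k_skb_map(ar->dev, skb);
+ *     if (ret)
+ *             return ret;
+ *     ... send to hardware using ATH10K_SKB_CB(skb)->paddr ...
+ *     ath10k_skb_unmap(ar->dev, skb);
+ */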
+
+static inline u32 host_interest_item_address(u32 item_offset)
+{
+       return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
+}
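+
+/*
+ * Example (assuming the HI_ITEM() offsetof helper from targaddrs.h):
+ * the address of a host-interest word is formed as
+ *
+ *     addr = host_interest_item_address(HI_ITEM(hi_option_flag));
+ */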
+
+struct ath10k_bmi {
+       bool done_sent;
+};
+
+struct ath10k_wmi {
+       enum ath10k_htc_ep_id eid;
+       struct completion service_ready;
+       struct completion unified_ready;
+       atomic_t pending_tx_count;
+       wait_queue_head_t wq;
+
+       struct sk_buff_head wmi_event_list;
+       struct work_struct wmi_event_work;
+};
+
+struct ath10k_peer_stat {
+       u8 peer_macaddr[ETH_ALEN];
+       u32 peer_rssi;
+       u32 peer_tx_rate;
+};
+
+struct ath10k_target_stats {
+       /* PDEV stats */
+       s32 ch_noise_floor;
+       u32 tx_frame_count;
+       u32 rx_frame_count;
+       u32 rx_clear_count;
+       u32 cycle_count;
+       u32 phy_err_count;
+       u32 chan_tx_power;
+
+       /* PDEV TX stats */
+       s32 comp_queued;
+       s32 comp_delivered;
+       s32 msdu_enqued;
+       s32 mpdu_enqued;
+       s32 wmm_drop;
+       s32 local_enqued;
+       s32 local_freed;
+       s32 hw_queued;
+       s32 hw_reaped;
+       s32 underrun;
+       s32 tx_abort;
+       s32 mpdus_requed;
+       u32 tx_ko;
+       u32 data_rc;
+       u32 self_triggers;
+       u32 sw_retry_failure;
+       u32 illgl_rate_phy_err;
+       u32 pdev_cont_xretry;
+       u32 pdev_tx_timeout;
+       u32 pdev_resets;
+       u32 phy_underrun;
+       u32 txop_ovf;
+
+       /* PDEV RX stats */
+       s32 mid_ppdu_route_change;
+       s32 status_rcvd;
+       s32 r0_frags;
+       s32 r1_frags;
+       s32 r2_frags;
+       s32 r3_frags;
+       s32 htt_msdus;
+       s32 htt_mpdus;
+       s32 loc_msdus;
+       s32 loc_mpdus;
+       s32 oversize_amsdu;
+       s32 phy_errs;
+       s32 phy_err_drop;
+       s32 mpdu_errs;
+
+       /* VDEV STATS */
+
+       /* PEER STATS */
+       u8 peers;
+       struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
+
+       /* TODO: Beacon filter stats */
+
+};
+
+#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
+
+struct ath10k_peer {
+       struct list_head list;
+       int vdev_id;
+       u8 addr[ETH_ALEN];
+       DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
+       struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+};
+
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+
+struct ath10k_vif {
+       u32 vdev_id;
+       enum wmi_vdev_type vdev_type;
+       enum wmi_vdev_subtype vdev_subtype;
+       u32 beacon_interval;
+       u32 dtim_period;
+
+       struct ath10k *ar;
+       struct ieee80211_vif *vif;
+
+       struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
+       u8 def_wep_key_index;
+
+       u16 tx_seq_no;
+
+       union {
+               struct {
+                       u8 bssid[ETH_ALEN];
+                       u32 uapsd;
+               } sta;
+               struct {
+                       /* 127 stations; wmi limit */
+                       u8 tim_bitmap[16];
+                       u8 tim_len;
+                       u32 ssid_len;
+                       u8 ssid[IEEE80211_MAX_SSID_LEN];
+                       bool hidden_ssid;
+                       /* P2P_IE with NoA attribute for P2P_GO case */
+                       u32 noa_len;
+                       u8 *noa_data;
+               } ap;
+               struct {
+                       u8 bssid[ETH_ALEN];
+               } ibss;
+       } u;
+};
+
+struct ath10k_vif_iter {
+       u32 vdev_id;
+       struct ath10k_vif *arvif;
+};
+
+struct ath10k_debug {
+       struct dentry *debugfs_phy;
+
+       struct ath10k_target_stats target_stats;
+       u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+
+       struct completion event_stats_compl;
+};
+
+struct ath10k {
+       struct ath_common ath_common;
+       struct ieee80211_hw *hw;
+       struct device *dev;
+       u8 mac_addr[ETH_ALEN];
+
+       u32 target_version;
+       u8 fw_version_major;
+       u32 fw_version_minor;
+       u16 fw_version_release;
+       u16 fw_version_build;
+       u32 phy_capability;
+       u32 hw_min_tx_power;
+       u32 hw_max_tx_power;
+       u32 ht_cap_info;
+       u32 vht_cap_info;
+
+       struct targetdef *targetdef;
+       struct hostdef *hostdef;
+
+       bool p2p;
+
+       struct {
+               void *priv;
+               enum ath10k_bus bus;
+               const struct ath10k_hif_ops *ops;
+       } hif;
+
+       struct ath10k_wmi wmi;
+
+       wait_queue_head_t event_queue;
+       bool is_target_paused;
+
+       struct ath10k_bmi bmi;
+
+       struct ath10k_htc *htc;
+       struct ath10k_htt *htt;
+
+       struct ath10k_hw_params {
+               u32 id;
+               const char *name;
+               u32 patch_load_addr;
+
+               struct ath10k_hw_params_fw {
+                       const char *dir;
+                       const char *fw;
+                       const char *otp;
+                       const char *board;
+               } fw;
+       } hw_params;
+
+       struct {
+               struct completion started;
+               struct completion completed;
+               struct completion on_channel;
+               struct timer_list timeout;
+               bool is_roc;
+               bool in_progress;
+               bool aborting;
+               int vdev_id;
+               int roc_freq;
+       } scan;
+
+       struct {
+               struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+       } mac;
+
+       /* should never be NULL; needed for regular htt rx */
+       struct ieee80211_channel *rx_channel;
+
+       /* valid during scan; needed for mgmt rx during scan */
+       struct ieee80211_channel *scan_channel;
+
+       int free_vdev_map;
+       int monitor_vdev_id;
+       bool monitor_enabled;
+       bool monitor_present;
+       unsigned int filter_flags;
+
+       struct wmi_pdev_set_wmm_params_arg wmm_params;
+       struct completion install_key_done;
+
+       struct completion vdev_setup_done;
+
+       struct workqueue_struct *workqueue;
+
+       /* prevents concurrent FW reconfiguration */
+       struct mutex conf_mutex;
+
+       /* protects shared structure data */
+       spinlock_t data_lock;
+
+       struct list_head peers;
+       wait_queue_head_t peer_mapping_wq;
+
+       struct work_struct offchan_tx_work;
+       struct sk_buff_head offchan_tx_queue;
+       struct completion offchan_tx_completed;
+       struct sk_buff *offchan_tx_skb;
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+       struct ath10k_debug debug;
+#endif
+};
+
+struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
+                                 enum ath10k_bus bus,
+                                 const struct ath10k_hif_ops *hif_ops);
+void ath10k_core_destroy(struct ath10k *ar);
+
+int ath10k_core_register(struct ath10k *ar);
+void ath10k_core_unregister(struct ath10k *ar);
+
+int ath10k_core_target_suspend(struct ath10k *ar);
+int ath10k_core_target_resume(struct ath10k *ar);
+
+#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
new file mode 100644 (file)
index 0000000..499034b
--- /dev/null
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#include "core.h"
+#include "debug.h"
+
+static int ath10k_printk(const char *level, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+       int rtn;
+
+       va_start(args, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       rtn = printk("%sath10k: %pV", level, &vaf);
+
+       va_end(args);
+
+       return rtn;
+}
+
+int ath10k_info(const char *fmt, ...)
+{
+       struct va_format vaf = {
+               .fmt = fmt,
+       };
+       va_list args;
+       int ret;
+
+       va_start(args, fmt);
+       vaf.va = &args;
+       ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
+       trace_ath10k_log_info(&vaf);
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_info);
+
+int ath10k_err(const char *fmt, ...)
+{
+       struct va_format vaf = {
+               .fmt = fmt,
+       };
+       va_list args;
+       int ret;
+
+       va_start(args, fmt);
+       vaf.va = &args;
+       ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
+       trace_ath10k_log_err(&vaf);
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_err);
+
+int ath10k_warn(const char *fmt, ...)
+{
+       struct va_format vaf = {
+               .fmt = fmt,
+       };
+       va_list args;
+       int ret = 0;
+
+       va_start(args, fmt);
+       vaf.va = &args;
+
+       if (net_ratelimit())
+               ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);
+
+       trace_ath10k_log_warn(&vaf);
+
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_warn);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+
+void ath10k_debug_read_service_map(struct ath10k *ar,
+                                  void *service_map,
+                                  size_t map_size)
+{
+       /* clamp defensively in case the event carries a larger map */
+       map_size = min(map_size, sizeof(ar->debug.wmi_service_bitmap));
+       memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
+}
+
+static ssize_t ath10k_read_wmi_services(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       char *buf;
+       unsigned int len = 0, buf_len = 1500;
+       const char *status;
+       ssize_t ret_cnt;
+       int i;
+
+       buf = kzalloc(buf_len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&ar->conf_mutex);
+
+       for (i = 0; i < WMI_SERVICE_LAST; i++) {
+               if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
+                       status = "enabled";
+               else
+                       status = "disabled";
+
+               len += scnprintf(buf + len, buf_len - len,
+                                "0x%02x - %20s - %s\n",
+                                i, wmi_service_name(i), status);
+       }
+
+       ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+       mutex_unlock(&ar->conf_mutex);
+
+       kfree(buf);
+       return ret_cnt;
+}
+
+static const struct file_operations fops_wmi_services = {
+       .read = ath10k_read_wmi_services,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+void ath10k_debug_read_target_stats(struct ath10k *ar,
+                                   struct wmi_stats_event *ev)
+{
+       u8 *tmp = ev->data;
+       struct ath10k_target_stats *stats;
+       int num_pdev_stats, num_vdev_stats, num_peer_stats;
+       struct wmi_pdev_stats *ps;
+       int i;
+
+       mutex_lock(&ar->conf_mutex);
+
+       stats = &ar->debug.target_stats;
+
+       num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
+       num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
+       num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
+
+       if (num_pdev_stats) {
+               ps = (struct wmi_pdev_stats *)tmp;
+
+               stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
+               stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
+               stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
+               stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
+               stats->cycle_count = __le32_to_cpu(ps->cycle_count);
+               stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
+               stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
+
+               stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
+               stats->comp_delivered =
+                       __le32_to_cpu(ps->wal.tx.comp_delivered);
+               stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
+               stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
+               stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
+               stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
+               stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
+               stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
+               stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
+               stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
+               stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
+               stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
+               stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
+               stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
+               stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
+               stats->sw_retry_failure =
+                       __le32_to_cpu(ps->wal.tx.sw_retry_failure);
+               stats->illgl_rate_phy_err =
+                       __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
+               stats->pdev_cont_xretry =
+                       __le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
+               stats->pdev_tx_timeout =
+                       __le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
+               stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
+               stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
+               stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
+
+               stats->mid_ppdu_route_change =
+                       __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
+               stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
+               stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
+               stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
+               stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
+               stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
+               stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
+               stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
+               stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
+               stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
+               stats->oversize_amsdu =
+                       __le32_to_cpu(ps->wal.rx.oversize_amsdu);
+               stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
+               stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
+               stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
+
+               tmp += sizeof(struct wmi_pdev_stats);
+       }
+
+       /* 0 or max vdevs; the firmware does not currently support
+        * VDEV stats, so the records are simply skipped */
+       if (num_vdev_stats) {
+               struct wmi_vdev_stats *vdev_stats;
+
+               for (i = 0; i < num_vdev_stats; i++) {
+                       vdev_stats = (struct wmi_vdev_stats *)tmp;
+                       tmp += sizeof(struct wmi_vdev_stats);
+               }
+       }
+
+       if (num_peer_stats) {
+               struct wmi_peer_stats *peer_stats;
+               struct ath10k_peer_stat *s;
+
+               stats->peers = num_peer_stats;
+
+               for (i = 0; i < num_peer_stats; i++) {
+                       peer_stats = (struct wmi_peer_stats *)tmp;
+                       s = &stats->peer_stat[i];
+
+                       WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
+                                                  s->peer_macaddr);
+                       s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
+                       s->peer_tx_rate =
+                               __le32_to_cpu(peer_stats->peer_tx_rate);
+
+                       tmp += sizeof(struct wmi_peer_stats);
+               }
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+       complete(&ar->debug.event_stats_compl);
+}
+
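+/*
+ * fw_stats read flow: request a stats snapshot from the firmware
+ * over WMI, wait for the stats event to fill ar->debug.target_stats
+ * (see ath10k_debug_read_target_stats() above), then format the
+ * snapshot while holding conf_mutex.
+ */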
+static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       struct ath10k_target_stats *fw_stats;
+       char *buf;
+       unsigned int len = 0, buf_len = 2500;
+       ssize_t ret_cnt;
+       long left;
+       int i;
+       int ret;
+
+       fw_stats = &ar->debug.target_stats;
+
+       buf = kzalloc(buf_len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
+       if (ret) {
+               ath10k_warn("could not request stats (%d)\n", ret);
+               kfree(buf);
+               return -EIO;
+       }
+
+       left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
+
+       if (left <= 0) {
+               kfree(buf);
+               return -ETIMEDOUT;
+       }
+
+       mutex_lock(&ar->conf_mutex);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n",
+                        "ath10k PDEV stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Channel noise floor", fw_stats->ch_noise_floor);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "Channel TX power", fw_stats->chan_tx_power);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "TX frame count", fw_stats->tx_frame_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "RX frame count", fw_stats->rx_frame_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "RX clear count", fw_stats->rx_clear_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "Cycle count", fw_stats->cycle_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "PHY error count", fw_stats->phy_err_count);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n",
+                        "ath10k PDEV TX stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HTT cookies queued", fw_stats->comp_queued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HTT cookies disp.", fw_stats->comp_delivered);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDU queued", fw_stats->msdu_enqued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDU queued", fw_stats->mpdu_enqued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDUs dropped", fw_stats->wmm_drop);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Local enqued", fw_stats->local_enqued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Local freed", fw_stats->local_freed);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HW queued", fw_stats->hw_queued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PPDUs reaped", fw_stats->hw_reaped);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Num underruns", fw_stats->underrun);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PPDUs cleaned", fw_stats->tx_abort);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs requed", fw_stats->mpdus_requed);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Excessive retries", fw_stats->tx_ko);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HW rate", fw_stats->data_rc);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Sched self tiggers", fw_stats->self_triggers);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Dropped due to SW retries",
+                        fw_stats->sw_retry_failure);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Illegal rate phy errors",
+                        fw_stats->illgl_rate_phy_err);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Pdev continous xretry", fw_stats->pdev_cont_xretry);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "TX timeout", fw_stats->pdev_tx_timeout);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PDEV resets", fw_stats->pdev_resets);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PHY underrun", fw_stats->phy_underrun);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDU is more than txop limit", fw_stats->txop_ovf);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n",
+                        "ath10k PDEV RX stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Mid PPDU route change",
+                        fw_stats->mid_ppdu_route_change);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Tot. number of statuses", fw_stats->status_rcvd);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on ring 0", fw_stats->r0_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on ring 1", fw_stats->r1_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on ring 2", fw_stats->r2_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on ring 3", fw_stats->r3_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDUs delivered to HTT", fw_stats->htt_msdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs delivered to HTT", fw_stats->htt_mpdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDUs delivered to stack", fw_stats->loc_msdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs delivered to stack", fw_stats->loc_mpdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Oversized AMSUs", fw_stats->oversize_amsdu);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PHY errors", fw_stats->phy_errs);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PHY errors drops", fw_stats->phy_err_drop);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n",
+                        "ath10k PEER stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                                "=================");
+
+       for (i = 0; i < fw_stats->peers; i++) {
+               len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+                                "Peer MAC address",
+                                fw_stats->peer_stat[i].peer_macaddr);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "Peer TX rate",
+                                fw_stats->peer_stat[i].peer_tx_rate);
+               len += scnprintf(buf + len, buf_len - len, "\n");
+       }
+
+       if (len > buf_len)
+               len = buf_len;
+
+       ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+       mutex_unlock(&ar->conf_mutex);
+
+       kfree(buf);
+       return ret_cnt;
+}
+
+static const struct file_operations fops_fw_stats = {
+       .read = ath10k_read_fw_stats,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+int ath10k_debug_create(struct ath10k *ar)
+{
+       ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
+                                                  ar->hw->wiphy->debugfsdir);
+
+       if (!ar->debug.debugfs_phy)
+               return -ENOMEM;
+
+       init_completion(&ar->debug.event_stats_compl);
+
+       debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
+                           &fops_fw_stats);
+
+       debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
+                           &fops_wmi_services);
+
+       return 0;
+}
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       va_start(args, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       if (ath10k_debug_mask & mask)
+               ath10k_printk(KERN_DEBUG, "%pV", &vaf);
+
+       trace_ath10k_log_dbg(mask, &vaf);
+
+       va_end(args);
+}
+EXPORT_SYMBOL(ath10k_dbg);
+
+void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+                    const char *msg, const char *prefix,
+                    const void *buf, size_t len)
+{
+       if (ath10k_debug_mask & mask) {
+               if (msg)
+                       ath10k_dbg(mask, "%s\n", msg);
+
+               print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
+       }
+
+       /* tracing code doesn't like null strings :/ */
+       trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
+                                 buf, len);
+}
+EXPORT_SYMBOL(ath10k_dbg_dump);
+
+#endif /* CONFIG_ATH10K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
new file mode 100644 (file)
index 0000000..168140c
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DEBUG_H_
+#define _DEBUG_H_
+
+#include <linux/types.h>
+#include "trace.h"
+
+enum ath10k_debug_mask {
+       ATH10K_DBG_PCI          = 0x00000001,
+       ATH10K_DBG_WMI          = 0x00000002,
+       ATH10K_DBG_HTC          = 0x00000004,
+       ATH10K_DBG_HTT          = 0x00000008,
+       ATH10K_DBG_MAC          = 0x00000010,
+       ATH10K_DBG_CORE         = 0x00000020,
+       ATH10K_DBG_PCI_DUMP     = 0x00000040,
+       ATH10K_DBG_HTT_DUMP     = 0x00000080,
+       ATH10K_DBG_MGMT         = 0x00000100,
+       ATH10K_DBG_DATA         = 0x00000200,
+       ATH10K_DBG_ANY          = 0xffffffff,
+};
+
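+/*
+ * The masks above may be ORed together, e.g. 0x3 enables both PCI
+ * and WMI messages; ath10k_debug_mask below holds the active set
+ * (typically exposed as a module parameter).
+ */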
+extern unsigned int ath10k_debug_mask;
+
+extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
+extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
+extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_read_service_map(struct ath10k *ar,
+                                  void *service_map,
+                                  size_t map_size);
+void ath10k_debug_read_target_stats(struct ath10k *ar,
+                                   struct wmi_stats_event *ev);
+
+#else
+static inline int ath10k_debug_create(struct ath10k *ar)
+{
+       return 0;
+}
+
+static inline void ath10k_debug_read_service_map(struct ath10k *ar,
+                                                void *service_map,
+                                                size_t map_size)
+{
+}
+
+static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
+                                                 struct wmi_stats_event *ev)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+                                     const char *fmt, ...);
+void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+                    const char *msg, const char *prefix,
+                    const void *buf, size_t len);
+#else /* CONFIG_ATH10K_DEBUG */
+
+static inline void ath10k_dbg(enum ath10k_debug_mask dbg_mask,
+                             const char *fmt, ...)
+{
+}
+
+static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+                                  const char *msg, const char *prefix,
+                                  const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUG */
+#endif /* _DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
new file mode 100644 (file)
index 0000000..73a24d4
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#include <linux/kernel.h>
+#include "core.h"
+
+struct ath10k_hif_cb {
+       int (*tx_completion)(struct ath10k *ar,
+                            struct sk_buff *wbuf,
+                            unsigned int transfer_id);
+       int (*rx_completion)(struct ath10k *ar,
+                            struct sk_buff *wbuf,
+                            u8 pipe_id);
+};
+
+struct ath10k_hif_ops {
+       /* Send the head of a buffer to HIF for transmission to the target. */
+       int (*send_head)(struct ath10k *ar, u8 pipe_id,
+                        unsigned int transfer_id,
+                        unsigned int nbytes,
+                        struct sk_buff *buf);
+
+       /*
+        * API to handle HIF-specific BMI message exchanges. This API
+        * is synchronous and may only be called from a context that
+        * can block (sleep).
+        */
+       int (*exchange_bmi_msg)(struct ath10k *ar,
+                               void *request, u32 request_len,
+                               void *response, u32 *response_len);
+
+       int (*start)(struct ath10k *ar);
+
+       void (*stop)(struct ath10k *ar);
+
+       int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
+                                  u8 *ul_pipe, u8 *dl_pipe,
+                                  int *ul_is_polled, int *dl_is_polled);
+
+       void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
+
+       /*
+        * Check if prior sends have completed.
+        *
+        * Check whether the pipe in question has any completed
+        * sends that have not yet been processed.
+        * This function is only relevant for HIF pipes that are configured
+        * to be polled rather than interrupt-driven.
+        */
+       void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
+
+       void (*init)(struct ath10k *ar,
+                    struct ath10k_hif_cb *callbacks);
+
+       u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+};
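+
+/*
+ * Each bus backend fills in an ops table and hands it to
+ * ath10k_core_create(). A rough sketch (names illustrative):
+ *
+ *	static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+ *		.send_head        = ath10k_pci_hif_send_head,
+ *		.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ *		.start            = ath10k_pci_hif_start,
+ *		...
+ *	};
+ */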
+
+static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
+                                      unsigned int transfer_id,
+                                      unsigned int nbytes,
+                                      struct sk_buff *buf)
+{
+       return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
+}
+
+static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
+                                             void *request, u32 request_len,
+                                             void *response, u32 *response_len)
+{
+       return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
+                                            response, response_len);
+}
+
+static inline int ath10k_hif_start(struct ath10k *ar)
+{
+       return ar->hif.ops->start(ar);
+}
+
+static inline void ath10k_hif_stop(struct ath10k *ar)
+{
+       ar->hif.ops->stop(ar);
+}
+
+static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
+                                                u16 service_id,
+                                                u8 *ul_pipe, u8 *dl_pipe,
+                                                int *ul_is_polled,
+                                                int *dl_is_polled)
+{
+       return ar->hif.ops->map_service_to_pipe(ar, service_id,
+                                               ul_pipe, dl_pipe,
+                                               ul_is_polled, dl_is_polled);
+}
+
+static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
+                                              u8 *ul_pipe, u8 *dl_pipe)
+{
+       ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
+                                                 u8 pipe_id, int force)
+{
+       ar->hif.ops->send_complete_check(ar, pipe_id, force);
+}
+
+static inline void ath10k_hif_init(struct ath10k *ar,
+                                  struct ath10k_hif_cb *callbacks)
+{
+       ar->hif.ops->init(ar, callbacks);
+}
+
+static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
+                                                  u8 pipe_id)
+{
+       return ar->hif.ops->get_free_queue_number(ar, pipe_id);
+}
+
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
new file mode 100644 (file)
index 0000000..74363c9
--- /dev/null
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+
+/********/
+/* Send */
+/********/
+
+static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
+                                                 int force)
+{
+       /*
+        * Check whether HIF has any prior sends that have completed
+        * but have not yet had their post-processing done.
+        */
+       ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
+}
+
+static void ath10k_htc_control_tx_complete(struct ath10k *ar,
+                                          struct sk_buff *skb)
+{
+       kfree_skb(skb);
+}
+
+static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
+{
+       struct sk_buff *skb;
+       struct ath10k_skb_cb *skb_cb;
+
+       skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
+       if (!skb) {
+               ath10k_warn("Unable to allocate ctrl skb\n");
+               return NULL;
+       }
+
+       skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
+       WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+       skb_cb = ATH10K_SKB_CB(skb);
+       memset(skb_cb, 0, sizeof(*skb_cb));
+
+       ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+       return skb;
+}
+
+static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
+                                            struct sk_buff *skb)
+{
+       ath10k_skb_unmap(htc->ar->dev, skb);
+       skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+}
+
+static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+                                           struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+                  ep->eid, skb);
+
+       ath10k_htc_restore_tx_skb(ep->htc, skb);
+
+       if (!ep->ep_ops.ep_tx_complete) {
+               ath10k_warn("no tx handler for eid %d\n", ep->eid);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
+       ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
+}
+
+/* assumes tx_lock is held */
+static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
+{
+       if (!ep->tx_credit_flow_enabled)
+               return false;
+       if (ep->tx_credits >= ep->tx_credits_per_max_message)
+               return false;
+
+       ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
+                  ep->eid);
+       return true;
+}
+
+static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
+                                     struct sk_buff *skb)
+{
+       struct ath10k_htc_hdr *hdr;
+
+       hdr = (struct ath10k_htc_hdr *)skb->data;
+       memset(hdr, 0, sizeof(*hdr));
+
+       hdr->eid = ep->eid;
+       hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+
+       spin_lock_bh(&ep->htc->tx_lock);
+       hdr->seq_no = ep->seq_no++;
+
+       if (ath10k_htc_ep_need_credit_update(ep))
+               hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
+
+       spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
+                               struct ath10k_htc_ep *ep,
+                               struct sk_buff *skb,
+                               u8 credits)
+{
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+       int ret;
+
+       ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+                  ep->eid, skb);
+
+       ath10k_htc_prepare_tx_skb(ep, skb);
+
+       ret = ath10k_skb_map(htc->ar->dev, skb);
+       if (ret)
+               goto err;
+
+       ret = ath10k_hif_send_head(htc->ar,
+                                  ep->ul_pipe_id,
+                                  ep->eid,
+                                  skb->len,
+                                  skb);
+       if (unlikely(ret))
+               goto err;
+
+       return 0;
+err:
+       ath10k_warn("HTC issue failed: %d\n", ret);
+
+       spin_lock_bh(&htc->tx_lock);
+       ep->tx_credits += credits;
+       spin_unlock_bh(&htc->tx_lock);
+
+       /* this is the simplest way to handle out-of-resources for non-credit
+        * based endpoints. credit based endpoints can still get -ENOSR, but
+        * this is highly unlikely as credit reservation should prevent that */
+       if (ret == -ENOSR) {
+               spin_lock_bh(&htc->tx_lock);
+               __skb_queue_head(&ep->tx_queue, skb);
+               spin_unlock_bh(&htc->tx_lock);
+
+               return ret;
+       }
+
+       skb_cb->is_aborted = true;
+       ath10k_htc_notify_tx_completion(ep, skb);
+
+       return ret;
+}
+
+static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
+                                                      struct ath10k_htc_ep *ep,
+                                                      u8 *credits)
+{
+       struct sk_buff *skb;
+       struct ath10k_skb_cb *skb_cb;
+       int credits_required;
+       int remainder;
+       unsigned int transfer_len;
+
+       lockdep_assert_held(&htc->tx_lock);
+
+       skb = __skb_dequeue(&ep->tx_queue);
+       if (!skb)
+               return NULL;
+
+       skb_cb = ATH10K_SKB_CB(skb);
+       transfer_len = skb->len;
+
+       if (likely(transfer_len <= htc->target_credit_size)) {
+               credits_required = 1;
+       } else {
+               /* figure out how many credits this message requires */
+               credits_required = transfer_len / htc->target_credit_size;
+               remainder = transfer_len % htc->target_credit_size;
+
+               if (remainder)
+                       credits_required++;
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
+                  credits_required, ep->tx_credits);
+
+       if (ep->tx_credits < credits_required) {
+               __skb_queue_head(&ep->tx_queue, skb);
+               return NULL;
+       }
+
+       ep->tx_credits -= credits_required;
+       *credits = credits_required;
+       return skb;
+}
+
+static void ath10k_htc_send_work(struct work_struct *work)
+{
+       struct ath10k_htc_ep *ep = container_of(work,
+                                       struct ath10k_htc_ep, send_work);
+       struct ath10k_htc *htc = ep->htc;
+       struct sk_buff *skb;
+       u8 credits = 0;
+       int ret;
+
+       while (true) {
+               if (ep->ul_is_polled)
+                       ath10k_htc_send_complete_check(ep, 0);
+
+               spin_lock_bh(&htc->tx_lock);
+               if (ep->tx_credit_flow_enabled)
+                       skb = ath10k_htc_get_skb_credit_based(htc, ep,
+                                                             &credits);
+               else
+                       skb = __skb_dequeue(&ep->tx_queue);
+               spin_unlock_bh(&htc->tx_lock);
+
+               if (!skb)
+                       break;
+
+               ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
+               if (ret == -ENOSR)
+                       break;
+       }
+}
+
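+/*
+ * Queue an skb for transmission on the given endpoint. The caller
+ * must leave sizeof(struct ath10k_htc_hdr) bytes of headroom, as
+ * the HTC header is pushed in front of the payload; completion is
+ * reported through the endpoint's ep_tx_complete callback.
+ */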
+int ath10k_htc_send(struct ath10k_htc *htc,
+                   enum ath10k_htc_ep_id eid,
+                   struct sk_buff *skb)
+{
+       struct ath10k_htc_ep *ep;
+
+       if (eid >= ATH10K_HTC_EP_COUNT) {
+               ath10k_warn("Invalid endpoint id: %d\n", eid);
+               return -ENOENT;
+       }
+
+       ep = &htc->endpoint[eid];
+
+       skb_push(skb, sizeof(struct ath10k_htc_hdr));
+
+       spin_lock_bh(&htc->tx_lock);
+       __skb_queue_tail(&ep->tx_queue, skb);
+       spin_unlock_bh(&htc->tx_lock);
+
+       queue_work(htc->ar->workqueue, &ep->send_work);
+       return 0;
+}
+
+static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
+                                           struct sk_buff *skb,
+                                           unsigned int eid)
+{
+       struct ath10k_htc *htc = ar->htc;
+       struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+       bool stopping;
+
+       ath10k_htc_notify_tx_completion(ep, skb);
+       /* the skb now belongs to the completion handler */
+
+       spin_lock_bh(&htc->tx_lock);
+       stopping = htc->stopping;
+       spin_unlock_bh(&htc->tx_lock);
+
+       if (!ep->tx_credit_flow_enabled && !stopping)
+               /*
+                * note: when using TX credit flow, the re-checking of
+                * queues happens when credits flow back from the target.
+                * in the non-TX credit case, we recheck after the packet
+                * completes
+                */
+               queue_work(ar->workqueue, &ep->send_work);
+
+       return 0;
+}
+
+/* flush endpoint TX queue */
+static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
+                                        struct ath10k_htc_ep *ep)
+{
+       struct sk_buff *skb;
+       struct ath10k_skb_cb *skb_cb;
+
+       spin_lock_bh(&htc->tx_lock);
+       for (;;) {
+               skb = __skb_dequeue(&ep->tx_queue);
+               if (!skb)
+                       break;
+
+               skb_cb = ATH10K_SKB_CB(skb);
+               skb_cb->is_aborted = true;
+               ath10k_htc_notify_tx_completion(ep, skb);
+       }
+       spin_unlock_bh(&htc->tx_lock);
+
+       cancel_work_sync(&ep->send_work);
+}
+
+/***********/
+/* Receive */
+/***********/
+
+static void
+ath10k_htc_process_credit_report(struct ath10k_htc *htc,
+                                const struct ath10k_htc_credit_report *report,
+                                int len,
+                                enum ath10k_htc_ep_id eid)
+{
+       struct ath10k_htc_ep *ep;
+       int i, n_reports;
+
+       if (len % sizeof(*report))
+               ath10k_warn("Uneven credit report len %d", len);
+
+       n_reports = len / sizeof(*report);
+
+       spin_lock_bh(&htc->tx_lock);
+       for (i = 0; i < n_reports; i++, report++) {
+               if (report->eid >= ATH10K_HTC_EP_COUNT)
+                       break;
+
+               ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
+                          report->eid, report->credits);
+
+               ep = &htc->endpoint[report->eid];
+               ep->tx_credits += report->credits;
+
+               if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
+                       queue_work(htc->ar->workqueue, &ep->send_work);
+       }
+       spin_unlock_bh(&htc->tx_lock);
+}
+
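+/*
+ * A trailer is one or more records packed back to back:
+ *
+ *	| record_hdr (id, len) | payload (len bytes) | record_hdr | ...
+ *
+ * Only credit reports are handled here; unknown records are skipped
+ * with a warning.
+ */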
+static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+                                     u8 *buffer,
+                                     int length,
+                                     enum ath10k_htc_ep_id src_eid)
+{
+       int status = 0;
+       struct ath10k_htc_record *record;
+       u8 *orig_buffer;
+       int orig_length;
+       size_t len;
+
+       orig_buffer = buffer;
+       orig_length = length;
+
+       while (length > 0) {
+               record = (struct ath10k_htc_record *)buffer;
+
+               if (length < sizeof(record->hdr)) {
+                       status = -EINVAL;
+                       break;
+               }
+
+               if (record->hdr.len > length) {
+                       /* no room left in buffer for record */
+                       ath10k_warn("Invalid record length: %d\n",
+                                   record->hdr.len);
+                       status = -EINVAL;
+                       break;
+               }
+
+               switch (record->hdr.id) {
+               case ATH10K_HTC_RECORD_CREDITS:
+                       len = sizeof(struct ath10k_htc_credit_report);
+                       if (record->hdr.len < len) {
+                               ath10k_warn("Credit report too long\n");
+                               status = -EINVAL;
+                               break;
+                       }
+                       ath10k_htc_process_credit_report(htc,
+                                                        record->credit_report,
+                                                        record->hdr.len,
+                                                        src_eid);
+                       break;
+               default:
+                       ath10k_warn("Unhandled record: id:%d length:%d\n",
+                                   record->hdr.id, record->hdr.len);
+                       break;
+               }
+
+               if (status)
+                       break;
+
+               /* multiple records may be present in a trailer */
+               buffer += sizeof(record->hdr) + record->hdr.len;
+               length -= sizeof(record->hdr) + record->hdr.len;
+       }
+
+       if (status)
+               ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
+                               orig_buffer, orig_length);
+
+       return status;
+}
+
+static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
+                                           struct sk_buff *skb,
+                                           u8 pipe_id)
+{
+       int status = 0;
+       struct ath10k_htc *htc = ar->htc;
+       struct ath10k_htc_hdr *hdr;
+       struct ath10k_htc_ep *ep;
+       u16 payload_len;
+       u32 trailer_len = 0;
+       size_t min_len;
+       u8 eid;
+       bool trailer_present;
+
+       hdr = (struct ath10k_htc_hdr *)skb->data;
+       skb_pull(skb, sizeof(*hdr));
+
+       eid = hdr->eid;
+
+       if (eid >= ATH10K_HTC_EP_COUNT) {
+               ath10k_warn("HTC Rx: invalid eid %d\n", eid);
+               ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
+                               hdr, sizeof(*hdr));
+               status = -EINVAL;
+               goto out;
+       }
+
+       ep = &htc->endpoint[eid];
+
+       /*
+        * If the endpoint that received this message has a to-target
+        * HIF pipe whose send completions are polled rather than
+        * interrupt-driven, this is a good point to ask HIF to check
+        * whether it has any completed sends to handle.
+        */
+       if (ep->ul_is_polled)
+               ath10k_htc_send_complete_check(ep, 1);
+
+       payload_len = __le16_to_cpu(hdr->len);
+
+       if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
+               ath10k_warn("HTC rx frame too long, len: %zu\n",
+                           payload_len + sizeof(*hdr));
+               ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
+                               hdr, sizeof(*hdr));
+               status = -EINVAL;
+               goto out;
+       }
+
+       if (skb->len < payload_len) {
+               ath10k_dbg(ATH10K_DBG_HTC,
+                          "HTC Rx: insufficient length, got %d, expected %d\n",
+                          skb->len, payload_len);
+               ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
+                               "", hdr, sizeof(*hdr));
+               status = -EINVAL;
+               goto out;
+       }
+
+       /* get flags to check for trailer */
+       trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+       if (trailer_present) {
+               u8 *trailer;
+
+               trailer_len = hdr->trailer_len;
+               min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
+
+               if ((trailer_len < min_len) ||
+                   (trailer_len > payload_len)) {
+                       ath10k_warn("Invalid trailer length: %d\n",
+                                   trailer_len);
+                       status = -EPROTO;
+                       goto out;
+               }
+
+               trailer = (u8 *)hdr;
+               trailer += sizeof(*hdr);
+               trailer += payload_len;
+               trailer -= trailer_len;
+               status = ath10k_htc_process_trailer(htc, trailer,
+                                                   trailer_len, hdr->eid);
+               if (status)
+                       goto out;
+
+               skb_trim(skb, skb->len - trailer_len);
+       }
+
+       if (((int)payload_len - (int)trailer_len) <= 0)
+               /* zero-length packet with trailer data; just drop it */
+               goto out;
+
+       if (eid == ATH10K_HTC_EP_0) {
+               struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
+
+               switch (__le16_to_cpu(msg->hdr.message_id)) {
+               case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+                       htc->htc_ops.target_send_suspend_complete(ar);
+                       break;
+               default:
+                       /* handle HTC control message */
+                       if (completion_done(&htc->ctl_resp)) {
+                               /*
+                                * This is a fatal error: the target
+                                * should not send unsolicited messages
+                                * on endpoint 0.
+                                */
+                               ath10k_warn("HTC rx ctrl still processing\n");
+                               status = -EINVAL;
+                               complete(&htc->ctl_resp);
+                               goto out;
+                       }
+
+                       htc->control_resp_len =
+                               min_t(int, skb->len,
+                                     ATH10K_HTC_MAX_CTRL_MSG_LEN);
+
+                       memcpy(htc->control_resp_buffer, skb->data,
+                              htc->control_resp_len);
+
+                       complete(&htc->ctl_resp);
+                       break;
+               }
+               goto out;
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+                  eid, skb);
+       ep->ep_ops.ep_rx_complete(ar, skb);
+
+       /* skb is now owned by the rx completion handler */
+       skb = NULL;
+out:
+       kfree_skb(skb);
+
+       return status;
+}
+
+static void ath10k_htc_control_rx_complete(struct ath10k *ar,
+                                          struct sk_buff *skb)
+{
+       /* This is unexpected. FW is not supposed to send regular rx on this
+        * endpoint. */
+       ath10k_warn("unexpected htc rx\n");
+       kfree_skb(skb);
+}
+
+/***************/
+/* Init/Deinit */
+/***************/
+
+static const char *htc_service_name(enum ath10k_htc_svc_id id)
+{
+       switch (id) {
+       case ATH10K_HTC_SVC_ID_RESERVED:
+               return "Reserved";
+       case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+               return "Control";
+       case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+               return "WMI";
+       case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+               return "DATA BE";
+       case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+               return "DATA BK";
+       case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+               return "DATA VI";
+       case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+               return "DATA VO";
+       case ATH10K_HTC_SVC_ID_NMI_CONTROL:
+               return "NMI Control";
+       case ATH10K_HTC_SVC_ID_NMI_DATA:
+               return "NMI Data";
+       case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+               return "HTT Data";
+       case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+               return "RAW";
+       }
+
+       return "Unknown";
+}
+
+static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
+{
+       struct ath10k_htc_ep *ep;
+       int i;
+
+       for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+               ep = &htc->endpoint[i];
+               ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
+               ep->max_ep_message_len = 0;
+               ep->max_tx_queue_depth = 0;
+               ep->eid = i;
+               skb_queue_head_init(&ep->tx_queue);
+               ep->htc = htc;
+               ep->tx_credit_flow_enabled = true;
+               INIT_WORK(&ep->send_work, ath10k_htc_send_work);
+       }
+}
+
+static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
+{
+       struct ath10k_htc_svc_tx_credits *entry;
+
+       entry = &htc->service_tx_alloc[0];
+
+       /*
+        * For PCIe, allocate all credits/HTC buffers to WMI. No
+        * buffers are used/required for data; data always remains
+        * on the host.
+        */
+       entry++;
+       entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+       entry->credit_allocation = htc->total_transmit_credits;
+}
+
+static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
+                                          u16 service_id)
+{
+       u8 allocation = 0;
+       int i;
+
+       for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
+               if (htc->service_tx_alloc[i].service_id == service_id)
+                       allocation =
+                           htc->service_tx_alloc[i].credit_allocation;
+       }
+
+       return allocation;
+}
+
+int ath10k_htc_wait_target(struct ath10k_htc *htc)
+{
+       int status = 0;
+       struct ath10k_htc_svc_conn_req conn_req;
+       struct ath10k_htc_svc_conn_resp conn_resp;
+       struct ath10k_htc_msg *msg;
+       u16 message_id;
+       u16 credit_count;
+       u16 credit_size;
+
+       INIT_COMPLETION(htc->ctl_resp);
+
+       status = ath10k_hif_start(htc->ar);
+       if (status) {
+               ath10k_err("could not start HIF (%d)\n", status);
+               goto err_start;
+       }
+
+       status = wait_for_completion_timeout(&htc->ctl_resp,
+                                            ATH10K_HTC_WAIT_TIMEOUT_HZ);
+       if (status <= 0) {
+               if (status == 0)
+                       status = -ETIMEDOUT;
+
+               ath10k_err("ctl_resp never came in (%d)\n", status);
+               goto err_target;
+       }
+
+       if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
+               ath10k_err("Invalid HTC ready msg len:%d\n",
+                          htc->control_resp_len);
+
+               status = -ECOMM;
+               goto err_target;
+       }
+
+       msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+       message_id   = __le16_to_cpu(msg->hdr.message_id);
+       credit_count = __le16_to_cpu(msg->ready.credit_count);
+       credit_size  = __le16_to_cpu(msg->ready.credit_size);
+
+       if (message_id != ATH10K_HTC_MSG_READY_ID) {
+               ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
+               status = -ECOMM;
+               goto err_target;
+       }
+
+       htc->total_transmit_credits = credit_count;
+       htc->target_credit_size = credit_size;
+
+       ath10k_dbg(ATH10K_DBG_HTC,
+                  "Target ready! transmit resources: %d size:%d\n",
+                  htc->total_transmit_credits,
+                  htc->target_credit_size);
+
+       if ((htc->total_transmit_credits == 0) ||
+           (htc->target_credit_size == 0)) {
+               status = -ECOMM;
+               ath10k_err("Invalid credit size received\n");
+               goto err_target;
+       }
+
+       ath10k_htc_setup_target_buffer_assignments(htc);
+
+       /* setup our pseudo HTC control endpoint connection */
+       memset(&conn_req, 0, sizeof(conn_req));
+       memset(&conn_resp, 0, sizeof(conn_resp));
+       conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
+       conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
+       conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
+       conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
+
+       /* connect fake service */
+       status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+       if (status) {
+               ath10k_err("could not connect to htc service (%d)\n", status);
+               goto err_target;
+       }
+
+       return 0;
+err_target:
+       ath10k_hif_stop(htc->ar);
+err_start:
+       return status;
+}
+
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+                              struct ath10k_htc_svc_conn_req *conn_req,
+                              struct ath10k_htc_svc_conn_resp *conn_resp)
+{
+       struct ath10k_htc_msg *msg;
+       struct ath10k_htc_conn_svc *req_msg;
+       struct ath10k_htc_conn_svc_response resp_msg_dummy;
+       struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
+       enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
+       struct ath10k_htc_ep *ep;
+       struct sk_buff *skb;
+       unsigned int max_msg_size = 0;
+       int length, status;
+       bool disable_credit_flow_ctrl = false;
+       u16 message_id, service_id, flags = 0;
+       u8 tx_alloc = 0;
+
+       /* special case for HTC pseudo control service */
+       if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
+               disable_credit_flow_ctrl = true;
+               assigned_eid = ATH10K_HTC_EP_0;
+               max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
+               memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+               goto setup;
+       }
+
+       tx_alloc = ath10k_htc_get_credit_allocation(htc,
+                                                   conn_req->service_id);
+       if (!tx_alloc)
+               ath10k_warn("HTC Service %s does not allocate target credits\n",
+                           htc_service_name(conn_req->service_id));
+
+       skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+       if (!skb) {
+               ath10k_err("Failed to allocate HTC packet\n");
+               return -ENOMEM;
+       }
+
+       length = sizeof(msg->hdr) + sizeof(msg->connect_service);
+       skb_put(skb, length);
+       memset(skb->data, 0, length);
+
+       msg = (struct ath10k_htc_msg *)skb->data;
+       msg->hdr.message_id =
+               __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
+
+       flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+       /* Only enable credit flow control for WMI ctrl service. This
+        * must be decided before the flags are copied into the request
+        * message below. */
+       if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
+               flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+               disable_credit_flow_ctrl = true;
+       }
+
+       req_msg = &msg->connect_service;
+       req_msg->flags = __cpu_to_le16(flags);
+       req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+       INIT_COMPLETION(htc->ctl_resp);
+
+       status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+       if (status) {
+               kfree_skb(skb);
+               return status;
+       }
+
+       /* wait for response */
+       status = wait_for_completion_timeout(&htc->ctl_resp,
+                                            ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+       if (status <= 0) {
+               if (status == 0)
+                       status = -ETIMEDOUT;
+               ath10k_err("Service connect timeout: %d\n", status);
+               return status;
+       }
+
+       /* we controlled the buffer creation, it's aligned */
+       msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+       resp_msg = &msg->connect_service_response;
+       message_id = __le16_to_cpu(msg->hdr.message_id);
+       service_id = __le16_to_cpu(resp_msg->service_id);
+
+       if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+           (htc->control_resp_len < sizeof(msg->hdr) +
+            sizeof(msg->connect_service_response))) {
+               ath10k_err("Invalid resp message ID 0x%x\n", message_id);
+               return -EPROTO;
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTC,
+                  "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
+                  htc_service_name(service_id),
+                  resp_msg->status, resp_msg->eid);
+
+       conn_resp->connect_resp_code = resp_msg->status;
+
+       /* check response status */
+       if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
+               ath10k_err("HTC Service %s connect request failed: 0x%x\n",
+                          htc_service_name(service_id),
+                          resp_msg->status);
+               return -EPROTO;
+       }
+
+       assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
+       max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
+
+setup:
+
+       if (assigned_eid >= ATH10K_HTC_EP_COUNT)
+               return -EPROTO;
+
+       if (max_msg_size == 0)
+               return -EPROTO;
+
+       ep = &htc->endpoint[assigned_eid];
+       ep->eid = assigned_eid;
+
+       if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
+               return -EPROTO;
+
+       /* return assigned endpoint to caller; use the validated
+        * max_msg_size, since resp_msg points at a zeroed dummy for the
+        * pseudo control endpoint */
+       conn_resp->eid = assigned_eid;
+       conn_resp->max_msg_len = max_msg_size;
+
+       /* setup the endpoint */
+       ep->service_id = conn_req->service_id;
+       ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+       ep->max_ep_message_len = max_msg_size;
+       ep->tx_credits = tx_alloc;
+       ep->tx_credit_size = htc->target_credit_size;
+       ep->tx_credits_per_max_message = ep->max_ep_message_len /
+                                        htc->target_credit_size;
+
+       if (ep->max_ep_message_len % htc->target_credit_size)
+               ep->tx_credits_per_max_message++;
+
+       /* copy all the callbacks */
+       ep->ep_ops = conn_req->ep_ops;
+
+       status = ath10k_hif_map_service_to_pipe(htc->ar,
+                                               ep->service_id,
+                                               &ep->ul_pipe_id,
+                                               &ep->dl_pipe_id,
+                                               &ep->ul_is_polled,
+                                               &ep->dl_is_polled);
+       if (status)
+               return status;
+
+       ath10k_dbg(ATH10K_DBG_HTC,
+                  "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+                  htc_service_name(ep->service_id), ep->ul_pipe_id,
+                  ep->dl_pipe_id, ep->eid);
+
+       ath10k_dbg(ATH10K_DBG_HTC,
+                  "EP %d UL polled: %d, DL polled: %d\n",
+                  ep->eid, ep->ul_is_polled, ep->dl_is_polled);
+
+       if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+               ep->tx_credit_flow_enabled = false;
+               ath10k_dbg(ATH10K_DBG_HTC,
+                          "HTC service: %s eid: %d TX flow control disabled\n",
+                          htc_service_name(ep->service_id), assigned_eid);
+       }
+
+       return status;
+}
+
+struct sk_buff *ath10k_htc_alloc_skb(int size)
+{
+       struct sk_buff *skb;
+
+       skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
+       if (!skb) {
+               ath10k_warn("could not allocate HTC tx skb\n");
+               return NULL;
+       }
+
+       skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
+
+       /* FW/HTC requires 4-byte aligned streams */
+       if (!IS_ALIGNED((unsigned long)skb->data, 4))
+               ath10k_warn("Unaligned HTC tx skb\n");
+
+       return skb;
+}
+
+int ath10k_htc_start(struct ath10k_htc *htc)
+{
+       struct sk_buff *skb;
+       int status = 0;
+       struct ath10k_htc_msg *msg;
+
+       skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
+       memset(skb->data, 0, skb->len);
+
+       msg = (struct ath10k_htc_msg *)skb->data;
+       msg->hdr.message_id =
+               __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+       ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+       status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+       if (status) {
+               kfree_skb(skb);
+               return status;
+       }
+
+       return 0;
+}
+
+/*
+ * stop HTC communications, i.e. stop interrupt reception, and flush all
+ * queued buffers
+ */
+void ath10k_htc_stop(struct ath10k_htc *htc)
+{
+       int i;
+       struct ath10k_htc_ep *ep;
+
+       spin_lock_bh(&htc->tx_lock);
+       htc->stopping = true;
+       spin_unlock_bh(&htc->tx_lock);
+
+       for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+               ep = &htc->endpoint[i];
+               ath10k_htc_flush_endpoint_tx(htc, ep);
+       }
+
+       ath10k_hif_stop(htc->ar);
+       ath10k_htc_reset_endpoint_states(htc);
+}
+
+/* registered target arrival callback from the HIF layer */
+struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
+                                    struct ath10k_htc_ops *htc_ops)
+{
+       struct ath10k_hif_cb htc_callbacks;
+       struct ath10k_htc_ep *ep = NULL;
+       struct ath10k_htc *htc = NULL;
+
+       /* FIXME: use struct ath10k instead */
+       htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
+       if (!htc)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock_init(&htc->tx_lock);
+
+       memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
+
+       ath10k_htc_reset_endpoint_states(htc);
+
+       /* setup HIF layer callbacks */
+       htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
+       htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
+       htc->ar = ar;
+
+       /* Get HIF default pipe for HTC message exchange */
+       ep = &htc->endpoint[ATH10K_HTC_EP_0];
+
+       ath10k_hif_init(ar, &htc_callbacks);
+       ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
+
+       init_completion(&htc->ctl_resp);
+
+       return htc;
+}
+
+void ath10k_htc_destroy(struct ath10k_htc *htc)
+{
+       kfree(htc);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
new file mode 100644 (file)
index 0000000..fa45844
--- /dev/null
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTC_H_
+#define _HTC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+
+struct ath10k;
+
+/****************/
+/* HTC protocol */
+/****************/
+
+/*
+ * HTC - host-target control protocol
+ *
+ * tx packets are generally <htc_hdr><payload>
+ * rx packets are more complex: <htc_hdr><payload><trailer>
+ *
+ * The payload + trailer length is stored in len.
+ * To get the payload-only length, subtract trailer_len from len.
+ *
+ * The trailer contains (possibly) multiple <htc_record> entries.
+ * Each record is an id-len-value triplet.
+ *
+ * The HTC header flags, control_byte0 and control_byte1
+ * have different meanings depending on whether the frame
+ * is tx or rx.
+ *
+ * Alignment: htc_hdr, payload and trailer are
+ * 4-byte aligned.
+ */
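+
+/*
+ * Illustrative sketch (not part of the driver): given the layout
+ * above, an rx consumer could recover the payload-only length like
+ * this, assuming it has already checked the trailer-present flag:
+ *
+ *      struct ath10k_htc_hdr *hdr = (struct ath10k_htc_hdr *)skb->data;
+ *      int payload_len = __le16_to_cpu(hdr->len) - hdr->trailer_len;
+ *
+ * The trailer then starts at skb->data + sizeof(*hdr) + payload_len.
+ */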
+
+enum ath10k_htc_tx_flags {
+       ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+       ATH10K_HTC_FLAG_SEND_BUNDLE        = 0x02
+};
+
+enum ath10k_htc_rx_flags {
+       ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+       ATH10K_HTC_FLAG_BUNDLE_MASK     = 0xF0
+};
+
+struct ath10k_htc_hdr {
+       u8 eid; /* @enum ath10k_htc_ep_id */
+       u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
+       __le16 len;
+       union {
+               u8 trailer_len; /* for rx */
+               u8 control_byte0;
+       } __packed;
+       union {
+               u8 seq_no; /* for tx */
+               u8 control_byte1;
+       } __packed;
+       u8 pad0;
+       u8 pad1;
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_msg_id {
+       ATH10K_HTC_MSG_READY_ID                = 1,
+       ATH10K_HTC_MSG_CONNECT_SERVICE_ID      = 2,
+       ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+       ATH10K_HTC_MSG_SETUP_COMPLETE_ID       = 4,
+       ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID    = 5,
+       ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE   = 6
+};
+
+enum ath10k_htc_version {
+       ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+       ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+enum ath10k_htc_conn_flags {
+       ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH    = 0x0,
+       ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF      = 0x1,
+       ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+       ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY         = 0x3,
+#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
+       ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE    = 1 << 2,
+       ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB  8
+};
+
+enum ath10k_htc_conn_svc_status {
+       ATH10K_HTC_CONN_SVC_STATUS_SUCCESS      = 0,
+       ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND    = 1,
+       ATH10K_HTC_CONN_SVC_STATUS_FAILED       = 2,
+       ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+       ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP   = 4
+};
+
+struct ath10k_ath10k_htc_msg_hdr {
+       __le16 message_id; /* @enum htc_message_id */
+} __packed;
+
+struct ath10k_htc_unknown {
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_ready {
+       __le16 credit_count;
+       __le16 credit_size;
+       u8 max_endpoints;
+       u8 pad0;
+} __packed;
+
+struct ath10k_htc_ready_extended {
+       struct ath10k_htc_ready base;
+       u8 htc_version; /* @enum ath10k_htc_version */
+       u8 max_msgs_per_htc_bundle;
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc {
+       __le16 service_id;
+       __le16 flags; /* @enum ath10k_htc_conn_flags */
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc_response {
+       __le16 service_id;
+       u8 status; /* @enum ath10k_htc_conn_svc_status */
+       u8 eid;
+       __le16 max_msg_size;
+} __packed;
+
+struct ath10k_htc_setup_complete_extended {
+       u8 pad0;
+       u8 pad1;
+       __le32 flags; /* @enum htc_setup_complete_flags */
+       u8 max_msgs_per_bundled_recv;
+       u8 pad2;
+       u8 pad3;
+       u8 pad4;
+} __packed;
+
+struct ath10k_htc_msg {
+       struct ath10k_ath10k_htc_msg_hdr hdr;
+       union {
+               /* host-to-target */
+               struct ath10k_htc_conn_svc connect_service;
+               struct ath10k_htc_ready ready;
+               struct ath10k_htc_ready_extended ready_ext;
+               struct ath10k_htc_unknown unknown;
+               struct ath10k_htc_setup_complete_extended setup_complete_ext;
+
+               /* target-to-host */
+               struct ath10k_htc_conn_svc_response connect_service_response;
+       };
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_record_id {
+       ATH10K_HTC_RECORD_NULL    = 0,
+       ATH10K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath10k_ath10k_htc_record_hdr {
+       u8 id; /* @enum ath10k_ath10k_htc_record_id */
+       u8 len;
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_credit_report {
+       u8 eid; /* @enum ath10k_htc_ep_id */
+       u8 credits;
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_record {
+       struct ath10k_ath10k_htc_record_hdr hdr;
+       union {
+               struct ath10k_htc_credit_report credit_report[0];
+               u8 payload[0];
+       };
+} __packed __aligned(4);
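+
+/*
+ * Illustrative sketch (not part of the driver): walking the trailer
+ * records; 'trailer' (u8 *) and 'len' (int) are hypothetical locals
+ * describing the trailer region, and account_credits() is a
+ * hypothetical consumer:
+ *
+ *      while (len >= (int)sizeof(struct ath10k_ath10k_htc_record_hdr)) {
+ *              struct ath10k_htc_record *rec = (void *)trailer;
+ *              int rec_len = sizeof(rec->hdr) + rec->hdr.len;
+ *
+ *              if (rec_len > len)
+ *                      break; (malformed trailer)
+ *
+ *              if (rec->hdr.id == ATH10K_HTC_RECORD_CREDITS)
+ *                      account_credits(rec->credit_report,
+ *                                      rec->hdr.len /
+ *                                      sizeof(*rec->credit_report));
+ *              trailer += rec_len;
+ *              len -= rec_len;
+ *      }
+ */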
+
+/*
+ * Note: the trailer offset is dynamic, depending on the
+ * payload length. This is only a struct layout draft.
+ */
+struct ath10k_htc_frame {
+       struct ath10k_htc_hdr hdr;
+       union {
+               struct ath10k_htc_msg msg;
+               u8 payload[0];
+       };
+       struct ath10k_htc_record trailer[0];
+} __packed __aligned(4);
+
+
+/*******************/
+/* Host-side stuff */
+/*******************/
+
+enum ath10k_htc_svc_gid {
+       ATH10K_HTC_SVC_GRP_RSVD = 0,
+       ATH10K_HTC_SVC_GRP_WMI = 1,
+       ATH10K_HTC_SVC_GRP_NMI = 2,
+       ATH10K_HTC_SVC_GRP_HTT = 3,
+
+       ATH10K_HTC_SVC_GRP_TEST = 254,
+       ATH10K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+       (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath10k_htc_svc_id {
+       /* NOTE: service ID of 0x0000 is reserved and should never be used */
+       ATH10K_HTC_SVC_ID_RESERVED      = 0x0000,
+       ATH10K_HTC_SVC_ID_UNUSED        = ATH10K_HTC_SVC_ID_RESERVED,
+
+       ATH10K_HTC_SVC_ID_RSVD_CTRL     = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
+       ATH10K_HTC_SVC_ID_WMI_CONTROL   = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
+       ATH10K_HTC_SVC_ID_WMI_DATA_BE   = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
+       ATH10K_HTC_SVC_ID_WMI_DATA_BK   = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
+       ATH10K_HTC_SVC_ID_WMI_DATA_VI   = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
+       ATH10K_HTC_SVC_ID_WMI_DATA_VO   = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
+
+       ATH10K_HTC_SVC_ID_NMI_CONTROL   = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
+       ATH10K_HTC_SVC_ID_NMI_DATA      = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
+
+       ATH10K_HTC_SVC_ID_HTT_DATA_MSG  = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
+
+       /* raw stream service (i.e. flash, tcmd, calibration apps) */
+       ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
+};
+
+#undef SVC
+
+enum ath10k_htc_ep_id {
+       ATH10K_HTC_EP_UNUSED = -1,
+       ATH10K_HTC_EP_0 = 0,
+       ATH10K_HTC_EP_1 = 1,
+       ATH10K_HTC_EP_2,
+       ATH10K_HTC_EP_3,
+       ATH10K_HTC_EP_4,
+       ATH10K_HTC_EP_5,
+       ATH10K_HTC_EP_6,
+       ATH10K_HTC_EP_7,
+       ATH10K_HTC_EP_8,
+       ATH10K_HTC_EP_COUNT,
+};
+
+struct ath10k_htc_ops {
+       void (*target_send_suspend_complete)(struct ath10k *ar);
+};
+
+struct ath10k_htc_ep_ops {
+       void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
+       void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+};
+
+/* service connection information */
+struct ath10k_htc_svc_conn_req {
+       u16 service_id;
+       struct ath10k_htc_ep_ops ep_ops;
+       int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath10k_htc_svc_conn_resp {
+       u8 buffer_len;
+       u8 actual_len;
+       enum ath10k_htc_ep_id eid;
+       unsigned int max_msg_len;
+       u8 connect_resp_code;
+};
+
+#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH10K_HTC_MAX_LEN 4096
+#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
+                                       sizeof(struct ath10k_htc_hdr))
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+
+struct ath10k_htc_ep {
+       struct ath10k_htc *htc;
+       enum ath10k_htc_ep_id eid;
+       enum ath10k_htc_svc_id service_id;
+       struct ath10k_htc_ep_ops ep_ops;
+
+       int max_tx_queue_depth;
+       int max_ep_message_len;
+       u8 ul_pipe_id;
+       u8 dl_pipe_id;
+       int ul_is_polled; /* call HIF to get tx completions */
+       int dl_is_polled; /* call HIF to fetch rx (not implemented) */
+
+       struct sk_buff_head tx_queue;
+
+       u8 seq_no; /* for debugging */
+       int tx_credits;
+       int tx_credit_size;
+       int tx_credits_per_max_message;
+       bool tx_credit_flow_enabled;
+
+       struct work_struct send_work;
+};
+
+struct ath10k_htc_svc_tx_credits {
+       u16 service_id;
+       u8  credit_allocation;
+};
+
+struct ath10k_htc {
+       struct ath10k *ar;
+       struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
+
+       /* protects endpoint and stopping fields */
+       spinlock_t tx_lock;
+
+       struct ath10k_htc_ops htc_ops;
+
+       u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
+       int control_resp_len;
+
+       struct completion ctl_resp;
+
+       int total_transmit_credits;
+       struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
+       int target_credit_size;
+
+       bool stopping;
+};
+
+struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
+                                    struct ath10k_htc_ops *htc_ops);
+int ath10k_htc_wait_target(struct ath10k_htc *htc);
+int ath10k_htc_start(struct ath10k_htc *htc);
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+                              struct ath10k_htc_svc_conn_req  *conn_req,
+                              struct ath10k_htc_svc_conn_resp *conn_resp);
+int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+                   struct sk_buff *packet);
+void ath10k_htc_stop(struct ath10k_htc *htc);
+void ath10k_htc_destroy(struct ath10k_htc *htc);
+struct sk_buff *ath10k_htc_alloc_skb(int size);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
new file mode 100644 (file)
index 0000000..185a546
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "htt.h"
+#include "core.h"
+#include "debug.h"
+
+static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
+{
+       struct ath10k_htc_svc_conn_req conn_req;
+       struct ath10k_htc_svc_conn_resp conn_resp;
+       int status;
+
+       memset(&conn_req, 0, sizeof(conn_req));
+       memset(&conn_resp, 0, sizeof(conn_resp));
+
+       conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
+       conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+
+       /* connect to control service */
+       conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
+
+       status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
+                                           &conn_resp);
+
+       if (status)
+               return status;
+
+       htt->eid = conn_resp.eid;
+
+       return 0;
+}
+
+struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
+{
+       struct ath10k_htt *htt;
+       int ret;
+
+       htt = kzalloc(sizeof(*htt), GFP_KERNEL);
+       if (!htt)
+               return NULL;
+
+       htt->ar = ar;
+       htt->max_throughput_mbps = 800;
+
+       /*
+        * Connect to HTC service.
+        * This has to be done before calling ath10k_htt_rx_attach,
+        * since ath10k_htt_rx_attach involves sending a rx ring configure
+        * message to the target.
+        */
+       if (ath10k_htt_htc_attach(htt))
+               goto err_htc_attach;
+
+       ret = ath10k_htt_tx_attach(htt);
+       if (ret) {
+               ath10k_err("could not attach htt tx (%d)\n", ret);
+               goto err_htc_attach;
+       }
+
+       if (ath10k_htt_rx_attach(htt))
+               goto err_rx_attach;
+
+       /*
+        * Prefetch enough data to satisfy target
+        * classification engine.
+        * This is for LL chips. HL chips will probably
+        * transfer the whole frame in the tx fragment.
+        */
+       htt->prefetch_len =
+               36 + /* 802.11 + qos + ht */
+               4 + /* 802.1q */
+               8 + /* llc snap */
+               2; /* ip4 dscp or ip6 priority */
+
+       return htt;
+
+err_rx_attach:
+       ath10k_htt_tx_detach(htt);
+err_htc_attach:
+       kfree(htt);
+       return NULL;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+
+static int ath10k_htt_verify_version(struct ath10k_htt *htt)
+{
+       ath10k_dbg(ATH10K_DBG_HTT,
+                  "htt target version %d.%d; host version %d.%d\n",
+                   htt->target_version_major,
+                   htt->target_version_minor,
+                   HTT_CURRENT_VERSION_MAJOR,
+                   HTT_CURRENT_VERSION_MINOR);
+
+       if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
+               ath10k_err("htt major versions are incompatible!\n");
+               return -ENOTSUPP;
+       }
+
+       if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
+               ath10k_warn("htt minor versions differ but are still compatible\n");
+
+       return 0;
+}
+
+int ath10k_htt_attach_target(struct ath10k_htt *htt)
+{
+       int status;
+
+       init_completion(&htt->target_version_received);
+
+       status = ath10k_htt_h2t_ver_req_msg(htt);
+       if (status)
+               return status;
+
+       status = wait_for_completion_timeout(&htt->target_version_received,
+                                               HTT_TARGET_VERSION_TIMEOUT_HZ);
+       if (status <= 0) {
+               ath10k_warn("htt version request timed out\n");
+               return -ETIMEDOUT;
+       }
+
+       status = ath10k_htt_verify_version(htt);
+       if (status)
+               return status;
+
+       return ath10k_htt_send_rx_ring_cfg_ll(htt);
+}
+
+void ath10k_htt_detach(struct ath10k_htt *htt)
+{
+       ath10k_htt_rx_detach(htt);
+       ath10k_htt_tx_detach(htt);
+       kfree(htt);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
new file mode 100644 (file)
index 0000000..a7a7aa0
--- /dev/null
@@ -0,0 +1,1338 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTT_H_
+#define _HTT_H_
+
+#include <linux/bug.h>
+
+#include "core.h"
+#include "htc.h"
+#include "rx_desc.h"
+
+#define HTT_CURRENT_VERSION_MAJOR      2
+#define HTT_CURRENT_VERSION_MINOR      1
+
+enum htt_dbg_stats_type {
+       HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
+       HTT_DBG_STATS_RX_REORDER    = 1 << 1,
+       HTT_DBG_STATS_RX_RATE_INFO  = 1 << 2,
+       HTT_DBG_STATS_TX_PPDU_LOG   = 1 << 3,
+       HTT_DBG_STATS_TX_RATE_INFO  = 1 << 4,
+       /* bits 5-23 currently reserved */
+
+       HTT_DBG_NUM_STATS /* keep this last */
+};
+
+enum htt_h2t_msg_type { /* host-to-target */
+       HTT_H2T_MSG_TYPE_VERSION_REQ        = 0,
+       HTT_H2T_MSG_TYPE_TX_FRM             = 1,
+       HTT_H2T_MSG_TYPE_RX_RING_CFG        = 2,
+       HTT_H2T_MSG_TYPE_STATS_REQ          = 3,
+       HTT_H2T_MSG_TYPE_SYNC               = 4,
+       HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
+       HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+       HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
+
+       HTT_H2T_NUM_MSGS /* keep this last */
+};
+
+struct htt_cmd_hdr {
+       u8 msg_type;
+} __packed;
+
+struct htt_ver_req {
+       u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+} __packed;
+
+/*
+ * HTT tx MSDU descriptor
+ *
+ * The HTT tx MSDU descriptor is created by the host HTT SW for each
+ * tx MSDU.  The HTT tx MSDU descriptor contains the information that
+ * the target firmware needs for the FW's tx processing, particularly
+ * for creating the HW msdu descriptor.
+ * The same HTT tx descriptor is used for HL and LL systems, though
+ * a few fields within the tx descriptor are used only by LL or
+ * only by HL.
+ * The HTT tx descriptor is defined in two manners: by a struct with
+ * bitfields, and by a series of [dword offset, bit mask, bit shift]
+ * definitions.
+ * The target should use the struct def, for simplicity and clarity,
+ * but the host shall use the bit-mask + bit-shift defs, to be endian-
+ * neutral.  Specifically, the host shall use the get/set macros built
+ * around the mask + shift defs.
+ */
+struct htt_data_tx_desc_frag {
+       __le32 paddr;
+       __le32 len;
+} __packed;
+
+enum htt_data_tx_desc_flags0 {
+       HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
+       HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
+       HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT      = 1 << 2,
+       HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY     = 1 << 3,
+       HTT_DATA_TX_DESC_FLAGS0_RSVD0           = 1 << 4
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
+};
+
+enum htt_data_tx_desc_flags1 {
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB  0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB  6
+       HTT_DATA_TX_DESC_FLAGS1_POSTPONED        = 1 << 11,
+       HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH    = 1 << 12,
+       HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
+       HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
+       HTT_DATA_TX_DESC_FLAGS1_RSVD1            = 1 << 15
+};
+
+enum htt_data_tx_ext_tid {
+       HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
+       HTT_DATA_TX_EXT_TID_MGMT                = 17,
+       HTT_DATA_TX_EXT_TID_INVALID             = 31
+};
+
+#define HTT_INVALID_PEERID 0xFFFF
+
+/*
+ * htt_data_tx_desc - used for data tx path
+ *
+ * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
+ *       ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
+ *                for special kinds of tids
+ *       postponed: only for HL hosts. indicates if this is a resend
+ *                  (HL hosts manage queues on the host)
+ *       more_in_batch: only for HL hosts. indicates if more packets are
+ *                      pending. this allows target to wait and aggregate
+ */
+struct htt_data_tx_desc {
+       u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+       __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+       __le16 len;
+       __le16 id;
+       __le32 frags_paddr;
+       __le32 peerid;
+       u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
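+
+/*
+ * Illustrative sketch (not part of the driver): per the descriptor
+ * comment above, the host packs flags1 through the mask/LSB defs
+ * rather than bitfields; vdev_id and ext_tid are hypothetical locals
+ * and desc points at a struct htt_data_tx_desc:
+ *
+ *      u16 flags1 = 0;
+ *
+ *      flags1 |= (vdev_id << HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB) &
+ *                HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK;
+ *      flags1 |= (ext_tid << HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB) &
+ *                HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK;
+ *      desc->flags1 = __cpu_to_le16(flags1);
+ */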
+
+enum htt_rx_ring_flags {
+       HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
+       HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
+       HTT_RX_RING_FLAGS_PPDU_START   = 1 << 2,
+       HTT_RX_RING_FLAGS_PPDU_END     = 1 << 3,
+       HTT_RX_RING_FLAGS_MPDU_START   = 1 << 4,
+       HTT_RX_RING_FLAGS_MPDU_END     = 1 << 5,
+       HTT_RX_RING_FLAGS_MSDU_START   = 1 << 6,
+       HTT_RX_RING_FLAGS_MSDU_END     = 1 << 7,
+       HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
+       HTT_RX_RING_FLAGS_FRAG_INFO    = 1 << 9,
+       HTT_RX_RING_FLAGS_UNICAST_RX   = 1 << 10,
+       HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
+       HTT_RX_RING_FLAGS_CTRL_RX      = 1 << 12,
+       HTT_RX_RING_FLAGS_MGMT_RX      = 1 << 13,
+       HTT_RX_RING_FLAGS_NULL_RX      = 1 << 14,
+       HTT_RX_RING_FLAGS_PHY_DATA_RX  = 1 << 15
+};
+
+struct htt_rx_ring_setup_ring {
+       __le32 fw_idx_shadow_reg_paddr;
+       __le32 rx_ring_base_paddr;
+       __le16 rx_ring_len; /* in 4-byte words */
+       __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+       __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+       __le16 fw_idx_init_val;
+
+       /* the following offsets are in 4-byte units */
+       __le16 mac80211_hdr_offset;
+       __le16 msdu_payload_offset;
+       __le16 ppdu_start_offset;
+       __le16 ppdu_end_offset;
+       __le16 mpdu_start_offset;
+       __le16 mpdu_end_offset;
+       __le16 msdu_start_offset;
+       __le16 msdu_end_offset;
+       __le16 rx_attention_offset;
+       __le16 frag_info_offset;
+} __packed;
+
+struct htt_rx_ring_setup_hdr {
+       u8 num_rings; /* supported values: 1, 2 */
+       __le16 rsvd0;
+} __packed;
+
+struct htt_rx_ring_setup {
+       struct htt_rx_ring_setup_hdr hdr;
+       struct htt_rx_ring_setup_ring rings[0];
+} __packed;
+
+/*
+ * htt_stats_req - request target to send specified statistics
+ *
+ * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
+ * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit
+ *     field, so make sure it is little-endian.
+ * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit
+ *     field, so make sure it is little-endian.
+ * @cfg_val: stat_type specific configuration
+ * @stat_type: see %htt_dbg_stats_type
+ * @cookie_lsb: used for confirmation message from target->host
+ * @cookie_msb: ditto as %cookie
+ */
+struct htt_stats_req {
+       u8 upload_types[3];
+       u8 rsvd0;
+       u8 reset_types[3];
+       struct {
+               u8 mpdu_bytes;
+               u8 mpdu_num_msdus;
+               u8 msdu_bytes;
+       } __packed;
+       u8 stat_type;
+       __le32 cookie_lsb;
+       __le32 cookie_msb;
+} __packed;
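+
+/*
+ * Illustrative sketch (not part of the driver): packing a 24-bit stats
+ * mask into the little-endian upload_types[] array described above;
+ * 'req' is a hypothetical pointer to a zeroed struct htt_stats_req:
+ *
+ *      u32 mask = HTT_DBG_STATS_RX_REORDER | HTT_DBG_STATS_RX_RATE_INFO;
+ *
+ *      req->upload_types[0] = (mask >> 0)  & 0xff;
+ *      req->upload_types[1] = (mask >> 8)  & 0xff;
+ *      req->upload_types[2] = (mask >> 16) & 0xff;
+ */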
+
+#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
+
+/*
+ * htt_oob_sync_req - request out-of-band sync
+ *
+ * The HTT SYNC tells the target to suspend processing of subsequent
+ * HTT host-to-target messages until some other target agent locally
+ * informs the target HTT FW that the current sync counter is equal to
+ * or greater than (in a modulo sense) the sync counter specified in
+ * the SYNC message.
+ *
+ * This allows other host-target components to synchronize their operation
+ * with HTT, e.g. to ensure that tx frames don't get transmitted until a
+ * security key has been downloaded to and activated by the target.
+ * In the absence of any explicit synchronization counter value
+ * specification, the target HTT FW will use zero as the default current
+ * sync value.
+ *
+ * The HTT target FW will suspend its host->target message processing as long
+ * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
+ */
+struct htt_oob_sync_req {
+       u8 sync_count;
+       __le16 rsvd0;
+} __packed;
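+
+/*
+ * Illustrative sketch (not part of the driver): the suspend condition
+ * quoted above, written out in C; in_band_count and oob_count are
+ * hypothetical u8 counters:
+ *
+ *      u8 delta = (in_band_count - oob_count) & 0xff;
+ *      bool suspended = (delta > 0 && delta < 128);
+ */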
+
+#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F
+#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB  0
+
+struct htt_aggr_conf {
+       u8 max_num_ampdu_subframes;
+       union {
+               /* don't use bitfields; undefined behaviour */
+               u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */
+               u8 max_num_amsdu_subframes:5;
+       } __packed;
+} __packed;
+
+#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+
+struct htt_mgmt_tx_desc {
+       u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+       __le32 msdu_paddr;
+       __le32 desc_id;
+       __le32 len;
+       __le32 vdev_id;
+       u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+} __packed;
+
+enum htt_mgmt_tx_status {
+       HTT_MGMT_TX_STATUS_OK    = 0,
+       HTT_MGMT_TX_STATUS_RETRY = 1,
+       HTT_MGMT_TX_STATUS_DROP  = 2
+};
+
+/*=== target -> host messages ===============================================*/
+
+
+enum htt_t2h_msg_type {
+       HTT_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
+       HTT_T2H_MSG_TYPE_RX_IND                 = 0x1,
+       HTT_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
+       HTT_T2H_MSG_TYPE_PEER_MAP               = 0x3,
+       HTT_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
+       HTT_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
+       HTT_T2H_MSG_TYPE_RX_DELBA               = 0x6,
+       HTT_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
+       HTT_T2H_MSG_TYPE_PKTLOG                 = 0x8,
+       HTT_T2H_MSG_TYPE_STATS_CONF             = 0x9,
+       HTT_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
+       HTT_T2H_MSG_TYPE_SEC_IND                = 0xb,
+       HTT_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
+       HTT_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
+       HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION     = 0xe,
+       HTT_T2H_MSG_TYPE_TEST,
+       /* keep this last */
+       HTT_T2H_NUM_MSGS
+};
+
+/*
+ * htt_resp_hdr - header for target-to-host messages
+ *
+ * msg_type: see htt_t2h_msg_type
+ */
+struct htt_resp_hdr {
+       u8 msg_type;
+} __packed;
+
+#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
+#define HTT_RESP_HDR_MSG_TYPE_MASK   0xff
+#define HTT_RESP_HDR_MSG_TYPE_LSB    0
+
+/* htt_ver_resp - response sent for htt_ver_req */
+struct htt_ver_resp {
+       u8 minor;
+       u8 major;
+       u8 rsvd0;
+} __packed;
+
+struct htt_mgmt_tx_completion {
+       u8 rsvd0;
+       u8 rsvd1;
+       u8 rsvd2;
+       __le32 desc_id;
+       __le32 status;
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x3F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 6)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24
+
+struct htt_rx_indication_hdr {
+       u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
+       __le16 peer_id;
+       __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
+#define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
+#define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24
+
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A2_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A2_LSB  0
+#define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
+#define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
+
+enum htt_rx_legacy_rate {
+       HTT_RX_OFDM_48 = 0,
+       HTT_RX_OFDM_24 = 1,
+       HTT_RX_OFDM_12,
+       HTT_RX_OFDM_6,
+       HTT_RX_OFDM_54,
+       HTT_RX_OFDM_36,
+       HTT_RX_OFDM_18,
+       HTT_RX_OFDM_9,
+
+       /* long preamble */
+       HTT_RX_CCK_11_LP = 0,
+       HTT_RX_CCK_5_5_LP = 1,
+       HTT_RX_CCK_2_LP,
+       HTT_RX_CCK_1_LP,
+       /* short preamble */
+       HTT_RX_CCK_11_SP,
+       HTT_RX_CCK_5_5_SP,
+       HTT_RX_CCK_2_SP
+};
+
+enum htt_rx_legacy_rate_type {
+       HTT_RX_LEGACY_RATE_OFDM = 0,
+       HTT_RX_LEGACY_RATE_CCK
+};
+
+enum htt_rx_preamble_type {
+       HTT_RX_LEGACY        = 0x4,
+       HTT_RX_HT            = 0x8,
+       HTT_RX_HT_WITH_TXBF  = 0x9,
+       HTT_RX_VHT           = 0xC,
+       HTT_RX_VHT_WITH_TXBF = 0xD,
+};
+
+/*
+ * Fields: phy_err_valid, phy_err_code, tsf,
+ * usec_timestamp, sub_usec_timestamp
+ * ..are valid only if end_valid == 1.
+ *
+ * Fields: rssi_chains, legacy_rate_type,
+ * legacy_rate_cck, preamble_type, service,
+ * vht_sig_*
+ * ..are valid only if start_valid == 1;
+ */
+struct htt_rx_indication_ppdu {
+       u8 combined_rssi;
+       u8 sub_usec_timestamp;
+       u8 phy_err_code;
+       u8 info0; /* HTT_RX_INDICATION_INFO0_ */
+       struct {
+               u8 pri20_db;
+               u8 ext20_db;
+               u8 ext40_db;
+               u8 ext80_db;
+       } __packed rssi_chains[4];
+       __le32 tsf;
+       __le32 usec_timestamp;
+       __le32 info1; /* HTT_RX_INDICATION_INFO1_ */
+       __le32 info2; /* HTT_RX_INDICATION_INFO2_ */
+} __packed;
+
+enum htt_rx_mpdu_status {
+       HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
+       HTT_RX_IND_MPDU_STATUS_OK,
+       HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+       HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+       HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+       HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+       /* only accept EAPOL frames */
+       HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
+       HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
+       /* Non-data in promiscuous mode */
+       HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+       HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+       HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
+       HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
+       HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
+       HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
+
+       /*
+        * MISC: discard for unspecified reasons.
+        * Leave this enum value last.
+        */
+       HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
+};
+
+struct htt_rx_indication_mpdu_range {
+       u8 mpdu_count;
+       u8 mpdu_range_status; /* %htt_rx_mpdu_status */
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct htt_rx_indication_prefix {
+       __le16 fw_rx_desc_bytes;
+       u8 pad0;
+       u8 pad1;
+};
+
+struct htt_rx_indication {
+       struct htt_rx_indication_hdr hdr;
+       struct htt_rx_indication_ppdu ppdu;
+       struct htt_rx_indication_prefix prefix;
+
+       /*
+        * the following fields are both dynamically sized, so
+        * take care when addressing them
+        */
+
+       /* the size of this is %fw_rx_desc_bytes */
+       struct fw_rx_desc_base fw_desc;
+
+       /*
+        * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
+        * and has %num_mpdu_ranges elements.
+        */
+       struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+} __packed;
+
+static inline struct htt_rx_indication_mpdu_range *
+               htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
+{
+       void *ptr = rx_ind;
+
+       ptr += sizeof(rx_ind->hdr)
+            + sizeof(rx_ind->ppdu)
+            + sizeof(rx_ind->prefix)
+            + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
+       return ptr;
+}
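+
+/*
+ * Illustrative sketch (not part of the driver): walking the mpdu range
+ * list; num_mpdu_ranges lives in hdr.info1, MS() is assumed to be the
+ * driver's mask/shift extract helper and handle_range() a hypothetical
+ * consumer:
+ *
+ *      struct htt_rx_indication_mpdu_range *ranges =
+ *              htt_rx_ind_get_mpdu_ranges(rx_ind);
+ *      int i, num = MS(__le32_to_cpu(rx_ind->hdr.info1),
+ *                      HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+ *
+ *      for (i = 0; i < num; i++)
+ *              handle_range(ranges[i].mpdu_count,
+ *                           ranges[i].mpdu_range_status);
+ */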
+
+enum htt_rx_flush_mpdu_status {
+       HTT_RX_FLUSH_MPDU_DISCARD = 0,
+       HTT_RX_FLUSH_MPDU_REORDER = 1,
+};
+
+/*
+ * htt_rx_flush - discard or reorder given range of mpdus
+ *
+ * Note: host must check if all sequence numbers between
+ *     [seq_num_start, seq_num_end-1] are valid.
+ */
+struct htt_rx_flush {
+       __le16 peer_id;
+       u8 tid;
+       u8 rsvd0;
+       u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
+       u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
+       u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
+};
+
+struct htt_rx_peer_map {
+       u8 vdev_id;
+       __le16 peer_id;
+       u8 addr[6];
+       u8 rsvd0;
+       u8 rsvd1;
+} __packed;
+
+struct htt_rx_peer_unmap {
+       u8 rsvd0;
+       __le16 peer_id;
+} __packed;
+
+enum htt_security_types {
+       HTT_SECURITY_NONE,
+       HTT_SECURITY_WEP128,
+       HTT_SECURITY_WEP104,
+       HTT_SECURITY_WEP40,
+       HTT_SECURITY_TKIP,
+       HTT_SECURITY_TKIP_NOMIC,
+       HTT_SECURITY_AES_CCMP,
+       HTT_SECURITY_WAPI,
+
+       HTT_NUM_SECURITY_TYPES /* keep this last! */
+};
+
+enum htt_security_flags {
+#define HTT_SECURITY_TYPE_MASK 0x7F
+#define HTT_SECURITY_TYPE_LSB  0
+       HTT_SECURITY_IS_UNICAST = 1 << 7
+};
+
+struct htt_security_indication {
+       union {
+               /* don't use bitfields; undefined behaviour */
+               u8 flags; /* %htt_security_flags */
+               struct {
+                       u8 security_type:7, /* %htt_security_types */
+                          is_unicast:1;
+               } __packed;
+       } __packed;
+       __le16 peer_id;
+       u8 michael_key[8];
+       u8 wapi_rsc[16];
+} __packed;
+
+#define HTT_RX_BA_INFO0_TID_MASK     0x000F
+#define HTT_RX_BA_INFO0_TID_LSB      0
+#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
+#define HTT_RX_BA_INFO0_PEER_ID_LSB  4
+
+struct htt_rx_addba {
+       u8 window_size;
+       __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+struct htt_rx_delba {
+       u8 rsvd0;
+       __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+enum htt_data_tx_status {
+       HTT_DATA_TX_STATUS_OK            = 0,
+       HTT_DATA_TX_STATUS_DISCARD       = 1,
+       HTT_DATA_TX_STATUS_NO_ACK        = 2,
+       HTT_DATA_TX_STATUS_POSTPONE      = 3, /* HL only */
+       HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
+};
+
+enum htt_data_tx_flags {
+#define HTT_DATA_TX_STATUS_MASK 0x07
+#define HTT_DATA_TX_STATUS_LSB  0
+#define HTT_DATA_TX_TID_MASK    0x78
+#define HTT_DATA_TX_TID_LSB     3
+       HTT_DATA_TX_TID_INVALID = 1 << 7
+};
+
+#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
+
+struct htt_data_tx_completion {
+       union {
+               u8 flags;
+               struct {
+                       u8 status:3,
+                          tid:4,
+                          tid_invalid:1;
+               } __packed;
+       } __packed;
+       u8 num_msdus;
+       u8 rsvd0;
+       __le16 msdus[0]; /* variable length based on %num_msdus */
+} __packed;
+
+struct htt_tx_compl_ind_base {
+       u32 hdr;
+       u16 payload[1/*or more*/];
+} __packed;
+
+struct htt_rc_tx_done_params {
+       u32 rate_code;
+       u32 rate_code_flags;
+       u32 flags;
+       u32 num_enqued; /* 1 for non-AMPDU */
+       u32 num_retries;
+       u32 num_failed; /* for AMPDU */
+       u32 ack_rssi;
+       u32 time_stamp;
+       u32 is_probe;
+};
+
+struct htt_rc_update {
+       u8 vdev_id;
+       __le16 peer_id;
+       u8 addr[6];
+       u8 num_elems;
+       u8 rsvd0;
+       struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
+} __packed;
+
+/* see htt_rx_indication for similar fields and descriptions */
+struct htt_rx_fragment_indication {
+       union {
+               u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
+               struct {
+                       u8 ext_tid:5,
+                          flush_valid:1;
+               } __packed;
+       } __packed;
+       __le16 peer_id;
+       __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
+       __le16 fw_rx_desc_bytes;
+       __le16 rsvd0;
+
+       u8 fw_msdu_rx_desc[0];
+} __packed;
+
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK     0x1F
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB      0
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB  5
+
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB  0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK   0x00000FC0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB    6
+
+/*
+ * target -> host test message definition
+ *
+ * The following field definitions describe the format of the test
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit integer values, followed by a variable number
+ * of 8-bit character values.
+ *
+ * |31                         16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |          num chars          |   num ints   |   msg type   |
+ * |-----------------------------------------------------------|
+ * |                           int 0                           |
+ * |-----------------------------------------------------------|
+ * |                           int 1                           |
+ * |-----------------------------------------------------------|
+ * |                            ...                            |
+ * |-----------------------------------------------------------|
+ * |    char 3    |    char 2    |    char 1    |    char 0    |
+ * |-----------------------------------------------------------|
+ * |              |              |      ...     |    char 4    |
+ * |-----------------------------------------------------------|
+ *   - MSG_TYPE
+ *     Bits 7:0
+ *     Purpose: identifies this as a test message
+ *     Value: HTT_MSG_TYPE_TEST
+ *   - NUM_INTS
+ *     Bits 15:8
+ *     Purpose: indicate how many 32-bit integers follow the message header
+ *   - NUM_CHARS
+ *     Bits 31:16
+ *     Purpose: indicate how many 8-bit characters follow the series of integers
+ */
+struct htt_rx_test {
+       u8 num_ints;
+       __le16 num_chars;
+
+       /* payload consists of 2 lists:
+        *  a) num_ints * sizeof(__le32)
+        *  b) num_chars * sizeof(u8) aligned to 4 bytes */
+       u8 payload[0];
+} __packed;
+
+static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
+{
+       return (__le32 *)rx_test->payload;
+}
+
+static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
+{
+       return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
+}
+
+/*
+ * target -> host packet log message
+ *
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31          24|23          16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |              |              |              |   msg type   |
+ * |-----------------------------------------------------------|
+ * |                        payload                            |
+ * |-----------------------------------------------------------|
+ *   - MSG_TYPE
+ *     Bits 7:0
+ *     Purpose: identifies this as a packet log message
+ *     Value: HTT_MSG_TYPE_PACKETLOG
+ */
+struct htt_pktlog_msg {
+       u8 pad[3];
+       __le32 payload[1 /* or more */];
+} __packed;
+
+struct htt_dbg_stats_rx_reorder_stats {
+       /* Non QoS MPDUs received */
+       __le32 deliver_non_qos;
+
+       /* MPDUs received in-order */
+       __le32 deliver_in_order;
+
+       /* Flush due to reorder timer expired */
+       __le32 deliver_flush_timeout;
+
+       /* Flush due to move out of window */
+       __le32 deliver_flush_oow;
+
+       /* Flush due to DELBA */
+       __le32 deliver_flush_delba;
+
+       /* MPDUs dropped due to FCS error */
+       __le32 fcs_error;
+
+       /* MPDUs dropped due to monitor mode non-data packet */
+       __le32 mgmt_ctrl;
+
+       /* MPDUs dropped due to invalid peer */
+       __le32 invalid_peer;
+
+       /* MPDUs dropped due to duplication (non aggregation) */
+       __le32 dup_non_aggr;
+
+       /* MPDUs dropped due to processed before */
+       __le32 dup_past;
+
+       /* MPDUs dropped due to duplicate in reorder queue */
+       __le32 dup_in_reorder;
+
+       /* Reorder timeout happened */
+       __le32 reorder_timeout;
+
+       /* invalid bar ssn */
+       __le32 invalid_bar_ssn;
+
+       /* reorder reset due to bar ssn */
+       __le32 ssn_reset;
+};
+
+struct htt_dbg_stats_wal_tx_stats {
+       /* Num HTT cookies queued to dispatch list */
+       __le32 comp_queued;
+
+       /* Num HTT cookies dispatched */
+       __le32 comp_delivered;
+
+       /* Num MSDU queued to WAL */
+       __le32 msdu_enqued;
+
+       /* Num MPDU queue to WAL */
+       __le32 mpdu_enqued;
+
+       /* Num MSDUs dropped by WMM limit */
+       __le32 wmm_drop;
+
+       /* Num Local frames queued */
+       __le32 local_enqued;
+
+       /* Num Local frames done */
+       __le32 local_freed;
+
+       /* Num queued to HW */
+       __le32 hw_queued;
+
+       /* Num PPDU reaped from HW */
+       __le32 hw_reaped;
+
+       /* Num underruns */
+       __le32 underrun;
+
+       /* Num PPDUs cleaned up in TX abort */
+       __le32 tx_abort;
+
+       /* Num MPDUs requeued by SW */
+       __le32 mpdus_requed;
+
+       /* excessive retries */
+       __le32 tx_ko;
+
+       /* data hw rate code */
+       __le32 data_rc;
+
+       /* Scheduler self triggers */
+       __le32 self_triggers;
+
+       /* frames dropped due to excessive sw retries */
+       __le32 sw_retry_failure;
+
+       /* illegal rate phy errors  */
+       __le32 illgl_rate_phy_err;
+
+       /* wal pdev continuous xretry */
+       __le32 pdev_cont_xretry;
+
+       /* wal pdev tx timeouts */
+       __le32 pdev_tx_timeout;
+
+       /* wal pdev resets  */
+       __le32 pdev_resets;
+
+       __le32 phy_underrun;
+
+       /* MPDU is more than txop limit */
+       __le32 txop_ovf;
+} __packed;
+
+struct htt_dbg_stats_wal_rx_stats {
+       /* Cnts any change in ring routing mid-ppdu */
+       __le32 mid_ppdu_route_change;
+
+       /* Total number of statuses processed */
+       __le32 status_rcvd;
+
+       /* Extra frags on rings 0-3 */
+       __le32 r0_frags;
+       __le32 r1_frags;
+       __le32 r2_frags;
+       __le32 r3_frags;
+
+       /* MSDUs / MPDUs delivered to HTT */
+       __le32 htt_msdus;
+       __le32 htt_mpdus;
+
+       /* MSDUs / MPDUs delivered to local stack */
+       __le32 loc_msdus;
+       __le32 loc_mpdus;
+
+       /* AMSDUs that have more MSDUs than the status ring size */
+       __le32 oversize_amsdu;
+
+       /* Number of PHY errors */
+       __le32 phy_errs;
+
+       /* Number of PHY errors drops */
+       __le32 phy_err_drop;
+
+       /* Number of mpdu errors - FCS, MIC, ENC etc. */
+       __le32 mpdu_errs;
+} __packed;
+
+struct htt_dbg_stats_wal_peer_stats {
+       __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+} __packed;
+
+struct htt_dbg_stats_wal_pdev_txrx {
+       struct htt_dbg_stats_wal_tx_stats tx_stats;
+       struct htt_dbg_stats_wal_rx_stats rx_stats;
+       struct htt_dbg_stats_wal_peer_stats peer_stats;
+} __packed;
+
+struct htt_dbg_stats_rx_rate_info {
+       __le32 mcs[10];
+       __le32 sgi[10];
+       __le32 nss[4];
+       __le32 stbc[10];
+       __le32 bw[3];
+       __le32 pream[6];
+       __le32 ldpc;
+       __le32 txbf;
+};
+
+/*
+ * htt_dbg_stats_status -
+ * present -     The requested stats have been delivered in full.
+ *               This indicates that either the stats information was contained
+ *               in its entirety within this message, or else this message
+ *               completes the delivery of the requested stats info that was
+ *               partially delivered through earlier STATS_CONF messages.
+ * partial -     The requested stats have been delivered in part.
+ *               One or more subsequent STATS_CONF messages with the same
+ *               cookie value will be sent to deliver the remainder of the
+ *               information.
+ * error -       The requested stats could not be delivered, for example due
+ *               to a shortage of memory to construct a message holding the
+ *               requested stats.
+ * invalid -     The requested stat type is either not recognized, or the
+ *               target is configured to not gather the stats type in question.
+ * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ * series_done - This special value indicates that no further stats info
+ *               elements are present within a series of stats info elems
+ *               (within a stats upload confirmation message).
+ */
+enum htt_dbg_stats_status {
+       HTT_DBG_STATS_STATUS_PRESENT     = 0,
+       HTT_DBG_STATS_STATUS_PARTIAL     = 1,
+       HTT_DBG_STATS_STATUS_ERROR       = 2,
+       HTT_DBG_STATS_STATUS_INVALID     = 3,
+       HTT_DBG_STATS_STATUS_SERIES_DONE = 7
+};
+
+/*
+ * target -> host statistics upload
+ *
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a series of tag-length-value stats information elements.
+ * The tag-length header for each stats info element also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A special value of all 1's in this status field is used to indicate
+ * the end of the series of stats info elements.
+ *
+ *
+ * |31                         16|15           8|7   5|4       0|
+ * |------------------------------------------------------------|
+ * |                  reserved                  |    msg type   |
+ * |------------------------------------------------------------|
+ * |                        cookie LSBs                         |
+ * |------------------------------------------------------------|
+ * |                        cookie MSBs                         |
+ * |------------------------------------------------------------|
+ * |      stats entry length     |   reserved   |  S  |stat type|
+ * |------------------------------------------------------------|
+ * |                                                            |
+ * |                  type-specific stats info                  |
+ * |                                                            |
+ * |------------------------------------------------------------|
+ * |      stats entry length     |   reserved   |  S  |stat type|
+ * |------------------------------------------------------------|
+ * |                                                            |
+ * |                  type-specific stats info                  |
+ * |                                                            |
+ * |------------------------------------------------------------|
+ * |              n/a            |   reserved   | 111 |   n/a   |
+ * |------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: identifies this as a statistics upload confirmation message
+ *    Value: 0x9
+ *  - COOKIE_LSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: LSBs of the opaque cookie specified by the host-side requestor
+ *  - COOKIE_MSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ *  - STAT_TYPE
+ *    Bits 4:0
+ *    Purpose: identifies the type of statistics info held in the
+ *        following information element
+ *    Value: htt_dbg_stats_type
+ *  - STATUS
+ *    Bits 7:5
+ *    Purpose: indicate whether the requested stats are present
+ *    Value: htt_dbg_stats_status, including a special value (0x7) to mark
+ *        the completion of the stats entry series
+ *  - LENGTH
+ *    Bits 31:16
+ *    Purpose: indicate the stats information size
+ *    Value: This field specifies the number of bytes of stats information
+ *       that follows the element tag-length header.
+ *       It is expected but not required that this length is a multiple of
+ *       4 bytes.  Even if the length is not an integer multiple of 4, the
+ *       subsequent stats entry header will begin on a 4-byte aligned
+ *       boundary.
+ */
+
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB  0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK    0xE0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB     5
+
+struct htt_stats_conf_item {
+       union {
+               u8 info;
+               struct {
+                       u8 stat_type:5; /* %HTT_DBG_STATS_ */
+                       u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
+               } __packed;
+       } __packed;
+       u8 pad;
+       __le16 length;
+       u8 payload[0]; /* roundup(length, 4) long */
+} __packed;
+
+struct htt_stats_conf {
+       u8 pad[3];
+       __le32 cookie_lsb;
+       __le32 cookie_msb;
+
+       /* each item has variable length! */
+       struct htt_stats_conf_item items[0];
+} __packed;
+
+static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
+                                       const struct htt_stats_conf_item *item)
+{
+       return (void *)item + sizeof(*item) +
+              roundup(__le16_to_cpu(item->length), 4);
+}
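+
+/*
+ * Illustrative sketch, not part of this patch's interface: walking the
+ * item series of a stats upload confirmation until the SERIES_DONE
+ * marker. "len" is assumed to be the number of bytes available at
+ * "conf", e.g. derived by the caller from the HTC payload length.
+ */
+static inline void htt_stats_conf_walk_items(const struct htt_stats_conf *conf,
+                                            int len)
+{
+       const struct htt_stats_conf_item *item = conf->items;
+       const void *end = (const void *)conf + len;
+       u8 status;
+
+       while ((const void *)item + sizeof(*item) <= end) {
+               status = (item->info & HTT_STATS_CONF_ITEM_INFO_STATUS_MASK) >>
+                        HTT_STATS_CONF_ITEM_INFO_STATUS_LSB;
+               if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
+                       break;
+
+               /* item->payload holds the type-specific stats blob, e.g.
+                * struct htt_dbg_stats_wal_pdev_txrx for WAL pdev stats */
+               item = htt_stats_conf_next_item(item);
+       }
+}
+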
+/*
+ * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
+ *
+ * The following field definitions describe the format of the HTT host
+ * to target frag_desc/msdu_ext bank configuration message.
+ * The message contains the base address and the min and max id of the
+ * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
+ * MSDU_EXT/FRAG_DESC.
+ * HTT will use the id in the HTT descriptor instead of sending the
+ * frag_desc_ptr. For QCA988X HW the firmware uses the fragment_desc_ptr,
+ * but in WIFI2.0 the hardware does the mapping/translation.
+ *
+ * The total number of banks that can be configured is 16.
+ *
+ * This should be called before any TX has been initiated by the HTT.
+ *
+ * |31                         16|15           8|7   5|4       0|
+ * |------------------------------------------------------------|
+ * | DESC_SIZE    |  NUM_BANKS   | RES |SWP|pdev|    msg type   |
+ * |------------------------------------------------------------|
+ * |                     BANK0_BASE_ADDRESS                     |
+ * |------------------------------------------------------------|
+ * |                            ...                             |
+ * |------------------------------------------------------------|
+ * |                    BANK15_BASE_ADDRESS                     |
+ * |------------------------------------------------------------|
+ * |       BANK0_MAX_ID          |       BANK0_MIN_ID           |
+ * |------------------------------------------------------------|
+ * |                            ...                             |
+ * |------------------------------------------------------------|
+ * |       BANK15_MAX_ID         |       BANK15_MIN_ID          |
+ * |------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Value: 0x6
+ *  - BANKx_BASE_ADDRESS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
+ *         bank physical/bus address.
+ *  - BANKx_MIN_ID
+ *    Bits 15:0
+ *    Purpose: Provide a mechanism to specify the min index that needs to
+ *          be mapped.
+ *  - BANKx_MAX_ID
+ *    Bits 31:16
+ *    Purpose: Provide a mechanism to specify the max index that needs to
+ *          be mapped.
+ */
+struct htt_frag_desc_bank_id {
+       __le16 bank_min_id;
+       __le16 bank_max_id;
+} __packed;
+
+/* real is 16 but it wouldn't fit in the max htt message size
+ * so we use a conservatively safe value for now */
+#define HTT_FRAG_DESC_BANK_MAX 4
+
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB  0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP         (1 << 2)
+
+struct htt_frag_desc_bank_cfg {
+       u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+       u8 num_banks;
+       u8 desc_size;
+       __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+       struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+} __packed;
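+
+/*
+ * Illustrative sketch, not part of this patch: filling a single-bank
+ * configuration per the layout documented above. The caller supplies
+ * the DMA base address, descriptor size and id range; pdev id 0 and no
+ * byte swap are assumed here.
+ */
+static inline void htt_frag_desc_bank_cfg_fill_one(
+                               struct htt_frag_desc_bank_cfg *cfg,
+                               u32 base_paddr, u8 desc_size,
+                               u16 min_id, u16 max_id)
+{
+       memset(cfg, 0, sizeof(*cfg));
+       cfg->info = 0; /* PDEV_ID = 0, SWAP flag clear */
+       cfg->num_banks = 1;
+       cfg->desc_size = desc_size;
+       cfg->bank_base_addrs[0] = __cpu_to_le32(base_paddr);
+       cfg->bank_id[0].bank_min_id = __cpu_to_le16(min_id);
+       cfg->bank_id[0].bank_max_id = __cpu_to_le16(max_id);
+}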
+
+union htt_rx_pn_t {
+       /* WEP: 24-bit PN */
+       u32 pn24;
+
+       /* TKIP or CCMP: 48-bit PN */
+       u64 pn48;
+
+       /* WAPI: 128-bit PN */
+       u64 pn128[2];
+};
+
+struct htt_cmd {
+       struct htt_cmd_hdr hdr;
+       union {
+               struct htt_ver_req ver_req;
+               struct htt_mgmt_tx_desc mgmt_tx;
+               struct htt_data_tx_desc data_tx;
+               struct htt_rx_ring_setup rx_setup;
+               struct htt_stats_req stats_req;
+               struct htt_oob_sync_req oob_sync_req;
+               struct htt_aggr_conf aggr_conf;
+               struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+       };
+} __packed;
+
+struct htt_resp {
+       struct htt_resp_hdr hdr;
+       union {
+               struct htt_ver_resp ver_resp;
+               struct htt_mgmt_tx_completion mgmt_tx_completion;
+               struct htt_data_tx_completion data_tx_completion;
+               struct htt_rx_indication rx_ind;
+               struct htt_rx_fragment_indication rx_frag_ind;
+               struct htt_rx_peer_map peer_map;
+               struct htt_rx_peer_unmap peer_unmap;
+               struct htt_rx_flush rx_flush;
+               struct htt_rx_addba rx_addba;
+               struct htt_rx_delba rx_delba;
+               struct htt_security_indication security_indication;
+               struct htt_rc_update rc_update;
+               struct htt_rx_test rx_test;
+               struct htt_pktlog_msg pktlog_msg;
+               struct htt_stats_conf stats_conf;
+       };
+} __packed;
+
+
+/*** host side structures follow ***/
+
+struct htt_tx_done {
+       u32 msdu_id;
+       bool discard;
+       bool no_ack;
+};
+
+struct htt_peer_map_event {
+       u8 vdev_id;
+       u16 peer_id;
+       u8 addr[ETH_ALEN];
+};
+
+struct htt_peer_unmap_event {
+       u16 peer_id;
+};
+
+struct htt_rx_info {
+       struct sk_buff *skb;
+       enum htt_rx_mpdu_status status;
+       enum htt_rx_mpdu_encrypt_type encrypt_type;
+       s8 signal;
+       struct {
+               u8 info0;
+               u32 info1;
+               u32 info2;
+       } rate;
+       bool fcs_err;
+};
+
+struct ath10k_htt {
+       struct ath10k *ar;
+       enum ath10k_htc_ep_id eid;
+
+       int max_throughput_mbps;
+       u8 target_version_major;
+       u8 target_version_minor;
+       struct completion target_version_received;
+
+       struct {
+               /*
+                * Ring of network buffer objects - This ring is
+                * used exclusively by the host SW. This ring
+                * mirrors the dev_addrs_ring that is shared
+                * between the host SW and the MAC HW. The host SW
+                * uses this netbufs ring to locate the network
+                * buffer objects whose data buffers the HW has
+                * filled.
+                */
+               struct sk_buff **netbufs_ring;
+               /*
+                * Ring of buffer addresses -
+                * This ring holds the "physical" device address of the
+                * rx buffers the host SW provides for the MAC HW to
+                * fill.
+                */
+               __le32 *paddrs_ring;
+
+               /*
+                * Base address of ring, as a "physical" device address
+                * rather than a CPU address.
+                */
+               dma_addr_t base_paddr;
+
+               /* how many elems in the ring (power of 2) */
+               int size;
+
+               /* size - 1 */
+               unsigned size_mask;
+
+               /* how many rx buffers to keep in the ring */
+               int fill_level;
+
+               /* how many rx buffers (full+empty) are in the ring */
+               int fill_cnt;
+
+               /*
+                * alloc_idx - where HTT SW has deposited empty buffers
+                * This is allocated in consistent mem, so that the FW can
+                * read this variable, and program the HW's FW_IDX reg with
+                * the value of this shadow register.
+                */
+               struct {
+                       __le32 *vaddr;
+                       dma_addr_t paddr;
+               } alloc_idx;
+
+               /* where HTT SW has processed bufs filled by rx MAC DMA */
+               struct {
+                       unsigned msdu_payld;
+               } sw_rd_idx;
+
+               /*
+                * refill_retry_timer - timer triggered when the ring is
+                * not refilled to the level expected
+                */
+               struct timer_list refill_retry_timer;
+
+               /* Protects access to all rx ring buffer state variables */
+               spinlock_t lock;
+       } rx_ring;
+
+       unsigned int prefetch_len;
+
+       /* Protects access to %pending_tx, %used_msdu_ids */
+       spinlock_t tx_lock;
+       int max_num_pending_tx;
+       int num_pending_tx;
+       struct sk_buff **pending_tx;
+       unsigned long *used_msdu_ids; /* bitmap */
+       wait_queue_head_t empty_tx_wq;
+
+       /* set if host-fw communication goes haywire
+        * used to avoid further failures */
+       bool rx_confused;
+};
+
+#define RX_HTT_HDR_STATUS_LEN 64
+
+/* This structure layout is programmed via rx ring setup
+ * so that FW knows how to transfer the rx descriptor to the host.
+ * Buffers like this are placed on the rx ring. */
+struct htt_rx_desc {
+       union {
+               /* This field is filled on the host using the msdu buffer
+                * from htt_rx_indication */
+               struct fw_rx_desc_base fw_desc;
+               u32 pad;
+       } __packed;
+       struct {
+               struct rx_attention attention;
+               struct rx_frag_info frag_info;
+               struct rx_mpdu_start mpdu_start;
+               struct rx_msdu_start msdu_start;
+               struct rx_msdu_end msdu_end;
+               struct rx_mpdu_end mpdu_end;
+               struct rx_ppdu_start ppdu_start;
+               struct rx_ppdu_end ppdu_end;
+       } __packed;
+       u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+       u8 msdu_payload[0];
+};
+
+#define HTT_RX_DESC_ALIGN 8
+
+#define HTT_MAC_ADDR_LEN 6
+
+/*
+ * FIX THIS
+ * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
+ * rounded up to a cache line size.
+ */
+#define HTT_RX_BUF_SIZE 1920
+#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
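+
+/* Illustrative helper, not used elsewhere in this patch: round a buffer
+ * length up to the conservative cache line estimate above. Note that
+ * HTT_RX_BUF_SIZE (1920 = 15 * 128) is already an integral number of
+ * such lines. */
+#define HTT_CACHE_LINE_ALIGN(len) \
+       (((len) + HTT_MAX_CACHE_LINE_SIZE_MASK) & \
+        ~HTT_MAX_CACHE_LINE_SIZE_MASK)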
+
+struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
+int ath10k_htt_attach_target(struct ath10k_htt *htt);
+void ath10k_htt_detach(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_attach(struct ath10k_htt *htt);
+void ath10k_htt_tx_detach(struct ath10k_htt *htt);
+int ath10k_htt_rx_attach(struct ath10k_htt *htt);
+void ath10k_htt_rx_detach(struct ath10k_htt *htt);
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
new file mode 100644 (file)
index 0000000..de058d7
--- /dev/null
@@ -0,0 +1,1167 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+#include "htt.h"
+#include "txrx.h"
+#include "debug.h"
+
+#include <linux/log2.h>
+
+/* slightly larger than one large A-MPDU */
+#define HTT_RX_RING_SIZE_MIN 128
+
+/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
+#define HTT_RX_RING_SIZE_MAX 2048
+
+#define HTT_RX_AVG_FRM_BYTES 1000
+
+/* ms, very conservative */
+#define HTT_RX_HOST_LATENCY_MAX_MS 20
+
+/* ms, conservative */
+#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
+
+/* when under memory pressure rx ring refill may fail and needs a retry */
+#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
+{
+       int size;
+
+       /*
+        * It is expected that the host CPU will typically be able to
+        * service the rx indication from one A-MPDU before the rx
+        * indication from the subsequent A-MPDU happens, roughly 1-2 ms
+        * later. However, the rx ring should be sized very conservatively,
+        * to accommodate the worst reasonable delay before the host CPU
+        * services a rx indication interrupt.
+        *
+        * The rx ring need not be kept full of empty buffers. In theory,
+        * the htt host SW can dynamically track the low-water mark in the
+        * rx ring, and dynamically adjust the level to which the rx ring
+        * is filled with empty buffers, to dynamically meet the desired
+        * low-water mark.
+        *
+        * In contrast, it's difficult to resize the rx ring itself, once
+        * it's in use. Thus, the ring itself should be sized very
+        * conservatively, while the degree to which the ring is filled
+        * with empty buffers should be sized moderately conservatively.
+        */
+
+       /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+       size = htt->max_throughput_mbps * 1000 /
+              (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
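+       /*
+        * Worked example (illustrative): with max_throughput_mbps = 1000
+        * this yields 1000 * 1000 / (8 * 1000) * 20 = 2500 entries, which
+        * the clamps below reduce to HTT_RX_RING_SIZE_MAX.
+        */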
+
+       if (size < HTT_RX_RING_SIZE_MIN)
+               size = HTT_RX_RING_SIZE_MIN;
+
+       if (size > HTT_RX_RING_SIZE_MAX)
+               size = HTT_RX_RING_SIZE_MAX;
+
+       size = roundup_pow_of_two(size);
+
+       return size;
+}
+
+static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
+{
+       int size;
+
+       /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+       size = htt->max_throughput_mbps * 1000 /
+              (8 * HTT_RX_AVG_FRM_BYTES) *
+              HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
+
+       /*
+        * Make sure the fill level is at least 1 less than the ring size.
+        * Leaving 1 element empty allows the SW to easily distinguish
+        * between a full ring vs. an empty ring.
+        */
+       if (size >= htt->rx_ring.size)
+               size = htt->rx_ring.size - 1;
+
+       return size;
+}
+
+static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
+{
+       struct sk_buff *skb;
+       struct ath10k_skb_cb *cb;
+       int i;
+
+       for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
+               skb = htt->rx_ring.netbufs_ring[i];
+               cb = ATH10K_SKB_CB(skb);
+               dma_unmap_single(htt->ar->dev, cb->paddr,
+                                skb->len + skb_tailroom(skb),
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
+       }
+
+       htt->rx_ring.fill_cnt = 0;
+}
+
+static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+       struct htt_rx_desc *rx_desc;
+       struct sk_buff *skb;
+       dma_addr_t paddr;
+       int ret = 0, idx;
+
+       idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
+       while (num > 0) {
+               skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+               if (!skb) {
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+
+               if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
+                       skb_pull(skb,
+                                PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
+                                skb->data);
+
+               /* Clear rx_desc attention word before posting to Rx ring */
+               rx_desc = (struct htt_rx_desc *)skb->data;
+               rx_desc->attention.flags = __cpu_to_le32(0);
+
+               paddr = dma_map_single(htt->ar->dev, skb->data,
+                                      skb->len + skb_tailroom(skb),
+                                      DMA_FROM_DEVICE);
+
+               if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
+                       dev_kfree_skb_any(skb);
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+
+               ATH10K_SKB_CB(skb)->paddr = paddr;
+               htt->rx_ring.netbufs_ring[idx] = skb;
+               htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+               htt->rx_ring.fill_cnt++;
+
+               num--;
+               idx++;
+               idx &= htt->rx_ring.size_mask;
+       }
+
+fail:
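+       /*
+        * Publish the updated alloc index even after a partial fill so
+        * that the FW/HW sees whatever buffers were actually posted.
+        */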
+       *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
+       return ret;
+}
+
+static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+       lockdep_assert_held(&htt->rx_ring.lock);
+       return __ath10k_htt_rx_ring_fill_n(htt, num);
+}
+
+static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
+{
+       int ret, num_to_fill;
+
+       spin_lock_bh(&htt->rx_ring.lock);
+       num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+       ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
+       if (ret == -ENOMEM) {
+               /*
+                * Failed to fill it to the desired level -
+                * we'll start a timer and try again next time.
+                * As long as enough buffers are left in the ring for
+                * another A-MPDU rx, no special recovery is needed.
+                */
+               mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+                         msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+       }
+       spin_unlock_bh(&htt->rx_ring.lock);
+}
+
+static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
+{
+       struct ath10k_htt *htt = (struct ath10k_htt *)arg;
+       ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
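+/* Number of occupied ring slots: the distance from the SW read index to
+ * the HW-visible alloc index, modulo the power-of-two ring size. */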
+static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
+{
+       return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
+               htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
+}
+
+void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+{
+       int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+
+       del_timer_sync(&htt->rx_ring.refill_retry_timer);
+
+       while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
+               struct sk_buff *skb =
+                               htt->rx_ring.netbufs_ring[sw_rd_idx];
+               struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+               dma_unmap_single(htt->ar->dev, cb->paddr,
+                                skb->len + skb_tailroom(skb),
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
+               sw_rd_idx++;
+               sw_rd_idx &= htt->rx_ring.size_mask;
+       }
+
+       dma_free_coherent(htt->ar->dev,
+                         (htt->rx_ring.size *
+                          sizeof(htt->rx_ring.paddrs_ring)),
+                         htt->rx_ring.paddrs_ring,
+                         htt->rx_ring.base_paddr);
+
+       dma_free_coherent(htt->ar->dev,
+                         sizeof(*htt->rx_ring.alloc_idx.vaddr),
+                         htt->rx_ring.alloc_idx.vaddr,
+                         htt->rx_ring.alloc_idx.paddr);
+
+       kfree(htt->rx_ring.netbufs_ring);
+}
+
+static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
+{
+       int idx;
+       struct sk_buff *msdu;
+
+       spin_lock_bh(&htt->rx_ring.lock);
+
+       if (ath10k_htt_rx_ring_elems(htt) == 0)
+               ath10k_warn("htt rx ring is empty!\n");
+
+       idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+       msdu = htt->rx_ring.netbufs_ring[idx];
+
+       idx++;
+       idx &= htt->rx_ring.size_mask;
+       htt->rx_ring.sw_rd_idx.msdu_payld = idx;
+       htt->rx_ring.fill_cnt--;
+
+       spin_unlock_bh(&htt->rx_ring.lock);
+       return msdu;
+}
+
+static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
+{
+       struct sk_buff *next;
+
+       while (skb) {
+               next = skb->next;
+               dev_kfree_skb_any(skb);
+               skb = next;
+       }
+}
+
+static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
+                                  u8 **fw_desc, int *fw_desc_len,
+                                  struct sk_buff **head_msdu,
+                                  struct sk_buff **tail_msdu)
+{
+       int msdu_len, msdu_chaining = 0;
+       struct sk_buff *msdu;
+       struct htt_rx_desc *rx_desc;
+
+       if (ath10k_htt_rx_ring_elems(htt) == 0)
+               ath10k_warn("htt rx ring is empty!\n");
+
+       if (htt->rx_confused) {
+               ath10k_warn("htt is confused. refusing rx\n");
+               return 0;
+       }
+
+       msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
+       while (msdu) {
+               int last_msdu, msdu_len_invalid, msdu_chained;
+
+               dma_unmap_single(htt->ar->dev,
+                                ATH10K_SKB_CB(msdu)->paddr,
+                                msdu->len + skb_tailroom(msdu),
+                                DMA_FROM_DEVICE);
+
+               ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+                               msdu->data, msdu->len + skb_tailroom(msdu));
+
+               rx_desc = (struct htt_rx_desc *)msdu->data;
+
+               /* FIXME: we must report msdu payload since this is what caller
+                *        expects now */
+               skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+               skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+
+               /*
+                * Sanity check - confirm the HW is finished filling in the
+                * rx data.
+                * If the HW and SW are working correctly, then it's guaranteed
+                * that the HW's MAC DMA is done before this point in the SW.
+                * To prevent the case that we handle a stale Rx descriptor,
+                * just assert for now until we have a way to recover.
+                */
+               if (!(__le32_to_cpu(rx_desc->attention.flags)
+                               & RX_ATTENTION_FLAGS_MSDU_DONE)) {
+                       ath10k_htt_rx_free_msdu_chain(*head_msdu);
+                       *head_msdu = NULL;
+                       msdu = NULL;
+                       ath10k_err("htt rx stopped. cannot recover\n");
+                       htt->rx_confused = true;
+                       break;
+               }
+
+               /*
+                * Copy the FW rx descriptor for this MSDU from the rx
+                * indication message into the MSDU's netbuf. HL uses the
+                * same rx indication message definition as LL, and simply
+                * appends new info (fields from the HW rx desc, and the
+                * MSDU payload itself). So, the offset into the rx
+                * indication message only has to account for the standard
+                * offset of the per-MSDU FW rx desc info within the
+                * message, and how many bytes of the per-MSDU FW rx desc
+                * info have already been consumed. (And the endianness of
+                * the host, since for a big-endian host, the rx ind
+                * message contents, including the per-MSDU rx desc bytes,
+                * were byteswapped during upload.)
+                */
+               if (*fw_desc_len > 0) {
+                       rx_desc->fw_desc.info0 = **fw_desc;
+                       /*
+                        * The target is expected to only provide the basic
+                        * per-MSDU rx descriptors. Just to be sure, verify
+                        * that the target has not attached extension data
+                        * (e.g. LRO flow ID).
+                        */
+
+                       /* or more, if there's extension data */
+                       (*fw_desc)++;
+                       (*fw_desc_len)--;
+               } else {
+                       /*
+                        * When an oversized A-MSDU is received, the FW will
+                        * lose some of the MSDU status - in that case fewer
+                        * FW descriptors are provided than there are MSDUs
+                        * inside this MPDU. Mark the FW descriptors so that
+                        * the frames are still delivered to the upper stack
+                        * if the MPDU has no CRC error.
+                        *
+                        * FIX THIS - the FW descriptors are actually for
+                        * MSDUs at the end of this A-MSDU rather than the
+                        * beginning.
+                        */
+                       rx_desc->fw_desc.info0 = 0;
+               }
+
+               msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+                                       & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
+                                          RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
+               msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+                             RX_MSDU_START_INFO0_MSDU_LENGTH);
+               msdu_chained = rx_desc->frag_info.ring2_more_count;
+
+               if (msdu_len_invalid)
+                       msdu_len = 0;
+
+               skb_trim(msdu, 0);
+               skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+               msdu_len -= msdu->len;
+
+               /* FIXME: Do chained buffers include htt_rx_desc or not? */
+               while (msdu_chained--) {
+                       struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+
+                       dma_unmap_single(htt->ar->dev,
+                                        ATH10K_SKB_CB(next)->paddr,
+                                        next->len + skb_tailroom(next),
+                                        DMA_FROM_DEVICE);
+
+                       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+                                       next->data,
+                                       next->len + skb_tailroom(next));
+
+                       skb_trim(next, 0);
+                       skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
+                       msdu_len -= next->len;
+
+                       msdu->next = next;
+                       msdu = next;
+                       msdu_chaining = 1;
+               }
+
+               if (msdu_len > 0) {
+                       /* This may suggest a FW bug */
+                       ath10k_warn("htt rx msdu len not consumed (%d)\n",
+                                   msdu_len);
+               }
+
+               last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+                               RX_MSDU_END_INFO0_LAST_MSDU;
+
+               if (last_msdu) {
+                       msdu->next = NULL;
+                       break;
+               } else {
+                       struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+                       msdu->next = next;
+                       msdu = next;
+               }
+       }
+       *tail_msdu = msdu;
+
+       /*
+        * Don't refill the ring yet.
+        *
+        * First, the elements popped here are still in use - it is not
+        * safe to overwrite them until the matching call to
+        * mpdu_desc_list_next. Second, for efficiency it is preferable to
+        * refill the rx ring with 1 PPDU's worth of rx buffers (something
+        * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
+        * (something like 3 buffers). Consequently, we'll rely on the txrx
+        * SW to tell us when it is done pulling all the PPDU's rx buffers
+        * out of the rx ring, and then refill it just once.
+        */
+
+       return msdu_chaining;
+}
+
+int ath10k_htt_rx_attach(struct ath10k_htt *htt)
+{
+       dma_addr_t paddr;
+       void *vaddr;
+       struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
+
+       htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
+       if (!is_power_of_2(htt->rx_ring.size)) {
+               ath10k_warn("htt rx ring size is not power of 2\n");
+               return -EINVAL;
+       }
+
+       htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+
+       /*
+        * Set the initial value for the level to which the rx ring
+        * should be filled, based on the max throughput and the
+        * worst likely latency for the host to fill the rx ring
+        * with new buffers. In theory, this fill level can be
+        * dynamically adjusted from the initial value set here, to
+        * reflect the actual host latency rather than a
+        * conservative assumption about the host latency.
+        */
+       htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
+
+       htt->rx_ring.netbufs_ring =
+               kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+                       GFP_KERNEL);
+       if (!htt->rx_ring.netbufs_ring)
+               goto err_netbuf;
+
+       vaddr = dma_alloc_coherent(htt->ar->dev,
+                  (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
+                  &paddr, GFP_DMA);
+       if (!vaddr)
+               goto err_dma_ring;
+
+       htt->rx_ring.paddrs_ring = vaddr;
+       htt->rx_ring.base_paddr = paddr;
+
+       vaddr = dma_alloc_coherent(htt->ar->dev,
+                                  sizeof(*htt->rx_ring.alloc_idx.vaddr),
+                                  &paddr, GFP_DMA);
+       if (!vaddr)
+               goto err_dma_idx;
+
+       htt->rx_ring.alloc_idx.vaddr = vaddr;
+       htt->rx_ring.alloc_idx.paddr = paddr;
+       htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+       *htt->rx_ring.alloc_idx.vaddr = 0;
+
+       /* Initialize the Rx refill retry timer */
+       setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
+
+       spin_lock_init(&htt->rx_ring.lock);
+
+       htt->rx_ring.fill_cnt = 0;
+       if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
+               goto err_fill_ring;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+                  htt->rx_ring.size, htt->rx_ring.fill_level);
+       return 0;
+
+err_fill_ring:
+       ath10k_htt_rx_ring_free(htt);
+       dma_free_coherent(htt->ar->dev,
+                         sizeof(*htt->rx_ring.alloc_idx.vaddr),
+                         htt->rx_ring.alloc_idx.vaddr,
+                         htt->rx_ring.alloc_idx.paddr);
+err_dma_idx:
+       dma_free_coherent(htt->ar->dev,
+                         (htt->rx_ring.size *
+                          sizeof(htt->rx_ring.paddrs_ring)),
+                         htt->rx_ring.paddrs_ring,
+                         htt->rx_ring.base_paddr);
+err_dma_ring:
+       kfree(htt->rx_ring.netbufs_ring);
+err_netbuf:
+       return -ENOMEM;
+}
+
+static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
+{
+       switch (type) {
+       case HTT_RX_MPDU_ENCRYPT_WEP40:
+       case HTT_RX_MPDU_ENCRYPT_WEP104:
+               return 4;
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+       case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+       case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
+       case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+               return 8;
+       case HTT_RX_MPDU_ENCRYPT_NONE:
+               return 0;
+       }
+
+       ath10k_warn("unknown encryption type %d\n", type);
+       return 0;
+}
+
+static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
+{
+       switch (type) {
+       case HTT_RX_MPDU_ENCRYPT_NONE:
+       case HTT_RX_MPDU_ENCRYPT_WEP40:
+       case HTT_RX_MPDU_ENCRYPT_WEP104:
+       case HTT_RX_MPDU_ENCRYPT_WEP128:
+       case HTT_RX_MPDU_ENCRYPT_WAPI:
+               return 0;
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+               return 4;
+       case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+               return 8;
+       }
+
+       ath10k_warn("unknown encryption type %d\n", type);
+       return 0;
+}
+
+/* Applies for first msdu in chain, before altering it. */
+static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
+{
+       struct htt_rx_desc *rxd;
+       enum rx_msdu_decap_format fmt;
+
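+       /* amsdu_pop leaves skb->data pointing at the MSDU payload; the
+        * HTT rx descriptor sits immediately before it in the buffer. */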
+       rxd = (void *)skb->data - sizeof(*rxd);
+       fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                       RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+       if (fmt == RX_MSDU_DECAP_RAW)
+               return (void *)skb->data;
+       else
+               return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+}
+
+/* This function only applies for first msdu in an msdu chain */
+static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
+{
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
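+               /* bit 7 of the QoS control field is the A-MSDU present bit */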
+               if (qc[0] & 0x80)
+                       return true;
+       }
+       return false;
+}
+
+static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+                       struct htt_rx_info *info)
+{
+       struct htt_rx_desc *rxd;
+       struct sk_buff *amsdu;
+       struct sk_buff *first;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff *skb = info->skb;
+       enum rx_msdu_decap_format fmt;
+       enum htt_rx_mpdu_encrypt_type enctype;
+       unsigned int hdr_len;
+       int crypto_len;
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                       RX_MSDU_START_INFO1_DECAP_FORMAT);
+       enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                       RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+       /* FIXME: No idea what assumptions are safe here. Need logs */
+       if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
+           (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
+               ath10k_htt_rx_free_msdu_chain(skb->next);
+               skb->next = NULL;
+               return -ENOTSUPP;
+       }
+
+       /* A-MSDU max is a little less than 8K */
+       amsdu = dev_alloc_skb(8*1024);
+       if (!amsdu) {
+               ath10k_warn("A-MSDU allocation failed\n");
+               ath10k_htt_rx_free_msdu_chain(skb->next);
+               skb->next = NULL;
+               return -ENOMEM;
+       }
+
+       if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
+               int hdrlen;
+
+               hdr = (void *)rxd->rx_hdr_status;
+               hdrlen = ieee80211_hdrlen(hdr->frame_control);
+               memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
+       }
+
+       first = skb;
+       while (skb) {
+               void *decap_hdr;
+               int decap_len = 0;
+
+               rxd = (void *)skb->data - sizeof(*rxd);
+               fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                               RX_MSDU_START_INFO1_DECAP_FORMAT);
+               decap_hdr = (void *)rxd->rx_hdr_status;
+
+               if (skb == first) {
+                       /* We receive linked A-MSDU subframe skbuffs. The
+                        * first one contains the original 802.11 header (and
+                        * possible crypto param) in the RX descriptor. The
+                        * A-MSDU subframe header follows that. Each part is
+                        * aligned to 4 byte boundary. */
+
+                       hdr = (void *)amsdu->data;
+                       hdr_len = ieee80211_hdrlen(hdr->frame_control);
+                       crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
+
+                       decap_hdr += roundup(hdr_len, 4);
+                       decap_hdr += roundup(crypto_len, 4);
+               }
+
+               if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
+                       /* Ethernet2 decap inserts ethernet header in place of
+                        * A-MSDU subframe header. */
+                       skb_pull(skb, 6 + 6 + 2);
+
+                       /* A-MSDU subframe header length */
+                       decap_len += 6 + 6 + 2;
+
+                       /* Ethernet2 decap also strips the LLC/SNAP so we need
+                        * to re-insert it. The LLC/SNAP follows A-MSDU
+                        * subframe header. */
+                       /* FIXME: Not all LLCs are 8 bytes long */
+                       decap_len += 8;
+
+                       memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+               }
+
+               if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
+                       /* Native Wifi decap inserts regular 802.11 header
+                        * in place of A-MSDU subframe header. */
+                       hdr = (struct ieee80211_hdr *)skb->data;
+                       skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+
+                       /* A-MSDU subframe header length */
+                       decap_len += 6 + 6 + 2;
+
+                       memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+               }
+
+               if (fmt == RX_MSDU_DECAP_RAW)
+                       skb_trim(skb, skb->len - 4); /* remove FCS */
+
+               memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+
+               /* A-MSDU subframes are padded to 4 bytes,
+                * but relative to the first subframe, not the whole MPDU */
+               if (skb->next && ((decap_len + skb->len) & 3)) {
+                       int padlen = 4 - ((decap_len + skb->len) & 3);
+                       memset(skb_put(amsdu, padlen), 0, padlen);
+               }
+
+               skb = skb->next;
+       }
+
+       info->skb = amsdu;
+       info->encrypt_type = enctype;
+
+       ath10k_htt_rx_free_msdu_chain(first);
+
+       return 0;
+}
+
+static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+{
+       struct sk_buff *skb = info->skb;
+       struct htt_rx_desc *rxd;
+       struct ieee80211_hdr *hdr;
+       enum rx_msdu_decap_format fmt;
+       enum htt_rx_mpdu_encrypt_type enctype;
+
+       /* This shouldn't happen. If it does then it may be a FW bug. */
+       if (skb->next) {
+               ath10k_warn("received chained non A-MSDU frame\n");
+               ath10k_htt_rx_free_msdu_chain(skb->next);
+               skb->next = NULL;
+       }
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                       RX_MSDU_START_INFO1_DECAP_FORMAT);
+       enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                       RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+       hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+
+       switch (fmt) {
+       case RX_MSDU_DECAP_RAW:
+               /* remove trailing FCS */
+               skb_trim(skb, skb->len - 4);
+               break;
+       case RX_MSDU_DECAP_NATIVE_WIFI:
+               /* nothing to do here */
+               break;
+       case RX_MSDU_DECAP_ETHERNET2_DIX:
+               /* macaddr[6] + macaddr[6] + ethertype[2] */
+               skb_pull(skb, 6 + 6 + 2);
+               break;
+       case RX_MSDU_DECAP_8023_SNAP_LLC:
+               /* macaddr[6] + macaddr[6] + len[2] */
+               /* we don't need this for non-A-MSDU */
+               skb_pull(skb, 6 + 6 + 2);
+               break;
+       }
+
+       if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
+               void *llc;
+               int llclen;
+
+               llclen = 8;
+               llc  = hdr;
+               llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
+               llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
+
+               skb_push(skb, llclen);
+               memcpy(skb->data, llc, llclen);
+       }
+
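+       /* decap formats from ETHERNET2_DIX onwards have had the original
+        * 802.11 header stripped, so restore it from the copy that the
+        * rx descriptor holds */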
+       if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
+               int len = ieee80211_hdrlen(hdr->frame_control);
+               skb_push(skb, len);
+               memcpy(skb->data, hdr, len);
+       }
+
+       info->skb = skb;
+       info->encrypt_type = enctype;
+       return 0;
+}
+
+static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
+{
+       struct htt_rx_desc *rxd;
+       u32 flags;
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       flags = __le32_to_cpu(rxd->attention.flags);
+
+       if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
+               return true;
+
+       return false;
+}
+
+static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
+{
+       struct htt_rx_desc *rxd;
+       u32 flags;
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       flags = __le32_to_cpu(rxd->attention.flags);
+
+       if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
+               return true;
+
+       return false;
+}
+
+static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
+                                 struct htt_rx_indication *rx)
+{
+       struct htt_rx_info info;
+       struct htt_rx_indication_mpdu_range *mpdu_ranges;
+       struct ieee80211_hdr *hdr;
+       int num_mpdu_ranges;
+       int fw_desc_len;
+       u8 *fw_desc;
+       int i, j;
+       int ret;
+
+       memset(&info, 0, sizeof(info));
+
+       fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
+       fw_desc = (u8 *)&rx->fw_desc;
+
+       num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+                            HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+       mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+                       rx, sizeof(*rx) +
+                       (sizeof(struct htt_rx_indication_mpdu_range) *
+                               num_mpdu_ranges));
+
+       for (i = 0; i < num_mpdu_ranges; i++) {
+               info.status = mpdu_ranges[i].mpdu_range_status;
+
+               for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
+                       struct sk_buff *msdu_head, *msdu_tail;
+                       enum htt_rx_mpdu_status status;
+                       int msdu_chaining;
+
+                       msdu_head = NULL;
+                       msdu_tail = NULL;
+                       msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
+                                                        &fw_desc,
+                                                        &fw_desc_len,
+                                                        &msdu_head,
+                                                        &msdu_tail);
+
+                       if (!msdu_head) {
+                               ath10k_warn("htt rx no data!\n");
+                               continue;
+                       }
+
+                       if (msdu_head->len == 0) {
+                               ath10k_dbg(ATH10K_DBG_HTT,
+                                          "htt rx dropping due to zero-len\n");
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       status = info.status;
+
+                       /* Skip mgmt frames; they are handled via WMI for now */
+                       if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       if (status != HTT_RX_IND_MPDU_STATUS_OK &&
+                           status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+                           !htt->ar->monitor_enabled) {
+                               ath10k_dbg(ATH10K_DBG_HTT,
+                                          "htt rx ignoring frame w/ status %d\n",
+                                          status);
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       /* FIXME: we do not support chaining yet.
+                        * this needs investigation */
+                       if (msdu_chaining) {
+                               ath10k_warn("msdu_chaining is true\n");
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       info.skb     = msdu_head;
+                       info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
+                       info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
+                       info.signal += rx->ppdu.combined_rssi;
+
+                       info.rate.info0 = rx->ppdu.info0;
+                       info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
+                       info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
+
+                       hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
+
+                       if (ath10k_htt_rx_hdr_is_amsdu(hdr))
+                               ret = ath10k_htt_rx_amsdu(htt, &info);
+                       else
+                               ret = ath10k_htt_rx_msdu(htt, &info);
+
+                       if (ret && !info.fcs_err) {
+                               ath10k_warn("error processing msdus %d\n", ret);
+                               dev_kfree_skb_any(info.skb);
+                               continue;
+                       }
+
+                       if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
+                               ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
+
+                       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
+                                       info.skb->data, info.skb->len);
+                       ath10k_process_rx(htt->ar, &info);
+               }
+       }
+
+       ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
+                               struct htt_rx_fragment_indication *frag)
+{
+       struct sk_buff *msdu_head, *msdu_tail;
+       struct htt_rx_desc *rxd;
+       enum rx_msdu_decap_format fmt;
+       struct htt_rx_info info = {};
+       struct ieee80211_hdr *hdr;
+       int msdu_chaining;
+       bool tkip_mic_err;
+       bool decrypt_err;
+       u8 *fw_desc;
+       int fw_desc_len, hdrlen, paramlen;
+       int trim;
+
+       fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
+       fw_desc = (u8 *)frag->fw_msdu_rx_desc;
+
+       msdu_head = NULL;
+       msdu_tail = NULL;
+       msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+                                               &msdu_head, &msdu_tail);
+
+       ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+
+       if (!msdu_head) {
+               ath10k_warn("htt rx frag no data\n");
+               return;
+       }
+
+       if (msdu_chaining || msdu_head != msdu_tail) {
+               ath10k_warn("aggregation with fragmentation?!\n");
+               ath10k_htt_rx_free_msdu_chain(msdu_head);
+               return;
+       }
+
+       /* FIXME: implement signal strength */
+
+       hdr = (struct ieee80211_hdr *)msdu_head->data;
+       rxd = (void *)msdu_head->data - sizeof(*rxd);
+       tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
+                               RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+       decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
+                               RX_ATTENTION_FLAGS_DECRYPT_ERR);
+       fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                       RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+       if (fmt != RX_MSDU_DECAP_RAW) {
+               ath10k_warn("we don't support non-raw fragmented rx yet\n");
+               dev_kfree_skb_any(msdu_head);
+               goto end;
+       }
+
+       info.skb = msdu_head;
+       info.status = HTT_RX_IND_MPDU_STATUS_OK;
+       info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                               RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+       if (tkip_mic_err) {
+               ath10k_warn("tkip mic error\n");
+               info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
+       }
+
+       if (decrypt_err) {
+               ath10k_warn("decryption err in fragmented rx\n");
+               dev_kfree_skb_any(info.skb);
+               goto end;
+       }
+
+       if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+               hdrlen = ieee80211_hdrlen(hdr->frame_control);
+               paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+
+               /* It is more efficient to move the header than the payload */
+               memmove((void *)info.skb->data + paramlen,
+                       (void *)info.skb->data,
+                       hdrlen);
+               skb_pull(info.skb, paramlen);
+               hdr = (struct ieee80211_hdr *)info.skb->data;
+       }
+
+       /* remove trailing FCS */
+       trim  = 4;
+
+       /* remove crypto trailer */
+       trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+
+       /* the last fragment of a TKIP frame carries the 8-byte Michael MIC */
+       if (!ieee80211_has_morefrags(hdr->frame_control) &&
+           info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+               trim += 8;
+
+       if (trim > info.skb->len) {
+               ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
+               dev_kfree_skb_any(info.skb);
+               goto end;
+       }
+
+       skb_trim(info.skb, info.skb->len - trim);
+
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
+                       info.skb->data, info.skb->len);
+       ath10k_process_rx(htt->ar, &info);
+
+end:
+       if (fw_desc_len > 0) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "expecting more fragmented rx in one indication %d\n",
+                          fw_desc_len);
+       }
+}
+
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ath10k_htt *htt = ar->htt;
+       struct htt_resp *resp = (struct htt_resp *)skb->data;
+
+       /* confirm alignment */
+       if (!IS_ALIGNED((unsigned long)skb->data, 4))
+               ath10k_warn("unaligned htt message, expect trouble\n");
+
+       ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
+                  resp->hdr.msg_type);
+       switch (resp->hdr.msg_type) {
+       case HTT_T2H_MSG_TYPE_VERSION_CONF: {
+               htt->target_version_major = resp->ver_resp.major;
+               htt->target_version_minor = resp->ver_resp.minor;
+               complete(&htt->target_version_received);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_RX_IND: {
+               ath10k_htt_rx_handler(htt, &resp->rx_ind);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_PEER_MAP: {
+               struct htt_peer_map_event ev = {
+                       .vdev_id = resp->peer_map.vdev_id,
+                       .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
+               };
+               memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
+               ath10k_peer_map_event(htt, &ev);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
+               struct htt_peer_unmap_event ev = {
+                       .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
+               };
+               ath10k_peer_unmap_event(htt, &ev);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
+               struct htt_tx_done tx_done = {};
+               int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
+
+               tx_done.msdu_id =
+                       __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+
+               switch (status) {
+               case HTT_MGMT_TX_STATUS_OK:
+                       break;
+               case HTT_MGMT_TX_STATUS_RETRY:
+                       tx_done.no_ack = true;
+                       break;
+               case HTT_MGMT_TX_STATUS_DROP:
+                       tx_done.discard = true;
+                       break;
+               }
+
+               ath10k_txrx_tx_completed(htt, &tx_done);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
+               struct htt_tx_done tx_done = {};
+               int status = MS(resp->data_tx_completion.flags,
+                               HTT_DATA_TX_STATUS);
+               __le16 msdu_id;
+               int i;
+
+               switch (status) {
+               case HTT_DATA_TX_STATUS_NO_ACK:
+                       tx_done.no_ack = true;
+                       break;
+               case HTT_DATA_TX_STATUS_OK:
+                       break;
+               case HTT_DATA_TX_STATUS_DISCARD:
+               case HTT_DATA_TX_STATUS_POSTPONE:
+               case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+                       tx_done.discard = true;
+                       break;
+               default:
+                       ath10k_warn("unhandled tx completion status %d\n",
+                                   status);
+                       tx_done.discard = true;
+                       break;
+               }
+
+               ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+                          resp->data_tx_completion.num_msdus);
+
+               for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+                       msdu_id = resp->data_tx_completion.msdus[i];
+                       tx_done.msdu_id = __le16_to_cpu(msdu_id);
+                       ath10k_txrx_tx_completed(htt, &tx_done);
+               }
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_SEC_IND: {
+               struct ath10k *ar = htt->ar;
+               struct htt_security_indication *ev = &resp->security_indication;
+
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "sec ind peer_id %d unicast %d type %d\n",
+                          __le16_to_cpu(ev->peer_id),
+                          !!(ev->flags & HTT_SECURITY_IS_UNICAST),
+                          MS(ev->flags, HTT_SECURITY_TYPE));
+               complete(&ar->install_key_done);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
+               ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+                               skb->data, skb->len);
+               ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_TEST:
+               /* FIX THIS */
+               break;
+       case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+       case HTT_T2H_MSG_TYPE_STATS_CONF:
+       case HTT_T2H_MSG_TYPE_RX_ADDBA:
+       case HTT_T2H_MSG_TYPE_RX_DELBA:
+       case HTT_T2H_MSG_TYPE_RX_FLUSH:
+       default:
+               ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
+                          resp->hdr.msg_type);
+               ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+                               skb->data, skb->len);
+               break;
+       }
+
+       /* Free the indication buffer */
+       dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
new file mode 100644 (file)
index 0000000..ef79106
--- /dev/null
@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "htt.h"
+#include "mac.h"
+#include "hif.h"
+#include "txrx.h"
+#include "debug.h"
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+       htt->num_pending_tx--;
+       if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
+               ieee80211_wake_queues(htt->ar->hw);
+}
+
+static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+       spin_lock_bh(&htt->tx_lock);
+       __ath10k_htt_tx_dec_pending(htt);
+       spin_unlock_bh(&htt->tx_lock);
+}
+
+static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+{
+       int ret = 0;
+
+       spin_lock_bh(&htt->tx_lock);
+
+       if (htt->num_pending_tx >= htt->max_num_pending_tx) {
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       htt->num_pending_tx++;
+       if (htt->num_pending_tx == htt->max_num_pending_tx)
+               ieee80211_stop_queues(htt->ar->hw);
+
+exit:
+       spin_unlock_bh(&htt->tx_lock);
+       return ret;
+}
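+
+/* Illustrative sketch, compiled out: the flow-control pattern the two
+ * helpers above implement. A tx path reserves a slot before handing a frame
+ * down and releases it on failure or completion; mac80211 queues are stopped
+ * at the high watermark and woken once a slot frees up. The helper name
+ * deliver_to_firmware() is hypothetical. */
+#if 0
+static int example_tx_flow_control(struct ath10k_htt *htt,
+                                   struct sk_buff *msdu)
+{
+       int ret;
+
+       ret = ath10k_htt_tx_inc_pending(htt);   /* may stop queues */
+       if (ret)
+               return ret;                     /* -EBUSY: tx ring is full */
+
+       ret = deliver_to_firmware(htt, msdu);   /* hypothetical helper */
+       if (ret)
+               ath10k_htt_tx_dec_pending(htt); /* may wake queues */
+
+       return ret;
+}
+#endif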
+
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+{
+       int msdu_id;
+
+       lockdep_assert_held(&htt->tx_lock);
+
+       msdu_id = find_first_zero_bit(htt->used_msdu_ids,
+                                     htt->max_num_pending_tx);
+       if (msdu_id == htt->max_num_pending_tx)
+               return -ENOBUFS;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
+       __set_bit(msdu_id, htt->used_msdu_ids);
+       return msdu_id;
+}
+
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
+{
+       lockdep_assert_held(&htt->tx_lock);
+
+       if (!test_bit(msdu_id, htt->used_msdu_ids))
+               ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+       __clear_bit(msdu_id, htt->used_msdu_ids);
+}
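+
+/* Illustrative sketch, compiled out: msdu ids are plain bits in the
+ * used_msdu_ids bitmap, so both allocation and release must happen under
+ * tx_lock. The function below is hypothetical and only shows the intended
+ * lifecycle. */
+#if 0
+static int example_msdu_id_roundtrip(struct ath10k_htt *htt)
+{
+       int msdu_id;
+
+       spin_lock_bh(&htt->tx_lock);
+       msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);     /* first zero bit */
+       spin_unlock_bh(&htt->tx_lock);
+       if (msdu_id < 0)
+               return msdu_id;                         /* -ENOBUFS */
+
+       /* ... reference msdu_id in a tx descriptor and send it ... */
+
+       spin_lock_bh(&htt->tx_lock);
+       ath10k_htt_tx_free_msdu_id(htt, msdu_id);       /* clear the bit */
+       spin_unlock_bh(&htt->tx_lock);
+       return 0;
+}
+#endif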
+
+int ath10k_htt_tx_attach(struct ath10k_htt *htt)
+{
+       u8 pipe;
+
+       spin_lock_init(&htt->tx_lock);
+       init_waitqueue_head(&htt->empty_tx_wq);
+
+       /* At startup the number of free queue entries hints at the
+        * maximum queue length */
+       pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
+       htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
+                                                                  pipe);
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+                  htt->max_num_pending_tx);
+
+       htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
+                                 htt->max_num_pending_tx, GFP_KERNEL);
+       if (!htt->pending_tx)
+               return -ENOMEM;
+
+       htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
+                                    BITS_TO_LONGS(htt->max_num_pending_tx),
+                                    GFP_KERNEL);
+       if (!htt->used_msdu_ids) {
+               kfree(htt->pending_tx);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
+{
+       struct sk_buff *txdesc;
+       int msdu_id;
+
+       /* No locks needed. Called after communication with the device has
+        * been stopped. */
+
+       for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
+               if (!test_bit(msdu_id, htt->used_msdu_ids))
+                       continue;
+
+               txdesc = htt->pending_tx[msdu_id];
+               if (!txdesc)
+                       continue;
+
+               ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
+                          msdu_id);
+
+               if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+                       ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+
+               ATH10K_SKB_CB(txdesc)->htt.discard = true;
+               ath10k_txrx_tx_unref(htt, txdesc);
+       }
+}
+
+void ath10k_htt_tx_detach(struct ath10k_htt *htt)
+{
+       ath10k_htt_tx_cleanup_pending(htt);
+       kfree(htt->pending_tx);
+       kfree(htt->used_msdu_ids);
+}
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+       struct ath10k_htt *htt = ar->htt;
+
+       if (skb_cb->htt.is_conf) {
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
+       if (skb_cb->is_aborted) {
+               skb_cb->htt.discard = true;
+
+               /* if the skbuff is aborted we need to make sure the tx
+                * resources are freed exactly once. We can't simply run
+                * tx_unref() twice here: if the htt tx completion came in
+                * earlier we'd access already-freed memory */
+               if (skb_cb->htt.refcount > 1)
+                       skb_cb->htt.refcount = 1;
+       }
+
+       ath10k_txrx_tx_unref(htt, skb);
+}
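+
+/* Illustrative sketch, compiled out: a tx descriptor is submitted with
+ * htt.refcount == 2 (see ath10k_htt_mgmt_tx() and ath10k_htt_tx() below).
+ * One reference is dropped by this HTC send-completion path and one by the
+ * HTT tx-completion event, so the descriptor survives whichever completion
+ * arrives first. This assumes ath10k_txrx_tx_unref() (defined in txrx.c)
+ * behaves roughly like this: */
+#if 0
+void sketch_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+{
+       if (--ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+               return;         /* the other completion is still pending */
+
+       /* both completions seen: report tx status and free the descriptor */
+}
+#endif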
+
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
+{
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       int len = 0;
+       int ret;
+
+       len += sizeof(cmd->hdr);
+       len += sizeof(cmd->ver_req);
+
+       skb = ath10k_htc_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, len);
+       cmd = (struct htt_cmd *)skb->data;
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
+
+       ATH10K_SKB_CB(skb)->htt.is_conf = true;
+
+       ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
+
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+{
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       struct htt_rx_ring_setup_ring *ring;
+       const int num_rx_ring = 1;
+       u16 flags;
+       u32 fw_idx;
+       int len;
+       int ret;
+
+       /*
+        * the HW expects the buffer to be an integral number of 4-byte
+        * "words"
+        */
+       BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+       BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+       len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+           + (sizeof(*ring) * num_rx_ring);
+       skb = ath10k_htc_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, len);
+
+       cmd = (struct htt_cmd *)skb->data;
+       ring = &cmd->rx_setup.rings[0];
+
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+       cmd->rx_setup.hdr.num_rings = 1;
+
+       /* FIXME: do we need all of this? */
+       flags = 0;
+       flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+       flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+       flags |= HTT_RX_RING_FLAGS_PPDU_START;
+       flags |= HTT_RX_RING_FLAGS_PPDU_END;
+       flags |= HTT_RX_RING_FLAGS_MPDU_START;
+       flags |= HTT_RX_RING_FLAGS_MPDU_END;
+       flags |= HTT_RX_RING_FLAGS_MSDU_START;
+       flags |= HTT_RX_RING_FLAGS_MSDU_END;
+       flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+       flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+       flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+       flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+       flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+       flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+       flags |= HTT_RX_RING_FLAGS_NULL_RX;
+       flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+
+       fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+       ring->fw_idx_shadow_reg_paddr =
+               __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
+       ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
+       ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+       ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+       ring->flags = __cpu_to_le16(flags);
+       ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+
+       ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+       ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+       ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+       ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+       ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+       ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+       ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+       ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+       ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+       ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+
+#undef desc_offset
+
+       ATH10K_SKB_CB(skb)->htt.is_conf = true;
+
+       ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
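+
+/* Worked example for the ring offsets above: they are expressed in 4-byte
+ * words, not bytes. If, say, the attention field sat at byte offset 16
+ * within struct htt_rx_desc, desc_offset(attention) would evaluate to
+ * offsetof(struct htt_rx_desc, attention) / 4 == 4. The byte offset 16 is
+ * hypothetical; the real layout is defined in rx_desc.h. */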
+
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+       struct device *dev = htt->ar->dev;
+       struct ath10k_skb_cb *skb_cb;
+       struct sk_buff *txdesc = NULL;
+       struct htt_cmd *cmd;
+       u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+       int len = 0;
+       int msdu_id = -1;
+       int res;
+
+       res = ath10k_htt_tx_inc_pending(htt);
+       if (res)
+               return res;
+
+       len += sizeof(cmd->hdr);
+       len += sizeof(cmd->mgmt_tx);
+
+       txdesc = ath10k_htc_alloc_skb(len);
+       if (!txdesc) {
+               res = -ENOMEM;
+               goto err;
+       }
+
+       spin_lock_bh(&htt->tx_lock);
+       msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
+       if (msdu_id < 0) {
+               spin_unlock_bh(&htt->tx_lock);
+               res = msdu_id;
+               goto err;
+       }
+       htt->pending_tx[msdu_id] = txdesc;
+       spin_unlock_bh(&htt->tx_lock);
+
+       res = ath10k_skb_map(dev, msdu);
+       if (res)
+               goto err;
+
+       skb_put(txdesc, len);
+       cmd = (struct htt_cmd *)txdesc->data;
+       cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
+       cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+       cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
+       cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
+       cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
+       memcpy(cmd->mgmt_tx.hdr, msdu->data,
+              min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
+
+       /* the refcount is decremented by the HTC and HTT completions; once
+        * it reaches zero the descriptor is freed */
+       skb_cb = ATH10K_SKB_CB(txdesc);
+       skb_cb->htt.msdu_id = msdu_id;
+       skb_cb->htt.refcount = 2;
+       skb_cb->htt.msdu = msdu;
+
+       res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+       if (res)
+               goto err;
+
+       return 0;
+
+err:
+       ath10k_skb_unmap(dev, msdu);
+
+       if (txdesc)
+               dev_kfree_skb_any(txdesc);
+       if (msdu_id >= 0) {
+               spin_lock_bh(&htt->tx_lock);
+               htt->pending_tx[msdu_id] = NULL;
+               ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+               spin_unlock_bh(&htt->tx_lock);
+       }
+       ath10k_htt_tx_dec_pending(htt);
+       return res;
+}
+
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+       struct device *dev = htt->ar->dev;
+       struct htt_cmd *cmd;
+       struct htt_data_tx_desc_frag *tx_frags;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+       struct ath10k_skb_cb *skb_cb;
+       struct sk_buff *txdesc = NULL;
+       struct sk_buff *txfrag = NULL;
+       u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+       u8 tid;
+       int prefetch_len, desc_len, frag_len;
+       dma_addr_t frags_paddr;
+       int msdu_id = -1;
+       int res;
+       u8 flags0;
+       u16 flags1;
+
+       res = ath10k_htt_tx_inc_pending(htt);
+       if (res)
+               return res;
+
+       prefetch_len = min(htt->prefetch_len, msdu->len);
+       prefetch_len = roundup(prefetch_len, 4);
+
+       desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
+       frag_len = sizeof(*tx_frags) * 2;
+
+       txdesc = ath10k_htc_alloc_skb(desc_len);
+       if (!txdesc) {
+               res = -ENOMEM;
+               goto err;
+       }
+
+       txfrag = dev_alloc_skb(frag_len);
+       if (!txfrag) {
+               res = -ENOMEM;
+               goto err;
+       }
+
+       if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
+               ath10k_warn("htt alignment check failed. dropping packet.\n");
+               res = -EIO;
+               goto err;
+       }
+
+       spin_lock_bh(&htt->tx_lock);
+       msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
+       if (msdu_id < 0) {
+               spin_unlock_bh(&htt->tx_lock);
+               res = msdu_id;
+               goto err;
+       }
+       htt->pending_tx[msdu_id] = txdesc;
+       spin_unlock_bh(&htt->tx_lock);
+
+       res = ath10k_skb_map(dev, msdu);
+       if (res)
+               goto err;
+
+       /* tx fragment list must be terminated with zero-entry */
+       skb_put(txfrag, frag_len);
+       tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
+       tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+       tx_frags[0].len   = __cpu_to_le32(msdu->len);
+       tx_frags[1].paddr = __cpu_to_le32(0);
+       tx_frags[1].len   = __cpu_to_le32(0);
+
+       res = ath10k_skb_map(dev, txfrag);
+       if (res)
+               goto err;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
+                  (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+                  (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
+                       txfrag->data, frag_len);
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
+                       msdu->data, msdu->len);
+
+       skb_put(txdesc, desc_len);
+       cmd = (struct htt_cmd *)txdesc->data;
+       memset(cmd, 0, desc_len);
+
+       tid = ATH10K_SKB_CB(msdu)->htt.tid;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
+
+       flags0  = 0;
+       if (!ieee80211_has_protected(hdr->frame_control))
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+       flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+       flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+                    HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+       flags1  = 0;
+       flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+       flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+
+       frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
+
+       cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
+       cmd->data_tx.flags0      = flags0;
+       cmd->data_tx.flags1      = __cpu_to_le16(flags1);
+       cmd->data_tx.len         = __cpu_to_le16(msdu->len);
+       cmd->data_tx.id          = __cpu_to_le16(msdu_id);
+       cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+       cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);
+
+       memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
+
+       /* the refcount is decremented by the HTC and HTT completions; once
+        * it reaches zero the descriptor is freed */
+       skb_cb = ATH10K_SKB_CB(txdesc);
+       skb_cb->htt.msdu_id = msdu_id;
+       skb_cb->htt.refcount = 2;
+       skb_cb->htt.txfrag = txfrag;
+       skb_cb->htt.msdu = msdu;
+
+       res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+       if (res)
+               goto err;
+
+       return 0;
+err:
+       if (txfrag) {
+               ath10k_skb_unmap(dev, txfrag);
+               dev_kfree_skb_any(txfrag);
+       }
+       if (txdesc)
+               dev_kfree_skb_any(txdesc);
+       if (msdu_id >= 0) {
+               spin_lock_bh(&htt->tx_lock);
+               htt->pending_tx[msdu_id] = NULL;
+               ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+               spin_unlock_bh(&htt->tx_lock);
+       }
+       ath10k_htt_tx_dec_pending(htt);
+       ath10k_skb_unmap(dev, msdu);
+       return res;
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
new file mode 100644 (file)
index 0000000..44ed5af
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HW_H_
+#define _HW_H_
+
+#include "targaddrs.h"
+
+/* Supported FW version */
+#define SUPPORTED_FW_MAJOR     1
+#define SUPPORTED_FW_MINOR     0
+#define SUPPORTED_FW_RELEASE   0
+#define SUPPORTED_FW_BUILD     629
+
+/* QCA988X 1.0 definitions */
+#define QCA988X_HW_1_0_VERSION         0x4000002c
+#define QCA988X_HW_1_0_FW_DIR          "ath10k/QCA988X/hw1.0"
+#define QCA988X_HW_1_0_FW_FILE         "firmware.bin"
+#define QCA988X_HW_1_0_OTP_FILE                "otp.bin"
+#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA988X 2.0 definitions */
+#define QCA988X_HW_2_0_VERSION         0x4100016c
+#define QCA988X_HW_2_0_FW_DIR          "ath10k/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_FW_FILE         "firmware.bin"
+#define QCA988X_HW_2_0_OTP_FILE                "otp.bin"
+#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* Known peculiarities:
+ *  - current FW doesn't support raw rx mode (last tested v599)
+ *  - current FW crashes upon raw tx mode (last tested v599)
+ *  - raw frames appear in nwifi decap; raw and nwifi frames appear in
+ *    ethernet decap
+ *  - raw frames have an FCS, nwifi frames don't
+ *  - ethernet frames have the 802.11 header decapped and its parts (base
+ *    hdr, cipher param, llc/snap) are each aligned to 4-byte boundaries */
+enum ath10k_hw_txrx_mode {
+       ATH10K_HW_TXRX_RAW = 0,
+       ATH10K_HW_TXRX_NATIVE_WIFI = 1,
+       ATH10K_HW_TXRX_ETHERNET = 2,
+};
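+
+/* Illustrative note: TARGET_RX_DECAP_MODE below selects
+ * ATH10K_HW_TXRX_ETHERNET, so per the peculiarities above received frames
+ * arrive with the 802.11 header already decapped and the remaining parts
+ * 4-byte aligned. */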
+
+enum ath10k_mcast2ucast_mode {
+       ATH10K_MCAST2UCAST_DISABLED = 0,
+       ATH10K_MCAST2UCAST_ENABLED = 1,
+};
+
+#define TARGET_NUM_VDEVS                       8
+#define TARGET_NUM_PEER_AST                    2
+#define TARGET_NUM_WDS_ENTRIES                 32
+#define TARGET_DMA_BURST_SIZE                  0
+#define TARGET_MAC_AGGR_DELIM                  0
+#define TARGET_AST_SKID_LIMIT                  16
+#define TARGET_NUM_PEERS                       16
+#define TARGET_NUM_OFFLOAD_PEERS               0
+#define TARGET_NUM_OFFLOAD_REORDER_BUFS         0
+#define TARGET_NUM_PEER_KEYS                   2
+#define TARGET_NUM_TIDS                (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
+#define TARGET_TX_CHAIN_MASK                   (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_CHAIN_MASK                   (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_TIMEOUT_LO_PRI               100
+#define TARGET_RX_TIMEOUT_HI_PRI               40
+#define TARGET_RX_DECAP_MODE                   ATH10K_HW_TXRX_ETHERNET
+#define TARGET_SCAN_MAX_PENDING_REQS           4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV          3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV           3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES    8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV            3
+#define TARGET_NUM_MCAST_GROUPS                        0
+#define TARGET_NUM_MCAST_TABLE_ELEMS           0
+#define TARGET_MCAST2UCAST_MODE                        ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_TX_DBG_LOG_SIZE                 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
+#define TARGET_VOW_CONFIG                      0
+#define TARGET_NUM_MSDU_DESC                   (1024 + 400)
+#define TARGET_MAX_FRAG_ENTRIES                        0
+
+
+/* Number of Copy Engines supported */
+#define CE_COUNT 8
+
+/*
+ * Total number of PCIe MSI interrupts requested for all interrupt sources.
+ * The PCIe standard forces this to be a power of 2.
+ * Some host OSes limit the number of MSI requests that can be granted to 8,
+ * so for now we abide by this limit and avoid requesting more
+ * than that.
+ */
+#define MSI_NUM_REQUEST_LOG2   3
+#define MSI_NUM_REQUEST                (1<<MSI_NUM_REQUEST_LOG2)
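+
+/* Worked example: MSI_NUM_REQUEST = 1 << MSI_NUM_REQUEST_LOG2 = 1 << 3 = 8,
+ * i.e. exactly the 8-vector host limit noted above and a power of 2 as PCIe
+ * requires: one vector for firmware plus up to seven for Copy Engines. */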
+
+/*
+ * Granted MSIs are assigned as follows:
+ * firmware uses the first; the remaining MSIs, if any, are used by the
+ * Copy Engines.
+ * This mapping is known to both Target firmware and Host software.
+ * It may be changed as long as Host and Target are kept in sync.
+ */
+/* MSI for firmware (errors, etc.) */
+#define MSI_ASSIGN_FW          0
+
+/* MSIs for Copy Engines */
+#define MSI_ASSIGN_CE_INITIAL  1
+#define MSI_ASSIGN_CE_MAX      7
+
+/* as of IP3.7.1 */
+#define RTC_STATE_V_ON                         3
+
+#define RTC_STATE_COLD_RESET_MASK              0x00000400
+#define RTC_STATE_V_LSB                                0
+#define RTC_STATE_V_MASK                       0x00000007
+#define RTC_STATE_ADDRESS                      0x0000
+#define PCIE_SOC_WAKE_V_MASK                   0x00000001
+#define PCIE_SOC_WAKE_ADDRESS                  0x0004
+#define PCIE_SOC_WAKE_RESET                    0x00000000
+#define SOC_GLOBAL_RESET_ADDRESS               0x0008
+
+#define RTC_SOC_BASE_ADDRESS                   0x00004000
+#define RTC_WMAC_BASE_ADDRESS                  0x00005000
+#define MAC_COEX_BASE_ADDRESS                  0x00006000
+#define BT_COEX_BASE_ADDRESS                   0x00007000
+#define SOC_PCIE_BASE_ADDRESS                  0x00008000
+#define SOC_CORE_BASE_ADDRESS                  0x00009000
+#define WLAN_UART_BASE_ADDRESS                 0x0000c000
+#define WLAN_SI_BASE_ADDRESS                   0x00010000
+#define WLAN_GPIO_BASE_ADDRESS                 0x00014000
+#define WLAN_ANALOG_INTF_BASE_ADDRESS          0x0001c000
+#define WLAN_MAC_BASE_ADDRESS                  0x00020000
+#define EFUSE_BASE_ADDRESS                     0x00030000
+#define FPGA_REG_BASE_ADDRESS                  0x00039000
+#define WLAN_UART2_BASE_ADDRESS                        0x00054c00
+#define CE_WRAPPER_BASE_ADDRESS                        0x00057000
+#define CE0_BASE_ADDRESS                       0x00057400
+#define CE1_BASE_ADDRESS                       0x00057800
+#define CE2_BASE_ADDRESS                       0x00057c00
+#define CE3_BASE_ADDRESS                       0x00058000
+#define CE4_BASE_ADDRESS                       0x00058400
+#define CE5_BASE_ADDRESS                       0x00058800
+#define CE6_BASE_ADDRESS                       0x00058c00
+#define CE7_BASE_ADDRESS                       0x00059000
+#define DBI_BASE_ADDRESS                       0x00060000
+#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS     0x0006c000
+#define PCIE_LOCAL_BASE_ADDRESS                        0x00080000
+
+#define SOC_RESET_CONTROL_OFFSET               0x00000000
+#define SOC_RESET_CONTROL_SI0_RST_MASK         0x00000001
+#define SOC_CPU_CLOCK_OFFSET                   0x00000020
+#define SOC_CPU_CLOCK_STANDARD_LSB             0
+#define SOC_CPU_CLOCK_STANDARD_MASK            0x00000003
+#define SOC_CLOCK_CONTROL_OFFSET               0x00000028
+#define SOC_CLOCK_CONTROL_SI0_CLK_MASK         0x00000001
+#define SOC_SYSTEM_SLEEP_OFFSET                        0x000000c4
+#define SOC_LPO_CAL_OFFSET                     0x000000e0
+#define SOC_LPO_CAL_ENABLE_LSB                 20
+#define SOC_LPO_CAL_ENABLE_MASK                        0x00100000
+
+#define WLAN_RESET_CONTROL_COLD_RST_MASK       0x00000008
+#define WLAN_RESET_CONTROL_WARM_RST_MASK       0x00000004
+#define WLAN_SYSTEM_SLEEP_DISABLE_LSB          0
+#define WLAN_SYSTEM_SLEEP_DISABLE_MASK         0x00000001
+
+#define WLAN_GPIO_PIN0_ADDRESS                 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_MASK             0x00007800
+#define WLAN_GPIO_PIN1_ADDRESS                 0x0000002c
+#define WLAN_GPIO_PIN1_CONFIG_MASK             0x00007800
+#define WLAN_GPIO_PIN10_ADDRESS                        0x00000050
+#define WLAN_GPIO_PIN11_ADDRESS                        0x00000054
+#define WLAN_GPIO_PIN12_ADDRESS                        0x00000058
+#define WLAN_GPIO_PIN13_ADDRESS                        0x0000005c
+
+#define CLOCK_GPIO_OFFSET                      0xffffffff
+#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB           0
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK          0
+
+#define SI_CONFIG_OFFSET                       0x00000000
+#define SI_CONFIG_BIDIR_OD_DATA_LSB            18
+#define SI_CONFIG_BIDIR_OD_DATA_MASK           0x00040000
+#define SI_CONFIG_I2C_LSB                      16
+#define SI_CONFIG_I2C_MASK                     0x00010000
+#define SI_CONFIG_POS_SAMPLE_LSB               7
+#define SI_CONFIG_POS_SAMPLE_MASK              0x00000080
+#define SI_CONFIG_INACTIVE_DATA_LSB            5
+#define SI_CONFIG_INACTIVE_DATA_MASK           0x00000020
+#define SI_CONFIG_INACTIVE_CLK_LSB             4
+#define SI_CONFIG_INACTIVE_CLK_MASK            0x00000010
+#define SI_CONFIG_DIVIDER_LSB                  0
+#define SI_CONFIG_DIVIDER_MASK                 0x0000000f
+#define SI_CS_OFFSET                           0x00000004
+#define SI_CS_DONE_ERR_MASK                    0x00000400
+#define SI_CS_DONE_INT_MASK                    0x00000200
+#define SI_CS_START_LSB                                8
+#define SI_CS_START_MASK                       0x00000100
+#define SI_CS_RX_CNT_LSB                       4
+#define SI_CS_RX_CNT_MASK                      0x000000f0
+#define SI_CS_TX_CNT_LSB                       0
+#define SI_CS_TX_CNT_MASK                      0x0000000f
+
+#define SI_TX_DATA0_OFFSET                     0x00000008
+#define SI_TX_DATA1_OFFSET                     0x0000000c
+#define SI_RX_DATA0_OFFSET                     0x00000010
+#define SI_RX_DATA1_OFFSET                     0x00000014
+
+#define CORE_CTRL_CPU_INTR_MASK                        0x00002000
+#define CORE_CTRL_ADDRESS                      0x0000
+#define PCIE_INTR_ENABLE_ADDRESS               0x0008
+#define PCIE_INTR_CLR_ADDRESS                  0x0014
+#define SCRATCH_3_ADDRESS                      0x0030
+
+/* Firmware indications to the Host via SCRATCH_3 register. */
+#define FW_INDICATOR_ADDRESS   (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_IND_EVENT_PENDING                   1
+#define FW_IND_INITIALIZED                     2
+
+/* HOST_REG interrupt from firmware */
+#define PCIE_INTR_FIRMWARE_MASK                        0x00000400
+#define PCIE_INTR_CE_MASK_ALL                  0x0007f800
+
+#define DRAM_BASE_ADDRESS                      0x00400000
+
+#define MISSING 0
+
+#define SYSTEM_SLEEP_OFFSET                    SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_SYSTEM_SLEEP_OFFSET               SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_RESET_CONTROL_OFFSET              SOC_RESET_CONTROL_OFFSET
+#define CLOCK_CONTROL_OFFSET                   SOC_CLOCK_CONTROL_OFFSET
+#define CLOCK_CONTROL_SI0_CLK_MASK             SOC_CLOCK_CONTROL_SI0_CLK_MASK
+#define RESET_CONTROL_MBOX_RST_MASK            MISSING
+#define RESET_CONTROL_SI0_RST_MASK             SOC_RESET_CONTROL_SI0_RST_MASK
+#define GPIO_BASE_ADDRESS                      WLAN_GPIO_BASE_ADDRESS
+#define GPIO_PIN0_OFFSET                       WLAN_GPIO_PIN0_ADDRESS
+#define GPIO_PIN1_OFFSET                       WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_MASK                  WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN1_CONFIG_MASK                  WLAN_GPIO_PIN1_CONFIG_MASK
+#define SI_BASE_ADDRESS                                WLAN_SI_BASE_ADDRESS
+#define SCRATCH_BASE_ADDRESS                   SOC_CORE_BASE_ADDRESS
+#define LOCAL_SCRATCH_OFFSET                   0x18
+#define CPU_CLOCK_OFFSET                       SOC_CPU_CLOCK_OFFSET
+#define LPO_CAL_OFFSET                         SOC_LPO_CAL_OFFSET
+#define GPIO_PIN10_OFFSET                      WLAN_GPIO_PIN10_ADDRESS
+#define GPIO_PIN11_OFFSET                      WLAN_GPIO_PIN11_ADDRESS
+#define GPIO_PIN12_OFFSET                      WLAN_GPIO_PIN12_ADDRESS
+#define GPIO_PIN13_OFFSET                      WLAN_GPIO_PIN13_ADDRESS
+#define CPU_CLOCK_STANDARD_LSB                 SOC_CPU_CLOCK_STANDARD_LSB
+#define CPU_CLOCK_STANDARD_MASK                        SOC_CPU_CLOCK_STANDARD_MASK
+#define LPO_CAL_ENABLE_LSB                     SOC_LPO_CAL_ENABLE_LSB
+#define LPO_CAL_ENABLE_MASK                    SOC_LPO_CAL_ENABLE_MASK
+#define ANALOG_INTF_BASE_ADDRESS               WLAN_ANALOG_INTF_BASE_ADDRESS
+#define MBOX_BASE_ADDRESS                      MISSING
+#define INT_STATUS_ENABLE_ERROR_LSB            MISSING
+#define INT_STATUS_ENABLE_ERROR_MASK           MISSING
+#define INT_STATUS_ENABLE_CPU_LSB              MISSING
+#define INT_STATUS_ENABLE_CPU_MASK             MISSING
+#define INT_STATUS_ENABLE_COUNTER_LSB          MISSING
+#define INT_STATUS_ENABLE_COUNTER_MASK         MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB                MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK       MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB   MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK  MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB    MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK   MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB      MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK     MISSING
+#define INT_STATUS_ENABLE_ADDRESS              MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_LSB          MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_MASK         MISSING
+#define HOST_INT_STATUS_ADDRESS                        MISSING
+#define CPU_INT_STATUS_ADDRESS                 MISSING
+#define ERROR_INT_STATUS_ADDRESS               MISSING
+#define ERROR_INT_STATUS_WAKEUP_MASK           MISSING
+#define ERROR_INT_STATUS_WAKEUP_LSB            MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK     MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB      MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK      MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB       MISSING
+#define COUNT_DEC_ADDRESS                      MISSING
+#define HOST_INT_STATUS_CPU_MASK               MISSING
+#define HOST_INT_STATUS_CPU_LSB                        MISSING
+#define HOST_INT_STATUS_ERROR_MASK             MISSING
+#define HOST_INT_STATUS_ERROR_LSB              MISSING
+#define HOST_INT_STATUS_COUNTER_MASK           MISSING
+#define HOST_INT_STATUS_COUNTER_LSB            MISSING
+#define RX_LOOKAHEAD_VALID_ADDRESS             MISSING
+#define WINDOW_DATA_ADDRESS                    MISSING
+#define WINDOW_READ_ADDR_ADDRESS               MISSING
+#define WINDOW_WRITE_ADDR_ADDRESS              MISSING
+
+#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
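+
+/* Worked example: with RTC_STATE_V_MASK = 0x00000007 and RTC_STATE_V_LSB = 0,
+ * RTC_STATE_V_GET(0x403) == 3 == RTC_STATE_V_ON. */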
+
+#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
new file mode 100644 (file)
index 0000000..1285554
--- /dev/null
@@ -0,0 +1,3066 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "htt.h"
+#include "txrx.h"
+
+/**********/
+/* Crypto */
+/**********/
+
+static int ath10k_send_key(struct ath10k_vif *arvif,
+                          struct ieee80211_key_conf *key,
+                          enum set_key_cmd cmd,
+                          const u8 *macaddr)
+{
+       struct wmi_vdev_install_key_arg arg = {
+               .vdev_id = arvif->vdev_id,
+               .key_idx = key->keyidx,
+               .key_len = key->keylen,
+               .key_data = key->key,
+               .macaddr = macaddr,
+       };
+
+       if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+               arg.key_flags = WMI_KEY_PAIRWISE;
+       else
+               arg.key_flags = WMI_KEY_GROUP;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               arg.key_cipher = WMI_CIPHER_AES_CCM;
+               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+               arg.key_cipher = WMI_CIPHER_TKIP;
+               arg.key_txmic_len = 8;
+               arg.key_rxmic_len = 8;
+               break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               arg.key_cipher = WMI_CIPHER_WEP;
+               /* AP/IBSS mode requires the self-key to be groupwise;
+                * otherwise a pairwise key must be set */
+               if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
+                       arg.key_flags = WMI_KEY_PAIRWISE;
+               break;
+       default:
+               ath10k_warn("cipher %d is not supported\n", key->cipher);
+               return -EOPNOTSUPP;
+       }
+
+       if (cmd == DISABLE_KEY) {
+               arg.key_cipher = WMI_CIPHER_NONE;
+               arg.key_data = NULL;
+       }
+
+       return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
+}
+
+static int ath10k_install_key(struct ath10k_vif *arvif,
+                             struct ieee80211_key_conf *key,
+                             enum set_key_cmd cmd,
+                             const u8 *macaddr)
+{
+       struct ath10k *ar = arvif->ar;
+       int ret;
+
+       INIT_COMPLETION(ar->install_key_done);
+
+       ret = ath10k_send_key(arvif, key, cmd, macaddr);
+       if (ret)
+               return ret;
+
+       ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
+       if (ret == 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
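+
+/* Note on the synchronous pattern above: the completion is re-armed before
+ * the WMI command is sent; the HTT security indication handler in htt_rx.c
+ * (HTT_T2H_MSG_TYPE_SEC_IND) fires complete(&ar->install_key_done) when the
+ * target confirms the key, and the caller gives up after three seconds
+ * otherwise. */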
+
+static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
+                                       const u8 *addr)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ath10k_peer *peer;
+       int ret;
+       int i;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!peer)
+               return -ENOENT;
+
+       for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
+               if (arvif->wep_keys[i] == NULL)
+                       continue;
+
+               ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+                                        addr);
+               if (ret)
+                       return ret;
+
+               peer->keys[i] = arvif->wep_keys[i];
+       }
+
+       return 0;
+}
+
+static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
+                                 const u8 *addr)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ath10k_peer *peer;
+       int first_errno = 0;
+       int ret;
+       int i;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!peer)
+               return -ENOENT;
+
+       for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+               if (peer->keys[i] == NULL)
+                       continue;
+
+               ret = ath10k_install_key(arvif, peer->keys[i],
+                                        DISABLE_KEY, addr);
+               if (ret && first_errno == 0)
+                       first_errno = ret;
+
+               if (ret)
+                       ath10k_warn("could not remove peer wep key %d (%d)\n",
+                                   i, ret);
+
+               peer->keys[i] = NULL;
+       }
+
+       return first_errno;
+}
+
+static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
+                                struct ieee80211_key_conf *key)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ath10k_peer *peer;
+       u8 addr[ETH_ALEN];
+       int first_errno = 0;
+       int ret;
+       int i;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       for (;;) {
+               /* since ath10k_install_key() sleeps we can't hold data_lock
+                * all the time, so we try to remove the keys incrementally */
+               spin_lock_bh(&ar->data_lock);
+               i = 0;
+               list_for_each_entry(peer, &ar->peers, list) {
+                       for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+                               if (peer->keys[i] == key) {
+                                       memcpy(addr, peer->addr, ETH_ALEN);
+                                       peer->keys[i] = NULL;
+                                       break;
+                               }
+                       }
+
+                       if (i < ARRAY_SIZE(peer->keys))
+                               break;
+               }
+               spin_unlock_bh(&ar->data_lock);
+
+               if (i == ARRAY_SIZE(peer->keys))
+                       break;
+
+               ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr);
+               if (ret && first_errno == 0)
+                       first_errno = ret;
+
+               if (ret)
+                       ath10k_warn("could not remove key for %pM\n", addr);
+       }
+
+       return first_errno;
+}
+
+
+/*********************/
+/* General utilities */
+/*********************/
+
+static inline enum wmi_phy_mode
+chan_to_phymode(const struct cfg80211_chan_def *chandef)
+{
+       enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+       switch (chandef->chan->band) {
+       case IEEE80211_BAND_2GHZ:
+               switch (chandef->width) {
+               case NL80211_CHAN_WIDTH_20_NOHT:
+                       phymode = MODE_11G;
+                       break;
+               case NL80211_CHAN_WIDTH_20:
+                       phymode = MODE_11NG_HT20;
+                       break;
+               case NL80211_CHAN_WIDTH_40:
+                       phymode = MODE_11NG_HT40;
+                       break;
+               case NL80211_CHAN_WIDTH_80:
+               case NL80211_CHAN_WIDTH_80P80:
+               case NL80211_CHAN_WIDTH_160:
+                       phymode = MODE_UNKNOWN;
+                       break;
+               }
+               break;
+       case IEEE80211_BAND_5GHZ:
+               switch (chandef->width) {
+               case NL80211_CHAN_WIDTH_20_NOHT:
+                       phymode = MODE_11A;
+                       break;
+               case NL80211_CHAN_WIDTH_20:
+                       phymode = MODE_11NA_HT20;
+                       break;
+               case NL80211_CHAN_WIDTH_40:
+                       phymode = MODE_11NA_HT40;
+                       break;
+               case NL80211_CHAN_WIDTH_80:
+                       phymode = MODE_11AC_VHT80;
+                       break;
+               case NL80211_CHAN_WIDTH_80P80:
+               case NL80211_CHAN_WIDTH_160:
+                       phymode = MODE_UNKNOWN;
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       WARN_ON(phymode == MODE_UNKNOWN);
+       return phymode;
+}
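+
+/* Worked example: a 5 GHz chandef with NL80211_CHAN_WIDTH_80 maps to
+ * MODE_11AC_VHT80, while 80+80 or 160 MHz (and any VHT width on 2.4 GHz)
+ * falls through to MODE_UNKNOWN and trips the WARN_ON above. */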
+
+static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
+{
+/*
+ * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ *   0 for no restriction
+ *   1 for 1/4 us
+ *   2 for 1/2 us
+ *   3 for 1 us
+ *   4 for 2 us
+ *   5 for 4 us
+ *   6 for 8 us
+ *   7 for 16 us
+ */
+       switch (mpdudensity) {
+       case 0:
+               return 0;
+       case 1:
+       case 2:
+       case 3:
+       /* Our lower layer calculations limit our precision to
+        * 1 microsecond */
+               return 1;
+       case 4:
+               return 2;
+       case 5:
+               return 4;
+       case 6:
+               return 8;
+       case 7:
+               return 16;
+       default:
+               return 0;
+       }
+}
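+
+/* Worked example: a peer advertising density code 5 gets 4 us of minimum
+ * MPDU start spacing, while codes 1-3 all collapse to 1 us because the
+ * lower layers cannot represent sub-microsecond spacing. */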
+
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+       if (ret)
+               return ret;
+
+       ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
+       if (ret)
+               return ret;
+
+       ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
+{
+       struct ath10k_peer *peer, *tmp;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+               if (peer->vdev_id != vdev_id)
+                       continue;
+
+               ath10k_warn("removing stale peer %pM from vdev_id %d\n",
+                           peer->addr, vdev_id);
+
+               list_del(&peer->list);
+               kfree(peer);
+       }
+       spin_unlock_bh(&ar->data_lock);
+}
+
+/************************/
+/* Interface management */
+/************************/
+
+static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
+{
+       int ret;
+
+       ret = wait_for_completion_timeout(&ar->vdev_setup_done,
+                                         ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+       if (ret == 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int ath10k_vdev_start(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ieee80211_conf *conf = &ar->hw->conf;
+       struct ieee80211_channel *channel = conf->chandef.chan;
+       struct wmi_vdev_start_request_arg arg = {};
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       INIT_COMPLETION(ar->vdev_setup_done);
+
+       arg.vdev_id = arvif->vdev_id;
+       arg.dtim_period = arvif->dtim_period;
+       arg.bcn_intval = arvif->beacon_interval;
+
+       arg.channel.freq = channel->center_freq;
+
+       arg.channel.band_center_freq1 = conf->chandef.center_freq1;
+
+       arg.channel.mode = chan_to_phymode(&conf->chandef);
+
+       arg.channel.min_power = channel->max_power * 3;
+       arg.channel.max_power = channel->max_power * 4;
+       arg.channel.max_reg_power = channel->max_reg_power * 4;
+       arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               arg.ssid = arvif->u.ap.ssid;
+               arg.ssid_len = arvif->u.ap.ssid_len;
+               arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+       } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+               arg.ssid = arvif->vif->bss_conf.ssid;
+               arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+       }
+
+       ret = ath10k_wmi_vdev_start(ar, &arg);
+       if (ret) {
+               ath10k_warn("WMI vdev start failed: ret %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("vdev setup failed %d\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       INIT_COMPLETION(ar->vdev_setup_done);
+
+       ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+       if (ret) {
+               ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("vdev setup failed %d\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
+static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
+{
+       struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
+       struct wmi_vdev_start_request_arg arg = {};
+       enum nl80211_channel_type type;
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
+
+       arg.vdev_id = vdev_id;
+       arg.channel.freq = channel->center_freq;
+       arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
+
+       /* TODO: set this up dynamically; what do we do in case we
+        * don't have any vifs? */
+       arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);
+
+       arg.channel.min_power = channel->max_power * 3;
+       arg.channel.max_power = channel->max_power * 4;
+       arg.channel.max_reg_power = channel->max_reg_power * 4;
+       arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+       ret = ath10k_wmi_vdev_start(ar, &arg);
+       if (ret) {
+               ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("Monitor vdev setup failed %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+       if (ret) {
+               ath10k_warn("Monitor vdev up failed: %d\n", ret);
+               goto vdev_stop;
+       }
+
+       ar->monitor_vdev_id = vdev_id;
+       ar->monitor_enabled = true;
+
+       return 0;
+
+vdev_stop:
+       ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+       if (ret)
+               ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+
+       return ret;
+}
+
+static int ath10k_monitor_stop(struct ath10k *ar)
+{
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       /* For some reason calling ath10k_wmi_vdev_down() here often causes
+        * the subsequent ath10k_wmi_vdev_stop() to fail, after which the
+        * monitor vdev cannot be started again without reloading the
+        * driver. Since we don't see such problems when we skip
+        * ath10k_wmi_vdev_down(), do so here.
+        */
+
+       ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+       if (ret)
+               ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret)
+               ath10k_warn("Monitor_down sync failed: %d\n", ret);
+
+       ar->monitor_enabled = false;
+       return ret;
+}
+
+static int ath10k_monitor_create(struct ath10k *ar)
+{
+       int bit, ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (ar->monitor_present) {
+               ath10k_warn("Monitor mode already enabled\n");
+               return 0;
+       }
+
+       bit = ffs(ar->free_vdev_map);
+       if (bit == 0) {
+               ath10k_warn("No free VDEV slots\n");
+               return -ENOMEM;
+       }
+
+       ar->monitor_vdev_id = bit - 1;
+       ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);
+
+       ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
+                                    WMI_VDEV_TYPE_MONITOR,
+                                    0, ar->mac_addr);
+       if (ret) {
+               ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
+               goto vdev_fail;
+       }
+
+       ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
+                  ar->monitor_vdev_id);
+
+       ar->monitor_present = true;
+       return 0;
+
+vdev_fail:
+       /*
+        * Restore the ID to the global map.
+        */
+       ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
+       return ret;
+}
+
+static int ath10k_monitor_destroy(struct ath10k *ar)
+{
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (!ar->monitor_present)
+               return 0;
+
+       ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+       if (ret) {
+               ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
+               return ret;
+       }
+
+       ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
+       ar->monitor_present = false;
+
+       ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
+                  ar->monitor_vdev_id);
+       return ret;
+}
+
+static void ath10k_control_beaconing(struct ath10k_vif *arvif,
+                               struct ieee80211_bss_conf *info)
+{
+       int ret = 0;
+
+       if (!info->enable_beacon) {
+               ath10k_vdev_stop(arvif);
+               return;
+       }
+
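+       /* Reset the software tx sequence number, presumably consumed
+        * by ath10k_tx_h_seq_no() on the tx path. */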
+       arvif->tx_seq_no = 0x1000;
+
+       ret = ath10k_vdev_start(arvif);
+       if (ret)
+               return;
+
+       ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid);
+       if (ret) {
+               ath10k_warn("Failed to bring up VDEV: %d\n",
+                           arvif->vdev_id);
+               return;
+       }
+       ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
+}
+
+static void ath10k_control_ibss(struct ath10k_vif *arvif,
+                               struct ieee80211_bss_conf *info,
+                               const u8 self_peer[ETH_ALEN])
+{
+       int ret = 0;
+
+       if (!info->ibss_joined) {
+               ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
+               if (ret)
+                       ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
+                                   self_peer, arvif->vdev_id, ret);
+
+               if (is_zero_ether_addr(arvif->u.ibss.bssid))
+                       return;
+
+               ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
+                                        arvif->u.ibss.bssid);
+               if (ret) {
+                       ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
+                                   arvif->u.ibss.bssid, arvif->vdev_id, ret);
+                       return;
+               }
+
+               memset(arvif->u.ibss.bssid, 0, ETH_ALEN);
+
+               return;
+       }
+
+       ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
+       if (ret) {
+               ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n",
+                           self_peer, arvif->vdev_id, ret);
+               return;
+       }
+
+       ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+                                       WMI_VDEV_PARAM_ATIM_WINDOW,
+                                       ATH10K_DEFAULT_ATIM);
+       if (ret)
+               ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
+                           arvif->vdev_id, ret);
+}
+
+/*
+ * Review this when mac80211 gains per-interface powersave support.
+ */
+static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath10k_generic_iter *ar_iter = data;
+       struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       enum wmi_sta_powersave_param param;
+       enum wmi_sta_ps_mode psmode;
+       int ret;
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
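+       /* When mac80211 enables powersave, program the FW inactivity
+        * timeout from dynamic_ps_timeout before enabling the PS mode
+        * itself. */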
+       if (conf->flags & IEEE80211_CONF_PS) {
+               psmode = WMI_STA_PS_MODE_ENABLED;
+               param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+               ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
+                                                 arvif->vdev_id,
+                                                 param,
+                                                 conf->dynamic_ps_timeout);
+               if (ret) {
+                       ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
+                                   arvif->vdev_id);
+                       return;
+               }
+
+               ar_iter->ret = ret;
+       } else {
+               psmode = WMI_STA_PS_MODE_DISABLED;
+       }
+
+       ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
+                                            psmode);
+       if (ar_iter->ret)
+               ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
+                           psmode, arvif->vdev_id);
+       else
+               ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
+                          psmode, arvif->vdev_id);
+}
+
+/**********************/
+/* Station management */
+/**********************/
+
+static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
+                                     struct ath10k_vif *arvif,
+                                     struct ieee80211_sta *sta,
+                                     struct ieee80211_bss_conf *bss_conf,
+                                     struct wmi_peer_assoc_complete_arg *arg)
+{
+       memcpy(arg->addr, sta->addr, ETH_ALEN);
+       arg->vdev_id = arvif->vdev_id;
+       arg->peer_aid = sta->aid;
+       arg->peer_flags |= WMI_PEER_AUTH;
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+               /*
+                * The FW seems to have problems with power save in STA
+                * mode when this parameter is set too high (e.g. 5).
+                * Often the FW doesn't send a NULL frame (with clean PS
+                * flags) even when the beacon indicates buffered
+                * frames, and sometimes it takes more than 10 seconds
+                * to wake up. Pinging the device from the AP then fails
+                * more than 50% of the time.
+                *
+                * Setting this parameter to 1 makes the FW check every
+                * beacon and wake up immediately once buffered data is
+                * detected.
+                */
+               arg->peer_listen_intval = 1;
+       else
+               arg->peer_listen_intval = ar->hw->conf.listen_interval;
+
+       arg->peer_num_spatial_streams = 1;
+
+       /*
+        * The assoc capabilities are available only in managed mode.
+        */
+       if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
+               arg->peer_caps = bss_conf->assoc_capability;
+}
+
+static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
+                                      struct ath10k_vif *arvif,
+                                      struct wmi_peer_assoc_complete_arg *arg)
+{
+       struct ieee80211_vif *vif = arvif->vif;
+       struct ieee80211_bss_conf *info = &vif->bss_conf;
+       struct cfg80211_bss *bss;
+       const u8 *rsnie = NULL;
+       const u8 *wpaie = NULL;
+
+       bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
+                              info->bssid, NULL, 0, 0, 0);
+       if (bss) {
+               const struct cfg80211_bss_ies *ies;
+
+               rcu_read_lock();
+               rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+               ies = rcu_dereference(bss->ies);
+
+               wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+                               WLAN_OUI_TYPE_MICROSOFT_WPA,
+                               ies->data,
+                               ies->len);
+               rcu_read_unlock();
+               cfg80211_put_bss(ar->hw->wiphy, bss);
+       }
+
+       /* FIXME: is basing this on the RSN/WPA IEs the correct approach? */
+       if (rsnie || wpaie) {
+               ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
+               arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+       }
+
+       if (wpaie) {
+               ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
+               arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+       }
+}
+
+static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+                                     struct ieee80211_sta *sta,
+                                     struct wmi_peer_assoc_complete_arg *arg)
+{
+       struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+       const struct ieee80211_supported_band *sband;
+       const struct ieee80211_rate *rates;
+       u32 ratemask;
+       int i;
+
+       sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
+       ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+       rates = sband->bitrates;
+
+       rateset->num_rates = 0;
+
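+       /* Walk the 32-bit legacy rate bitmap and record the hw rate
+        * value of every supported rate. */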
+       for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+               if (!(ratemask & 1))
+                       continue;
+
+               rateset->rates[rateset->num_rates] = rates->hw_value;
+               rateset->num_rates++;
+       }
+}
+
+static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+                                  struct ieee80211_sta *sta,
+                                  struct wmi_peer_assoc_complete_arg *arg)
+{
+       const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       int smps;
+       int i, n;
+
+       if (!ht_cap->ht_supported)
+               return;
+
+       arg->peer_flags |= WMI_PEER_HT;
+       arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+                                   ht_cap->ampdu_factor)) - 1;
+
+       arg->peer_mpdu_density =
+               ath10k_parse_mpdudensity(ht_cap->ampdu_density);
+
+       arg->peer_ht_caps = ht_cap->cap;
+       arg->peer_rate_caps |= WMI_RC_HT_FLAG;
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+               arg->peer_flags |= WMI_PEER_LDPC;
+
+       if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+               arg->peer_flags |= WMI_PEER_40MHZ;
+               arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
+       }
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+               arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+               arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+               arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
+               arg->peer_flags |= WMI_PEER_STBC;
+       }
+
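+       /* Translate the HT RX STBC stream count field into the
+        * corresponding WMI rate-capability bits. */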
+       if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+               u32 stbc;
+               stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+               stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+               stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
+               arg->peer_rate_caps |= stbc;
+               arg->peer_flags |= WMI_PEER_STBC;
+       }
+
+       smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+       smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+       if (smps == WLAN_HT_CAP_SM_PS_STATIC) {
+               arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
+               arg->peer_flags |= WMI_PEER_STATIC_MIMOPS;
+       } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) {
+               arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
+               arg->peer_flags |= WMI_PEER_DYN_MIMOPS;
+       }
+
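+       /* rx_mask[1] covers the 2-stream MCS rates and rx_mask[2] the
+        * 3-stream rates, so use them to infer stream support. */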
+       if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+               arg->peer_rate_caps |= WMI_RC_TS_FLAG;
+       else if (ht_cap->mcs.rx_mask[1])
+               arg->peer_rate_caps |= WMI_RC_DS_FLAG;
+
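+       /* Collect every advertised HT MCS index into the peer HT rate
+        * set and estimate the spatial stream count from the number of
+        * 8-MCS groups. */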
+       for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
+               if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
+                       arg->peer_ht_rates.rates[n++] = i;
+
+       arg->peer_ht_rates.num_rates = n;
+       arg->peer_num_spatial_streams = max((n+7) / 8, 1);
+
+       ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
+                  arg->peer_ht_rates.num_rates,
+                  arg->peer_num_spatial_streams);
+}
+
+static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
+                                      struct ath10k_vif *arvif,
+                                      struct ieee80211_sta *sta,
+                                      struct ieee80211_bss_conf *bss_conf,
+                                      struct wmi_peer_assoc_complete_arg *arg)
+{
+       u32 uapsd = 0;
+       u32 max_sp = 0;
+
+       if (sta->wme)
+               arg->peer_flags |= WMI_PEER_QOS;
+
+       if (sta->wme && sta->uapsd_queues) {
+               ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
+                          sta->uapsd_queues, sta->max_sp);
+
+               arg->peer_flags |= WMI_PEER_APSD;
+               arg->peer_flags |= WMI_RC_UAPSD_FLAG;
+
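+               /* Map each U-APSD enabled WMM AC onto the matching WMI
+                * per-AC delivery/trigger enable bits. */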
+               if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+                       uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+                                WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+               if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+                       uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+                                WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+               if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+                       uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+                                WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+               if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+                       uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+                                WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+               if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+                       max_sp = sta->max_sp;
+
+               ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+                                          sta->addr,
+                                          WMI_AP_PS_PEER_PARAM_UAPSD,
+                                          uapsd);
+
+               ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+                                          sta->addr,
+                                          WMI_AP_PS_PEER_PARAM_MAX_SP,
+                                          max_sp);
+
+               /* TODO: set this based on the STA listen interval and
+                * the beacon interval. sta->listen_interval is not yet
+                * available (a mac80211 patch is required), so use 10
+                * seconds for now. */
+               ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+                                          sta->addr,
+                                          WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
+                                          10);
+       }
+}
+
+static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar,
+                                       struct ath10k_vif *arvif,
+                                       struct ieee80211_sta *sta,
+                                       struct ieee80211_bss_conf *bss_conf,
+                                       struct wmi_peer_assoc_complete_arg *arg)
+{
+       if (bss_conf->qos)
+               arg->peer_flags |= WMI_PEER_QOS;
+}
+
+static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+                                   struct ieee80211_sta *sta,
+                                   struct wmi_peer_assoc_complete_arg *arg)
+{
+       const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+       if (!vht_cap->vht_supported)
+               return;
+
+       arg->peer_flags |= WMI_PEER_VHT;
+
+       arg->peer_vht_caps = vht_cap->cap;
+
+       if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+               arg->peer_flags |= WMI_PEER_80MHZ;
+
+       arg->peer_vht_rates.rx_max_rate =
+               __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+       arg->peer_vht_rates.rx_mcs_set =
+               __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+       arg->peer_vht_rates.tx_max_rate =
+               __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+       arg->peer_vht_rates.tx_mcs_set =
+               __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
+}
+
+static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
+                                   struct ath10k_vif *arvif,
+                                   struct ieee80211_sta *sta,
+                                   struct ieee80211_bss_conf *bss_conf,
+                                   struct wmi_peer_assoc_complete_arg *arg)
+{
+       switch (arvif->vdev_type) {
+       case WMI_VDEV_TYPE_AP:
+               ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg);
+               break;
+       case WMI_VDEV_TYPE_STA:
+               ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg);
+               break;
+       default:
+               break;
+       }
+}
+
+static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
+                                       struct ath10k_vif *arvif,
+                                       struct ieee80211_sta *sta,
+                                       struct wmi_peer_assoc_complete_arg *arg)
+{
+       enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+       /* FIXME: add VHT */
+
+       switch (ar->hw->conf.chandef.chan->band) {
+       case IEEE80211_BAND_2GHZ:
+               if (sta->ht_cap.ht_supported) {
+                       if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+                               phymode = MODE_11NG_HT40;
+                       else
+                               phymode = MODE_11NG_HT20;
+               } else {
+                       phymode = MODE_11G;
+               }
+
+               break;
+       case IEEE80211_BAND_5GHZ:
+               if (sta->ht_cap.ht_supported) {
+                       if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+                               phymode = MODE_11NA_HT40;
+                       else
+                               phymode = MODE_11NA_HT20;
+               } else {
+                       phymode = MODE_11A;
+               }
+
+               break;
+       default:
+               break;
+       }
+
+       arg->peer_phymode = phymode;
+       WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static int ath10k_peer_assoc(struct ath10k *ar,
+                            struct ath10k_vif *arvif,
+                            struct ieee80211_sta *sta,
+                            struct ieee80211_bss_conf *bss_conf)
+{
+       struct wmi_peer_assoc_complete_arg arg;
+
+       memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
+
+       ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
+       ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
+       ath10k_peer_assoc_h_rates(ar, sta, &arg);
+       ath10k_peer_assoc_h_ht(ar, sta, &arg);
+       ath10k_peer_assoc_h_vht(ar, sta, &arg);
+       ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
+       ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
+
+       return ath10k_wmi_peer_assoc(ar, &arg);
+}
+
+/* can be called only in mac80211 callbacks due to `key_count` usage */
+static void ath10k_bss_assoc(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *bss_conf)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ieee80211_sta *ap_sta;
+       int ret;
+
+       rcu_read_lock();
+
+       ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+       if (!ap_sta) {
+               ath10k_warn("Failed to find station entry for %pM\n",
+                           bss_conf->bssid);
+               rcu_read_unlock();
+               return;
+       }
+
+       ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
+       if (ret) {
+               ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
+               rcu_read_unlock();
+               return;
+       }
+
+       rcu_read_unlock();
+
+       ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
+                                bss_conf->bssid);
+       if (ret)
+               ath10k_warn("VDEV: %d up failed: ret %d\n",
+                           arvif->vdev_id, ret);
+       else
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "VDEV: %d associated, BSSID: %pM, AID: %d\n",
+                          arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+}
+
+/*
+ * FIXME: flush TIDs
+ */
+static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret;
+
+       /*
+        * For some reason, calling VDEV-DOWN before VDEV-STOP
+        * makes the FW send frames via HTT after disassociation.
+        * No idea why this happens, even though VDEV-DOWN is supposed
+        * to be analogous to link down, so just stop the VDEV.
+        */
+       ret = ath10k_vdev_stop(arvif);
+       if (!ret)
+               ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
+                          arvif->vdev_id);
+
+       /*
+        * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
+        * report beacons from previously associated network through HTT.
+        * This in turn would spam mac80211 WARN_ON if we bring down all
+        * interfaces as it expects there is no rx when no interface is
+        * running.
+        */
+       ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+       if (ret)
+               ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
+                          arvif->vdev_id, ret);
+
+       ath10k_wmi_flush_tx(ar);
+
+       arvif->def_wep_key_index = 0;
+}
+
+static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
+                               struct ieee80211_sta *sta)
+{
+       int ret = 0;
+
+       ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
+       if (ret) {
+               ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
+               return ret;
+       }
+
+       ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+       if (ret) {
+               ath10k_warn("could not install peer wep keys (%d)\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
+static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
+                                  struct ieee80211_sta *sta)
+{
+       int ret = 0;
+
+       ret = ath10k_clear_peer_keys(arvif, sta->addr);
+       if (ret) {
+               ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
+/**************/
+/* Regulatory */
+/**************/
+
+static int ath10k_update_channel_list(struct ath10k *ar)
+{
+       struct ieee80211_hw *hw = ar->hw;
+       struct ieee80211_supported_band **bands;
+       enum ieee80211_band band;
+       struct ieee80211_channel *channel;
+       struct wmi_scan_chan_list_arg arg = {0};
+       struct wmi_channel_arg *ch;
+       bool passive;
+       int len;
+       int ret;
+       int i;
+
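+       /* First pass: count the enabled channels so we know how big a
+        * WMI channel list to allocate. */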
+       bands = hw->wiphy->bands;
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               if (!bands[band])
+                       continue;
+
+               for (i = 0; i < bands[band]->n_channels; i++) {
+                       if (bands[band]->channels[i].flags &
+                           IEEE80211_CHAN_DISABLED)
+                               continue;
+
+                       arg.n_channels++;
+               }
+       }
+
+       len = sizeof(struct wmi_channel_arg) * arg.n_channels;
+       arg.channels = kzalloc(len, GFP_KERNEL);
+       if (!arg.channels)
+               return -ENOMEM;
+
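+       /* Second pass: fill in the per-channel parameters for the FW. */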
+       ch = arg.channels;
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               if (!bands[band])
+                       continue;
+
+               for (i = 0; i < bands[band]->n_channels; i++) {
+                       channel = &bands[band]->channels[i];
+
+                       if (channel->flags & IEEE80211_CHAN_DISABLED)
+                               continue;
+
+                       ch->allow_ht   = true;
+
+                       /* FIXME: when should we really allow VHT? */
+                       ch->allow_vht = true;
+
+                       ch->allow_ibss =
+                               !(channel->flags & IEEE80211_CHAN_NO_IBSS);
+
+                       ch->ht40plus =
+                               !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
+
+                       passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+                       ch->passive = passive;
+
+                       ch->freq = channel->center_freq;
+                       ch->min_power = channel->max_power * 3;
+                       ch->max_power = channel->max_power * 4;
+                       ch->max_reg_power = channel->max_reg_power * 4;
+                       ch->max_antenna_gain = channel->max_antenna_gain;
+                       ch->reg_class_id = 0; /* FIXME */
+
+                       /* FIXME: why use only legacy modes, why not any
+                        * HT/VHT modes? Would that even make any
+                        * difference? */
+                       if (channel->band == IEEE80211_BAND_2GHZ)
+                               ch->mode = MODE_11G;
+                       else
+                               ch->mode = MODE_11A;
+
+                       if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
+                               continue;
+
+                       ath10k_dbg(ATH10K_DBG_WMI,
+                                  "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+                                  __func__, ch - arg.channels, arg.n_channels,
+                                  ch->freq, ch->max_power, ch->max_reg_power,
+                                  ch->max_antenna_gain, ch->mode);
+
+                       ch++;
+               }
+       }
+
+       ret = ath10k_wmi_scan_chan_list(ar, &arg);
+       kfree(arg.channels);
+
+       return ret;
+}
+
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+                               struct regulatory_request *request)
+{
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct reg_dmn_pair_mapping *regpair;
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+       ret = ath10k_update_channel_list(ar);
+       if (ret)
+               ath10k_warn("could not update channel list (%d)\n", ret);
+
+       regpair = ar->ath_common.regulatory.regpair;
+       /* Target allows setting up per-band regdomain but ath_common provides
+        * a combined one only */
+       ret = ath10k_wmi_pdev_set_regdomain(ar,
+                                           regpair->regDmnEnum,
+                                           regpair->regDmnEnum, /* 2ghz */
+                                           regpair->regDmnEnum, /* 5ghz */
+                                           regpair->reg_2ghz_ctl,
+                                           regpair->reg_5ghz_ctl);
+       if (ret)
+               ath10k_warn("could not set pdev regdomain (%d)\n", ret);
+}
+
+/***************/
+/* TX handlers */
+/***************/
+
+/*
+ * Frames sent to the FW have to be in "Native Wifi" format.
+ * Strip the QoS field from the 802.11 header.
+ */
+static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
+                                      struct ieee80211_tx_control *control,
+                                      struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       u8 *qos_ctl;
+
+       if (!ieee80211_is_data_qos(hdr->frame_control))
+               return;
+
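+       /* Strip the 2-byte QoS control field by moving the rest of the
+        * frame down over it and trimming the skb. */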
+       qos_ctl = ieee80211_get_qos_ctl(hdr);
+       memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN,
+               skb->len - ieee80211_hdrlen(hdr->frame_control));
+       skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN);
+}
+
+static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_vif *vif = info->control.vif;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k *ar = arvif->ar;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_key_conf *key = info->control.hw_key;
+       int ret;
+
+       /* TODO AP mode should be implemented */
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       if (!ieee80211_has_protected(hdr->frame_control))
+               return;
+
+       if (!key)
+               return;
+
+       if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+           key->cipher != WLAN_CIPHER_SUITE_WEP104)
+               return;
+
+       if (key->keyidx == arvif->def_wep_key_index)
+               return;
+
+       ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
+
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                       WMI_VDEV_PARAM_DEF_KEYID,
+                                       key->keyidx);
+       if (ret) {
+               ath10k_warn("could not update wep keyidx (%d)\n", ret);
+               return;
+       }
+
+       arvif->def_wep_key_index = key->keyidx;
+}
+
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_vif *vif = info->control.vif;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+       /* This applies only to the P2P_GO case */
+       if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
+           arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+               return;
+
+       if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+               spin_lock_bh(&ar->data_lock);
+               if (arvif->u.ap.noa_data)
+                       if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+                                             GFP_ATOMIC))
+                               memcpy(skb_put(skb, arvif->u.ap.noa_len),
+                                      arvif->u.ap.noa_data,
+                                      arvif->u.ap.noa_len);
+               spin_unlock_bh(&ar->data_lock);
+       }
+}
+
+static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int ret;
+
+       if (ieee80211_is_mgmt(hdr->frame_control))
+               ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+       else if (ieee80211_is_nullfunc(hdr->frame_control))
+               /* FW does not report tx status properly for NullFunc frames
+                * unless they are sent through mgmt tx path. mac80211 sends
+                * those frames when it detects link/beacon loss and depends on
+                * the tx status to be correct. */
+               ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+       else
+               ret = ath10k_htt_tx(ar->htt, skb);
+
+       if (ret) {
+               ath10k_warn("tx failed (%d). dropping packet.\n", ret);
+               ieee80211_free_txskb(ar->hw, skb);
+       }
+}
+
+void ath10k_offchan_tx_purge(struct ath10k *ar)
+{
+       struct sk_buff *skb;
+
+       for (;;) {
+               skb = skb_dequeue(&ar->offchan_tx_queue);
+               if (!skb)
+                       break;
+
+               ieee80211_free_txskb(ar->hw, skb);
+       }
+}
+
+void ath10k_offchan_tx_work(struct work_struct *work)
+{
+       struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
+       struct ath10k_peer *peer;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff *skb;
+       const u8 *peer_addr;
+       int vdev_id;
+       int ret;
+
+       /* FW requirement: We must create a peer before FW will send out
+        * an offchannel frame. Otherwise the frame will be stuck and
+        * never transmitted. We delete the peer upon tx completion.
+        * It is unlikely that a peer for offchannel tx will already be
+        * present. However it may happen in rare cases, so account for that.
+        * Otherwise we might remove a legitimate peer and break stuff. */
+
+       for (;;) {
+               skb = skb_dequeue(&ar->offchan_tx_queue);
+               if (!skb)
+                       break;
+
+               mutex_lock(&ar->conf_mutex);
+
+               ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
+                          skb);
+
+               hdr = (struct ieee80211_hdr *)skb->data;
+               peer_addr = ieee80211_get_DA(hdr);
+               vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;
+
+               spin_lock_bh(&ar->data_lock);
+               peer = ath10k_peer_find(ar, vdev_id, peer_addr);
+               spin_unlock_bh(&ar->data_lock);
+
+               if (peer)
+                       ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
+                                  peer_addr, vdev_id);
+
+               if (!peer) {
+                       ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+                       if (ret)
+                               ath10k_warn("peer %pM on vdev %d not created (%d)\n",
+                                           peer_addr, vdev_id, ret);
+               }
+
+               spin_lock_bh(&ar->data_lock);
+               INIT_COMPLETION(ar->offchan_tx_completed);
+               ar->offchan_tx_skb = skb;
+               spin_unlock_bh(&ar->data_lock);
+
+               ath10k_tx_htt(ar, skb);
+
+               ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
+                                                 3 * HZ);
+               if (ret <= 0)
+                       ath10k_warn("timed out waiting for offchannel skb %p\n",
+                                   skb);
+
+               if (!peer) {
+                       ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
+                       if (ret)
+                               ath10k_warn("peer %pM on vdev %d not deleted (%d)\n",
+                                           peer_addr, vdev_id, ret);
+               }
+
+               mutex_unlock(&ar->conf_mutex);
+       }
+}
+
+/************/
+/* Scanning */
+/************/
+
+/*
+ * This gets called if we don't get a heartbeat during a scan.
+ * This may indicate the FW has hung and we need to abort the
+ * scan manually to prevent cancel_hw_scan() from deadlocking.
+ */
+void ath10k_reset_scan(unsigned long ptr)
+{
+       struct ath10k *ar = (struct ath10k *)ptr;
+
+       spin_lock_bh(&ar->data_lock);
+       if (!ar->scan.in_progress) {
+               spin_unlock_bh(&ar->data_lock);
+               return;
+       }
+
+       ath10k_warn("scan timeout. resetting. fw issue?\n");
+
+       if (ar->scan.is_roc)
+               ieee80211_remain_on_channel_expired(ar->hw);
+       else
+               ieee80211_scan_completed(ar->hw, 1 /* aborted */);
+
+       ar->scan.in_progress = false;
+       complete_all(&ar->scan.completed);
+       spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_abort_scan(struct ath10k *ar)
+{
+       struct wmi_stop_scan_arg arg = {
+               .req_id = 1, /* FIXME */
+               .req_type = WMI_SCAN_STOP_ONE,
+               .u.scan_id = ATH10K_SCAN_ID,
+       };
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       del_timer_sync(&ar->scan.timeout);
+
+       spin_lock_bh(&ar->data_lock);
+       if (!ar->scan.in_progress) {
+               spin_unlock_bh(&ar->data_lock);
+               return 0;
+       }
+
+       ar->scan.aborting = true;
+       spin_unlock_bh(&ar->data_lock);
+
+       ret = ath10k_wmi_stop_scan(ar, &arg);
+       if (ret) {
+               ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+               return -EIO;
+       }
+
+       ath10k_wmi_flush_tx(ar);
+
+       ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+       if (ret == 0)
+               ath10k_warn("timed out while waiting for scan to stop\n");
+
+       /* Scan completion may arrive right after we time out here, so
+        * check in_progress and tell mac80211 the scan is completed. If
+        * we don't do that and the FW fails to send us a scan completion
+        * indication, userspace won't be able to scan anymore. */
+       ret = 0;
+
+       spin_lock_bh(&ar->data_lock);
+       if (ar->scan.in_progress) {
+               ath10k_warn("could not stop scan. its still in progress\n");
+               ar->scan.in_progress = false;
+               ath10k_offchan_tx_purge(ar);
+               ret = -ETIMEDOUT;
+       }
+       spin_unlock_bh(&ar->data_lock);
+
+       return ret;
+}
+
+static int ath10k_start_scan(struct ath10k *ar,
+                            const struct wmi_start_scan_arg *arg)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       ret = ath10k_wmi_start_scan(ar, arg);
+       if (ret)
+               return ret;
+
+       /* Make sure we submit the command so the completion
+        * timeout makes sense. */
+       ath10k_wmi_flush_tx(ar);
+
+       ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+       if (ret == 0) {
+               ath10k_abort_scan(ar);
+               return ret;
+       }
+
+       /* The scan can complete earlier, before we even start the
+        * timer. In that case the timer handler checks
+        * ar->scan.in_progress and bails out if it's false. Add a 200ms
+        * margin to account for event/command processing. */
+       mod_timer(&ar->scan.timeout, jiffies +
+                 msecs_to_jiffies(arg->max_scan_time+200));
+       return 0;
+}
+
+/**********************/
+/* mac80211 callbacks */
+/**********************/
+
+static void ath10k_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = NULL;
+       u32 vdev_id = 0;
+       u8 tid;
+
+       if (info->control.vif) {
+               arvif = ath10k_vif_to_arvif(info->control.vif);
+               vdev_id = arvif->vdev_id;
+       } else if (ar->monitor_enabled) {
+               vdev_id = ar->monitor_vdev_id;
+       }
+
+       /* CCK rates should be disabled for P2P operation */
+       if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+               ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+       /* We must calculate the TID before applying the QoS workaround,
+        * as it strips the QoS control field. */
+       tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+       if (ieee80211_is_data_qos(hdr->frame_control) &&
+           is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+       }
+
+       ath10k_tx_h_qos_workaround(hw, control, skb);
+       ath10k_tx_h_update_wep_key(skb);
+       ath10k_tx_h_add_p2p_noa_ie(ar, skb);
+       ath10k_tx_h_seq_no(skb);
+
+       memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
+       ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
+       ATH10K_SKB_CB(skb)->htt.tid = tid;
+
+       if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+               spin_lock_bh(&ar->data_lock);
+               ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+               ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
+               spin_unlock_bh(&ar->data_lock);
+
+               ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
+
+               skb_queue_tail(&ar->offchan_tx_queue, skb);
+               ieee80211_queue_work(hw, &ar->offchan_tx_work);
+               return;
+       }
+
+       ath10k_tx_htt(ar, skb);
+}
+
+/*
+ * Initialize various parameters with default values.
+ */
+static int ath10k_start(struct ieee80211_hw *hw)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
+       if (ret)
+               ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
+                           ret);
+
+       ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
+       if (ret)
+               ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
+                           ret);
+
+       return 0;
+}
+
+static void ath10k_stop(struct ieee80211_hw *hw)
+{
+       struct ath10k *ar = hw->priv;
+
+       /* avoid leaks in case FW never confirms scan for offchannel */
+       cancel_work_sync(&ar->offchan_tx_work);
+       ath10k_offchan_tx_purge(ar);
+}
+
+static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct ath10k_generic_iter ar_iter;
+       struct ath10k *ar = hw->priv;
+       struct ieee80211_conf *conf = &hw->conf;
+       int ret = 0;
+       u32 flags;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
+                          conf->chandef.chan->center_freq);
+               spin_lock_bh(&ar->data_lock);
+               ar->rx_channel = conf->chandef.chan;
+               spin_unlock_bh(&ar->data_lock);
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_PS) {
+               memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+               ar_iter.ar = ar;
+               flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+
+               ieee80211_iterate_active_interfaces_atomic(hw,
+                                                          flags,
+                                                          ath10k_ps_iter,
+                                                          &ar_iter);
+
+               ret = ar_iter.ret;
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+               if (conf->flags & IEEE80211_CONF_MONITOR)
+                       ret = ath10k_monitor_create(ar);
+               else
+                       ret = ath10k_monitor_destroy(ar);
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+/*
+ * TODO:
+ * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ * because we will send mgmt frames without CCK; this requirement
+ * for P2P_FIND/GO_NEG should be handled by checking the CCK flag
+ * in the TX packet.
+ */
+static int ath10k_add_interface(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       enum wmi_sta_powersave_param param;
+       int ret = 0;
+       u32 value;
+       int bit;
+
+       mutex_lock(&ar->conf_mutex);
+
+       arvif->ar = ar;
+       arvif->vif = vif;
+
+       if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
+               ath10k_warn("Only one monitor interface allowed\n");
+               ret = -EBUSY;
+               goto exit;
+       }
+
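+       /* vdev ids come from the same free_vdev_map pool as the
+        * monitor vdev, so every interface occupies one FW vdev slot. */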
+       bit = ffs(ar->free_vdev_map);
+       if (bit == 0) {
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       arvif->vdev_id = bit - 1;
+       arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+       ar->free_vdev_map &= ~(1 << arvif->vdev_id);
+
+       if (ar->p2p)
+               arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NL80211_IFTYPE_STATION:
+               arvif->vdev_type = WMI_VDEV_TYPE_STA;
+               if (vif->p2p)
+                       arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
+               break;
+       case NL80211_IFTYPE_AP:
+               arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+               if (vif->p2p)
+                       arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+
+       ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
+                  arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
+
+       ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
+                                    arvif->vdev_subtype, vif->addr);
+       if (ret) {
+               ath10k_warn("WMI vdev create failed: ret %d\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID,
+                                       arvif->def_wep_key_index);
+       if (ret)
+               ath10k_warn("Failed to set default keyid: %d\n", ret);
+
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                       WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+                                       ATH10K_HW_TXRX_NATIVE_WIFI);
+       if (ret)
+               ath10k_warn("Failed to set TX encap: %d\n", ret);
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+               if (ret) {
+                       ath10k_warn("Failed to create peer for AP: %d\n", ret);
+                       goto exit;
+               }
+       }
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+               param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+               value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+               ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                                 param, value);
+               if (ret)
+                       ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+
+               param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+               value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+               ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                                 param, value);
+               if (ret)
+                       ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+
+               param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+               value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+               ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                                 param, value);
+               if (ret)
+                       ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+       }
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+               ar->monitor_present = true;
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static void ath10k_remove_interface(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
+
+       ar->free_vdev_map |= 1 << (arvif->vdev_id);
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
+               if (ret)
+                       ath10k_warn("Failed to remove peer for AP: %d\n", ret);
+
+               kfree(arvif->u.ap.noa_data);
+       }
+
+       ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+       if (ret)
+               ath10k_warn("WMI vdev delete failed: %d\n", ret);
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+               ar->monitor_present = false;
+
+       ath10k_peer_cleanup(ar, arvif->vdev_id);
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+/*
+ * FIXME: Has to be verified.
+ */
+#define SUPPORTED_FILTERS                      \
+       (FIF_PROMISC_IN_BSS |                   \
+       FIF_ALLMULTI |                          \
+       FIF_CONTROL |                           \
+       FIF_PSPOLL |                            \
+       FIF_OTHER_BSS |                         \
+       FIF_BCN_PRBRESP_PROMISC |               \
+       FIF_PROBE_REQ |                         \
+       FIF_FCSFAIL)
+
+static void ath10k_configure_filter(struct ieee80211_hw *hw,
+                                   unsigned int changed_flags,
+                                   unsigned int *total_flags,
+                                   u64 multicast)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       changed_flags &= SUPPORTED_FILTERS;
+       *total_flags &= SUPPORTED_FILTERS;
+       ar->filter_flags = *total_flags;
+
+       if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
+           !ar->monitor_enabled) {
+               ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+               if (ret)
+                       ath10k_warn("Unable to start monitor mode\n");
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
+       } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
+                  ar->monitor_enabled) {
+               ret = ath10k_monitor_stop(ar);
+               if (ret)
+                       ath10k_warn("Unable to stop monitor mode\n");
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_bss_conf *info,
+                                   u32 changed)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret = 0;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (changed & BSS_CHANGED_IBSS)
+               ath10k_control_ibss(arvif, info, vif->addr);
+
+       if (changed & BSS_CHANGED_BEACON_INT) {
+               arvif->beacon_interval = info->beacon_int;
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_BEACON_INTERVAL,
+                                               arvif->beacon_interval);
+               if (ret)
+                       ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Beacon interval: %d set for VDEV: %d\n",
+                                  arvif->beacon_interval, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_BEACON) {
+               ret = ath10k_wmi_pdev_set_param(ar,
+                                               WMI_PDEV_PARAM_BEACON_TX_MODE,
+                                               WMI_BEACON_STAGGERED_MODE);
+               if (ret)
+                       ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set staggered beacon mode for VDEV: %d\n",
+                                  arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_DTIM_PERIOD) {
+               arvif->dtim_period = info->dtim_period;
+
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_DTIM_PERIOD,
+                                               arvif->dtim_period);
+               if (ret)
+                       ath10k_warn("Failed to set dtim period for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set dtim period: %d for VDEV: %d\n",
+                                  arvif->dtim_period, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_SSID &&
+           vif->type == NL80211_IFTYPE_AP) {
+               arvif->u.ap.ssid_len = info->ssid_len;
+               if (info->ssid_len)
+                       memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+               arvif->u.ap.hidden_ssid = info->hidden_ssid;
+       }
+
+       if (changed & BSS_CHANGED_BSSID) {
+               if (!is_zero_ether_addr(info->bssid)) {
+                       ret = ath10k_peer_create(ar, arvif->vdev_id,
+                                                info->bssid);
+                       if (ret)
+                               ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
+                                           info->bssid, arvif->vdev_id);
+                       else
+                               ath10k_dbg(ATH10K_DBG_MAC,
+                                          "Added peer: %pM for VDEV: %d\n",
+                                          info->bssid, arvif->vdev_id);
+
+                       if (vif->type == NL80211_IFTYPE_STATION) {
+                               /*
+                                * This is never erased as we use it
+                                * for crypto key clearing; this is a
+                                * FW requirement.
+                                */
+                               memcpy(arvif->u.sta.bssid, info->bssid,
+                                      ETH_ALEN);
+
+                               ret = ath10k_vdev_start(arvif);
+                               if (!ret)
+                                       ath10k_dbg(ATH10K_DBG_MAC,
+                                                  "VDEV: %d started with BSSID: %pM\n",
+                                                  arvif->vdev_id, info->bssid);
+                       }
+
+                       /*
+                        * mac80211 does not keep the IBSS bssid when
+                        * leaving IBSS, so the driver needs to store
+                        * it. It is needed when leaving IBSS in order
+                        * to remove the BSSID peer.
+                        */
+                       if (vif->type == NL80211_IFTYPE_ADHOC)
+                               memcpy(arvif->u.ibss.bssid, info->bssid,
+                                      ETH_ALEN);
+               }
+       }
+
+       if (changed & BSS_CHANGED_BEACON_ENABLED)
+               ath10k_control_beaconing(arvif, info);
+
+       if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+               u32 cts_prot;
+               if (info->use_cts_prot)
+                       cts_prot = 1;
+               else
+                       cts_prot = 0;
+
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_ENABLE_RTSCTS,
+                                               cts_prot);
+               if (ret)
+                       ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set CTS prot: %d for VDEV: %d\n",
+                                  cts_prot, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_ERP_SLOT) {
+               u32 slottime;
+               if (info->use_short_slot)
+                       slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+               else
+                       slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_SLOT_TIME,
+                                               slottime);
+               if (ret)
+                       ath10k_warn("Failed to set erp slot for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set slottime: %d for VDEV: %d\n",
+                                  slottime, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+               u32 preamble;
+               if (info->use_short_preamble)
+                       preamble = WMI_VDEV_PREAMBLE_SHORT;
+               else
+                       preamble = WMI_VDEV_PREAMBLE_LONG;
+
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_PREAMBLE,
+                                               preamble);
+               if (ret)
+                       ath10k_warn("Failed to set preamble for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set preamble: %d for VDEV: %d\n",
+                                  preamble, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_ASSOC) {
+               if (info->assoc)
+                       ath10k_bss_assoc(hw, vif, info);
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_hw_scan(struct ieee80211_hw *hw,
+                         struct ieee80211_vif *vif,
+                         struct cfg80211_scan_request *req)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct wmi_start_scan_arg arg;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       if (ar->scan.in_progress) {
+               spin_unlock_bh(&ar->data_lock);
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       INIT_COMPLETION(ar->scan.started);
+       INIT_COMPLETION(ar->scan.completed);
+       ar->scan.in_progress = true;
+       ar->scan.aborting = false;
+       ar->scan.is_roc = false;
+       ar->scan.vdev_id = arvif->vdev_id;
+       spin_unlock_bh(&ar->data_lock);
+
+       memset(&arg, 0, sizeof(arg));
+       ath10k_wmi_start_scan_init(ar, &arg);
+       arg.vdev_id = arvif->vdev_id;
+       arg.scan_id = ATH10K_SCAN_ID;
+
+       if (!req->no_cck)
+               arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+
+       if (req->ie_len) {
+               arg.ie_len = req->ie_len;
+               memcpy(arg.ie, req->ie, arg.ie_len);
+       }
+
+       if (req->n_ssids) {
+               arg.n_ssids = req->n_ssids;
+               for (i = 0; i < arg.n_ssids; i++) {
+                       arg.ssids[i].len  = req->ssids[i].ssid_len;
+                       arg.ssids[i].ssid = req->ssids[i].ssid;
+               }
+       }
+
+       if (req->n_channels) {
+               arg.n_channels = req->n_channels;
+               for (i = 0; i < arg.n_channels; i++)
+                       arg.channels[i] = req->channels[i]->center_freq;
+       }
+
+       ret = ath10k_start_scan(ar, &arg);
+       if (ret) {
+               ath10k_warn("could not start hw scan (%d)\n", ret);
+               spin_lock_bh(&ar->data_lock);
+               ar->scan.in_progress = false;
+               spin_unlock_bh(&ar->data_lock);
+       }
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+       ret = ath10k_abort_scan(ar);
+       if (ret) {
+               ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
+                           ret);
+               ieee80211_scan_completed(hw, 1 /* aborted */);
+       }
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                         struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                         struct ieee80211_key_conf *key)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_peer *peer;
+       const u8 *peer_addr;
+       bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+                     key->cipher == WLAN_CIPHER_SUITE_WEP104;
+       int ret = 0;
+
+       if (key->keyidx > WMI_MAX_KEY_INDEX)
+               return -ENOSPC;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (sta)
+               peer_addr = sta->addr;
+       else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+               peer_addr = vif->bss_conf.bssid;
+       else
+               peer_addr = vif->addr;
+
+       key->hw_key_idx = key->keyidx;
+
+       /* The peer should not disappear mid-way (unless the FW goes awry)
+        * since we already hold conf_mutex. We just make sure it's there
+        * now. */
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!peer) {
+               if (cmd == SET_KEY) {
+                       ath10k_warn("cannot install key for non-existent peer %pM\n",
+                                   peer_addr);
+                       ret = -EOPNOTSUPP;
+                       goto exit;
+               } else {
+                       /* if the peer doesn't exist there is no key to disable
+                        * anymore */
+                       goto exit;
+               }
+       }
+
+       if (is_wep) {
+               if (cmd == SET_KEY)
+                       arvif->wep_keys[key->keyidx] = key;
+               else
+                       arvif->wep_keys[key->keyidx] = NULL;
+
+               if (cmd == DISABLE_KEY)
+                       ath10k_clear_vdev_key(arvif, key);
+       }
+
+       ret = ath10k_install_key(arvif, key, cmd, peer_addr);
+       if (ret) {
+               ath10k_warn("ath10k_install_key failed (%d)\n", ret);
+               goto exit;
+       }
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+       if (peer && cmd == SET_KEY)
+               peer->keys[key->keyidx] = key;
+       else if (peer && cmd == DISABLE_KEY)
+               peer->keys[key->keyidx] = NULL;
+       else if (peer == NULL)
+               /* impossible unless FW goes crazy */
+               ath10k_warn("peer %pM disappeared!\n", peer_addr);
+       spin_unlock_bh(&ar->data_lock);
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static int ath10k_sta_state(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta,
+                           enum ieee80211_sta_state old_state,
+                           enum ieee80211_sta_state new_state)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret = 0;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (old_state == IEEE80211_STA_NOTEXIST &&
+           new_state == IEEE80211_STA_NONE &&
+           vif->type != NL80211_IFTYPE_STATION) {
+               /*
+                * New station addition.
+                */
+               ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+               if (ret)
+                       ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
+                                   sta->addr, arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Added peer: %pM for VDEV: %d\n",
+                                  sta->addr, arvif->vdev_id);
+       } else if (old_state == IEEE80211_STA_NONE &&
+                  new_state == IEEE80211_STA_NOTEXIST) {
+               /*
+                * Existing station deletion.
+                */
+               ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+               if (ret)
+                       ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
+                                   sta->addr, arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Removed peer: %pM for VDEV: %d\n",
+                                  sta->addr, arvif->vdev_id);
+
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       ath10k_bss_disassoc(hw, vif);
+       } else if (old_state == IEEE80211_STA_AUTH &&
+                  new_state == IEEE80211_STA_ASSOC &&
+                  (vif->type == NL80211_IFTYPE_AP ||
+                   vif->type == NL80211_IFTYPE_ADHOC)) {
+               /*
+                * New association.
+                */
+               ret = ath10k_station_assoc(ar, arvif, sta);
+               if (ret)
+                       ath10k_warn("Failed to associate station: %pM\n",
+                                   sta->addr);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Station %pM moved to assoc state\n",
+                                  sta->addr);
+       } else if (old_state == IEEE80211_STA_ASSOC &&
+                  new_state == IEEE80211_STA_AUTH &&
+                  (vif->type == NL80211_IFTYPE_AP ||
+                   vif->type == NL80211_IFTYPE_ADHOC)) {
+               /*
+                * Disassociation.
+                */
+               ret = ath10k_station_disassoc(ar, arvif, sta);
+               if (ret)
+                       ath10k_warn("Failed to disassociate station: %pM\n",
+                                   sta->addr);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Station %pM moved to disassociated state\n",
+                                  sta->addr);
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
+                                u16 ac, bool enable)
+{
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       u32 value = 0;
+       int ret = 0;
+
+       if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+               return 0;
+
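+       /* mac80211 numbers the ACs in descending priority order (VO=0 ..
+        * BK=3) while the WMI U-APSD bits count them in ascending priority
+        * order (AC0=BK .. AC3=VO), hence the inverted mapping below. */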
+       switch (ac) {
+       case IEEE80211_AC_VO:
+               value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+                       WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+               break;
+       case IEEE80211_AC_VI:
+               value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+                       WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+               break;
+       case IEEE80211_AC_BE:
+               value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+                       WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+               break;
+       case IEEE80211_AC_BK:
+               value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+                       WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+               break;
+       }
+
+       if (enable)
+               arvif->u.sta.uapsd |= value;
+       else
+               arvif->u.sta.uapsd &= ~value;
+
+       ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                         WMI_STA_PS_PARAM_UAPSD,
+                                         arvif->u.sta.uapsd);
+       if (ret) {
+               ath10k_warn("could not set uapsd params %d\n", ret);
+               goto exit;
+       }
+
+       if (arvif->u.sta.uapsd)
+               value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+       else
+               value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+       ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                         WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+                                         value);
+       if (ret)
+               ath10k_warn("could not set rx wake param %d\n", ret);
+
+exit:
+       return ret;
+}
+
+static int ath10k_conf_tx(struct ieee80211_hw *hw,
+                         struct ieee80211_vif *vif, u16 ac,
+                         const struct ieee80211_tx_queue_params *params)
+{
+       struct ath10k *ar = hw->priv;
+       struct wmi_wmm_params_arg *p = NULL;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       switch (ac) {
+       case IEEE80211_AC_VO:
+               p = &ar->wmm_params.ac_vo;
+               break;
+       case IEEE80211_AC_VI:
+               p = &ar->wmm_params.ac_vi;
+               break;
+       case IEEE80211_AC_BE:
+               p = &ar->wmm_params.ac_be;
+               break;
+       case IEEE80211_AC_BK:
+               p = &ar->wmm_params.ac_bk;
+               break;
+       }
+
+       if (WARN_ON(!p)) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       p->cwmin = params->cw_min;
+       p->cwmax = params->cw_max;
+       p->aifs = params->aifs;
+
+       /*
+        * The channel time duration programmed in the HW is in absolute
+        * microseconds, while mac80211 gives the txop in units of
+        * 32 microseconds.
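+        * For example, a txop of 100 from mac80211 is programmed as
+        * 100 * 32 = 3200 microseconds.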
+        */
+       p->txop = params->txop * 32;
+
+       /* FIXME: FW accepts wmm params per hw, not per vif */
+       ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
+       if (ret) {
+               ath10k_warn("could not set wmm params %d\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+       if (ret)
+               ath10k_warn("could not set sta uapsd %d\n", ret);
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+
+static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_channel *chan,
+                                   int duration,
+                                   enum ieee80211_roc_type type)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct wmi_start_scan_arg arg;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       if (ar->scan.in_progress) {
+               spin_unlock_bh(&ar->data_lock);
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       INIT_COMPLETION(ar->scan.started);
+       INIT_COMPLETION(ar->scan.completed);
+       INIT_COMPLETION(ar->scan.on_channel);
+       ar->scan.in_progress = true;
+       ar->scan.aborting = false;
+       ar->scan.is_roc = true;
+       ar->scan.vdev_id = arvif->vdev_id;
+       ar->scan.roc_freq = chan->center_freq;
+       spin_unlock_bh(&ar->data_lock);
+
+       memset(&arg, 0, sizeof(arg));
+       ath10k_wmi_start_scan_init(ar, &arg);
+       arg.vdev_id = arvif->vdev_id;
+       arg.scan_id = ATH10K_SCAN_ID;
+       arg.n_channels = 1;
+       arg.channels[0] = chan->center_freq;
+       arg.dwell_time_active = duration;
+       arg.dwell_time_passive = duration;
+       arg.max_scan_time = 2 * duration;
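+       /* The off-channel stint is implemented as a passive single-channel
+        * scan. WMI_SCAN_FILTER_PROBE_REQ presumably keeps the FW from
+        * forwarding probe requests received while on channel. */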
+       arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+       arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+
+       ret = ath10k_start_scan(ar, &arg);
+       if (ret) {
+               ath10k_warn("could not start roc scan (%d)\n", ret);
+               spin_lock_bh(&ar->data_lock);
+               ar->scan.in_progress = false;
+               spin_unlock_bh(&ar->data_lock);
+               goto exit;
+       }
+
+       ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+       if (ret == 0) {
+               ath10k_warn("could not switch to channel for roc scan\n");
+               ath10k_abort_scan(ar);
+               ret = -ETIMEDOUT;
+               goto exit;
+       }
+
+       ret = 0;
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+       struct ath10k *ar = hw->priv;
+
+       mutex_lock(&ar->conf_mutex);
+       ath10k_abort_scan(ar);
+       mutex_unlock(&ar->conf_mutex);
+
+       return 0;
+}
+
+/*
+ * Both RTS and Fragmentation threshold are interface-specific
+ * in ath10k, but device-specific in mac80211.
+ */
+static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath10k_generic_iter *ar_iter = data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
+
+       rts = min_t(u32, rts, ATH10K_RTS_MAX);
+
+       ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
+                                                WMI_VDEV_PARAM_RTS_THRESHOLD,
+                                                rts);
+       if (ar_iter->ret)
+               ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
+                           arvif->vdev_id);
+       else
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "Set RTS threshold: %d for VDEV: %d\n",
+                          rts, arvif->vdev_id);
+}
+
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       struct ath10k_generic_iter ar_iter;
+       struct ath10k *ar = hw->priv;
+
+       memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+       ar_iter.ar = ar;
+
+       mutex_lock(&ar->conf_mutex);
+       ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                                           ath10k_set_rts_iter, &ar_iter);
+       mutex_unlock(&ar->conf_mutex);
+
+       return ar_iter.ret;
+}
+
+static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath10k_generic_iter *ar_iter = data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
+       int ret;
+
+       frag = clamp_t(u32, frag,
+                      ATH10K_FRAGMT_THRESHOLD_MIN,
+                      ATH10K_FRAGMT_THRESHOLD_MAX);
+
+       ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
+                                       WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+                                       frag);
+
+       ar_iter->ret = ret;
+       if (ar_iter->ret)
+               ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
+                           arvif->vdev_id);
+       else
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "Set frag threshold: %d for VDEV: %d\n",
+                          frag, arvif->vdev_id);
+}
+
+static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       struct ath10k_generic_iter ar_iter;
+       struct ath10k *ar = hw->priv;
+
+       memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+       ar_iter.ar = ar;
+
+       mutex_lock(&ar->conf_mutex);
+       ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                                           ath10k_set_frag_iter, &ar_iter);
+       mutex_unlock(&ar->conf_mutex);
+
+       return ar_iter.ret;
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       /* mac80211 doesn't care whether we really xmit queued frames or not;
+        * we'll collect those frames either way if we stop/delete vdevs */
+       if (drop)
+               return;
+
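+       /* Wait until every MSDU id in the HTT tx bitmap has been released,
+        * i.e. all pending tx completions have been processed. */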
+       ret = wait_event_timeout(ar->htt->empty_tx_wq, ({
+                       bool empty;
+                       spin_lock_bh(&ar->htt->tx_lock);
+                       empty = bitmap_empty(ar->htt->used_msdu_ids,
+                                            ar->htt->max_num_pending_tx);
+                       spin_unlock_bh(&ar->htt->tx_lock);
+                       empty;
+               }), ATH10K_FLUSH_TIMEOUT_HZ);
+       if (ret <= 0)
+               ath10k_warn("tx not flushed\n");
+}
+
+/* TODO: Implement this function properly.
+ * For now it is needed to reply to Probe Requests in IBSS mode.
+ * Probably we need this information from the FW.
+ */
+static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
+{
+       return 1;
+}
+
+static const struct ieee80211_ops ath10k_ops = {
+       .tx                             = ath10k_tx,
+       .start                          = ath10k_start,
+       .stop                           = ath10k_stop,
+       .config                         = ath10k_config,
+       .add_interface                  = ath10k_add_interface,
+       .remove_interface               = ath10k_remove_interface,
+       .configure_filter               = ath10k_configure_filter,
+       .bss_info_changed               = ath10k_bss_info_changed,
+       .hw_scan                        = ath10k_hw_scan,
+       .cancel_hw_scan                 = ath10k_cancel_hw_scan,
+       .set_key                        = ath10k_set_key,
+       .sta_state                      = ath10k_sta_state,
+       .conf_tx                        = ath10k_conf_tx,
+       .remain_on_channel              = ath10k_remain_on_channel,
+       .cancel_remain_on_channel       = ath10k_cancel_remain_on_channel,
+       .set_rts_threshold              = ath10k_set_rts_threshold,
+       .set_frag_threshold             = ath10k_set_frag_threshold,
+       .flush                          = ath10k_flush,
+       .tx_last_beacon                 = ath10k_tx_last_beacon,
+};
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+       .bitrate                = (_rate), \
+       .flags                  = (_flags), \
+       .hw_value               = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+       .band                   = IEEE80211_BAND_2GHZ, \
+       .hw_value               = (_channel), \
+       .center_freq            = (_freq), \
+       .flags                  = (_flags), \
+       .max_antenna_gain       = 0, \
+       .max_power              = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+       .band                   = IEEE80211_BAND_5GHZ, \
+       .hw_value               = (_channel), \
+       .center_freq            = (_freq), \
+       .flags                  = (_flags), \
+       .max_antenna_gain       = 0, \
+       .max_power              = 30, \
+}
+
+static const struct ieee80211_channel ath10k_2ghz_channels[] = {
+       CHAN2G(1, 2412, 0),
+       CHAN2G(2, 2417, 0),
+       CHAN2G(3, 2422, 0),
+       CHAN2G(4, 2427, 0),
+       CHAN2G(5, 2432, 0),
+       CHAN2G(6, 2437, 0),
+       CHAN2G(7, 2442, 0),
+       CHAN2G(8, 2447, 0),
+       CHAN2G(9, 2452, 0),
+       CHAN2G(10, 2457, 0),
+       CHAN2G(11, 2462, 0),
+       CHAN2G(12, 2467, 0),
+       CHAN2G(13, 2472, 0),
+       CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath10k_5ghz_channels[] = {
+       CHAN5G(36, 5180, 0),
+       CHAN5G(40, 5200, 0),
+       CHAN5G(44, 5220, 0),
+       CHAN5G(48, 5240, 0),
+       CHAN5G(52, 5260, 0),
+       CHAN5G(56, 5280, 0),
+       CHAN5G(60, 5300, 0),
+       CHAN5G(64, 5320, 0),
+       CHAN5G(100, 5500, 0),
+       CHAN5G(104, 5520, 0),
+       CHAN5G(108, 5540, 0),
+       CHAN5G(112, 5560, 0),
+       CHAN5G(116, 5580, 0),
+       CHAN5G(120, 5600, 0),
+       CHAN5G(124, 5620, 0),
+       CHAN5G(128, 5640, 0),
+       CHAN5G(132, 5660, 0),
+       CHAN5G(136, 5680, 0),
+       CHAN5G(140, 5700, 0),
+       CHAN5G(149, 5745, 0),
+       CHAN5G(153, 5765, 0),
+       CHAN5G(157, 5785, 0),
+       CHAN5G(161, 5805, 0),
+       CHAN5G(165, 5825, 0),
+};
+
+static struct ieee80211_rate ath10k_rates[] = {
+       /* CCK */
+       RATETAB_ENT(10,  0x82, 0),
+       RATETAB_ENT(20,  0x84, 0),
+       RATETAB_ENT(55,  0x8b, 0),
+       RATETAB_ENT(110, 0x96, 0),
+       /* OFDM */
+       RATETAB_ENT(60,  0x0c, 0),
+       RATETAB_ENT(90,  0x12, 0),
+       RATETAB_ENT(120, 0x18, 0),
+       RATETAB_ENT(180, 0x24, 0),
+       RATETAB_ENT(240, 0x30, 0),
+       RATETAB_ENT(360, 0x48, 0),
+       RATETAB_ENT(480, 0x60, 0),
+       RATETAB_ENT(540, 0x6c, 0),
+};
+
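+/* ath10k_a_rates skips the four CCK entries, so the 5 GHz band advertises
+ * only the eight OFDM rates while 2.4 GHz advertises all twelve. */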
+#define ath10k_a_rates (ath10k_rates + 4)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+struct ath10k *ath10k_mac_create(void)
+{
+       struct ieee80211_hw *hw;
+       struct ath10k *ar;
+
+       hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops);
+       if (!hw)
+               return NULL;
+
+       ar = hw->priv;
+       ar->hw = hw;
+
+       return ar;
+}
+
+void ath10k_mac_destroy(struct ath10k *ar)
+{
+       ieee80211_free_hw(ar->hw);
+}
+
+static const struct ieee80211_iface_limit ath10k_if_limits[] = {
+       {
+               .max    = 8,
+               .types  = BIT(NL80211_IFTYPE_STATION)
+                       | BIT(NL80211_IFTYPE_P2P_CLIENT)
+                       | BIT(NL80211_IFTYPE_P2P_GO)
+                       | BIT(NL80211_IFTYPE_AP)
+       }
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb = {
+       .limits = ath10k_if_limits,
+       .n_limits = ARRAY_SIZE(ath10k_if_limits),
+       .max_interfaces = 8,
+       .num_different_channels = 1,
+       .beacon_int_infra_match = true,
+};
+
+static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
+{
+       struct ieee80211_sta_vht_cap vht_cap = {0};
+       u16 mcs_map;
+
+       vht_cap.vht_supported = 1;
+       vht_cap.cap = ar->vht_cap_info;
+
+       /* FIXME: check dynamically how many streams the board supports */
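+       /* Each 2-bit field of the MCS map describes one spatial stream:
+        * streams 1-3 advertise MCS 0-9 and streams 4-8 are marked not
+        * supported, which works out to an mcs_map of 0xffea. */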
+       mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+               IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+               IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+
+       vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+       vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+
+       return vht_cap;
+}
+
+static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
+{
+       int i;
+       struct ieee80211_sta_ht_cap ht_cap = {0};
+
+       if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
+               return ht_cap;
+
+       ht_cap.ht_supported = 1;
+       ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+       ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+       ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+       ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+       ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
+               ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
+               ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
+               u32 smps;
+
+               smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
+               smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+               ht_cap.cap |= smps;
+       }
+
+       if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
+               ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
+               u32 stbc;
+
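+               /* WMI reports the number of RX STBC streams in a multi-bit
+                * field; extract it and shift it into the HT cap RX_STBC
+                * position. */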
+               stbc   = ar->ht_cap_info;
+               stbc  &= WMI_HT_CAP_RX_STBC;
+               stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+               stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+               stbc  &= IEEE80211_HT_CAP_RX_STBC;
+
+               ht_cap.cap |= stbc;
+       }
+
+       if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
+               ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
+               ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+       /* max AMSDU is implicitly taken from vht_cap_info */
+       if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+               ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+       for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++)
+               ht_cap.mcs.rx_mask[i] = 0xFF;
+
+       ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+       return ht_cap;
+}
+
+static void ath10k_get_arvif_iter(void *data, u8 *mac,
+                                 struct ieee80211_vif *vif)
+{
+       struct ath10k_vif_iter *arvif_iter = data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+       if (arvif->vdev_id == arvif_iter->vdev_id)
+               arvif_iter->arvif = arvif;
+}
+
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
+{
+       struct ath10k_vif_iter arvif_iter;
+       u32 flags;
+
+       memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
+       arvif_iter.vdev_id = vdev_id;
+
+       flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+       ieee80211_iterate_active_interfaces_atomic(ar->hw,
+                                                  flags,
+                                                  ath10k_get_arvif_iter,
+                                                  &arvif_iter);
+       if (!arvif_iter.arvif) {
+               ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
+               return NULL;
+       }
+
+       return arvif_iter.arvif;
+}
+
+int ath10k_mac_register(struct ath10k *ar)
+{
+       struct ieee80211_supported_band *band;
+       struct ieee80211_sta_vht_cap vht_cap;
+       struct ieee80211_sta_ht_cap ht_cap;
+       void *channels;
+       int ret;
+
+       SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+       SET_IEEE80211_DEV(ar->hw, ar->dev);
+
+       ht_cap = ath10k_get_ht_cap(ar);
+       vht_cap = ath10k_create_vht_cap(ar);
+
+       if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+               channels = kmemdup(ath10k_2ghz_channels,
+                                  sizeof(ath10k_2ghz_channels),
+                                  GFP_KERNEL);
+               if (!channels)
+                       return -ENOMEM;
+
+               band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+               band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
+               band->channels = channels;
+               band->n_bitrates = ath10k_g_rates_size;
+               band->bitrates = ath10k_g_rates;
+               band->ht_cap = ht_cap;
+
+               /* VHT is not supported in 2.4 GHz */
+
+               ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+       }
+
+       if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+               channels = kmemdup(ath10k_5ghz_channels,
+                                  sizeof(ath10k_5ghz_channels),
+                                  GFP_KERNEL);
+               if (!channels) {
+                       if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+                               band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+                               kfree(band->channels);
+                       }
+                       return -ENOMEM;
+               }
+
+               band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+               band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
+               band->channels = channels;
+               band->n_bitrates = ath10k_a_rates_size;
+               band->bitrates = ath10k_a_rates;
+               band->ht_cap = ht_cap;
+               band->vht_cap = vht_cap;
+               ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+       }
+
+       ar->hw->wiphy->interface_modes =
+               BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_ADHOC) |
+               BIT(NL80211_IFTYPE_AP) |
+               BIT(NL80211_IFTYPE_P2P_CLIENT) |
+               BIT(NL80211_IFTYPE_P2P_GO);
+
+       ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+                       IEEE80211_HW_SUPPORTS_PS |
+                       IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+                       IEEE80211_HW_SUPPORTS_UAPSD |
+                       IEEE80211_HW_MFP_CAPABLE |
+                       IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+                       IEEE80211_HW_HAS_RATE_CONTROL |
+                       IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+                       IEEE80211_HW_WANT_MONITOR_VIF |
+                       IEEE80211_HW_AP_LINK_PS;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+               ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
+               ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+               ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
+       }
+
+       ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+       ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+       ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+
+       ar->hw->channel_change_time = 5000;
+       ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
+
+       ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+       ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+       ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+       /*
+        * On LL (low-latency) hardware the queues are managed entirely by
+        * the FW, so we only advertise the standard four AC queues to
+        * mac80211 and let the FW do the actual scheduling.
+        */
+       ar->hw->queues = 4;
+
+       ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
+       ar->hw->wiphy->n_iface_combinations = 1;
+
+       ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
+                           ath10k_reg_notifier);
+       if (ret) {
+               ath10k_err("Regulatory initialization failed\n");
+               return ret;
+       }
+
+       ret = ieee80211_register_hw(ar->hw);
+       if (ret) {
+               ath10k_err("ieee80211 registration failed: %d\n", ret);
+               return ret;
+       }
+
+       if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
+               ret = regulatory_hint(ar->hw->wiphy,
+                                     ar->ath_common.regulatory.alpha2);
+               if (ret)
+                       goto exit;
+       }
+
+       return 0;
+exit:
+       ieee80211_unregister_hw(ar->hw);
+       return ret;
+}
+
+void ath10k_mac_unregister(struct ath10k *ar)
+{
+       ieee80211_unregister_hw(ar->hw);
+
+       kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+       kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
+       SET_IEEE80211_DEV(ar->hw, NULL);
+}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
new file mode 100644 (file)
index 0000000..27fc92e
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MAC_H_
+#define _MAC_H_
+
+#include <net/mac80211.h>
+#include "core.h"
+
+struct ath10k_generic_iter {
+       struct ath10k *ar;
+       int ret;
+};
+
+struct ath10k *ath10k_mac_create(void);
+void ath10k_mac_destroy(struct ath10k *ar);
+int ath10k_mac_register(struct ath10k *ar);
+void ath10k_mac_unregister(struct ath10k *ar);
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
+void ath10k_reset_scan(unsigned long ptr);
+void ath10k_offchan_tx_purge(struct ath10k *ar);
+void ath10k_offchan_tx_work(struct work_struct *work);
+
+static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+       return (struct ath10k_vif *)vif->drv_priv;
+}
+
+static inline void ath10k_tx_h_seq_no(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_vif *vif = info->control.vif;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+       if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+               if (arvif->tx_seq_no == 0)
+                       arvif->tx_seq_no = 0x1000;
+
+               if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+                       arvif->tx_seq_no += 0x10;
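+               /* The sequence number lives in bits 4-15 of seq_ctrl, hence
+                * the 0x10 step above; IEEE80211_SCTL_FRAG masks off all but
+                * the fragment number bits before our counter is OR-ed in. */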
+               hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+               hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
+       }
+}
+
+#endif /* _MAC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
new file mode 100644 (file)
index 0000000..8e4e832
--- /dev/null
@@ -0,0 +1,2506 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
+#include "debug.h"
+
+#include "targaddrs.h"
+#include "bmi.h"
+
+#include "hif.h"
+#include "htc.h"
+
+#include "ce.h"
+#include "pci.h"
+
+unsigned int ath10k_target_ps;
+module_param(ath10k_target_ps, uint, 0644);
+MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+
+#define QCA988X_1_0_DEVICE_ID  (0xabcd)
+#define QCA988X_2_0_DEVICE_ID  (0x003c)
+
+static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
+       { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
+       { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+       {0}
+};
+
+static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
+                                      u32 *data);
+
+static void ath10k_pci_process_ce(struct ath10k *ar);
+static int ath10k_pci_post_rx(struct ath10k *ar);
+static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+                                  int num);
+static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_stop_ce(struct ath10k *ar);
+
+static const struct ce_attr host_ce_config_wlan[] = {
+       /* host->target HTC control and raw streams */
+       { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
+       /* could be moved to share CE3 */
+       /* target->host HTT + HTC control */
+       { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
+       /* target->host WMI */
+       { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
+       /* host->target WMI */
+       { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
+       /* host->target HTT */
+       { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
+                   CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
+       /* unused */
+       { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
+       /* Target autonomous hif_memcpy */
+       { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
+       /* ce_diag, the Diagnostic Window */
+       { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+};
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config target_ce_config_wlan[] = {
+       /* host->target HTC control and raw streams */
+       { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
+       /* target->host HTT + HTC control */
+       { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
+       /* target->host WMI */
+       { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
+       /* host->target WMI */
+       { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
+       /* host->target HTT */
+       { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+       /* NB: 50% of src nentries, since tx has 2 frags */
+       /* unused */
+       { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
+       /* Reserved for target autonomous hif_memcpy */
+       { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+       /* CE7 used only by Host */
+};
+
+/*
+ * Diagnostic read/write access is provided for startup/config/debug usage.
+ * Caller must guarantee proper alignment, when applicable, and single user
+ * at any moment.
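+ *
+ * A minimal usage sketch (hypothetical caller) reading one 4-byte-aligned
+ * word from target memory:
+ *
+ *     u32 val;
+ *     int ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));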
+ */
+static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
+                                   int nbytes)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret = 0;
+       u32 buf;
+       unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+       unsigned int id;
+       unsigned int flags;
+       struct ce_state *ce_diag;
+       /* Host buffer address in CE space */
+       u32 ce_data;
+       dma_addr_t ce_data_base = 0;
+       void *data_buf = NULL;
+       int i;
+
+       /*
+        * This code cannot handle reads to non-memory space. Redirect to the
+        * register read function but preserve the multi-word read capability
+        * of this function.
+        */
+       if (address < DRAM_BASE_ADDRESS) {
+               if (!IS_ALIGNED(address, 4) ||
+                   !IS_ALIGNED((unsigned long)data, 4))
+                       return -EIO;
+
+               while ((nbytes >= 4) &&  ((ret = ath10k_pci_diag_read_access(
+                                          ar, address, (u32 *)data)) == 0)) {
+                       nbytes -= sizeof(u32);
+                       address += sizeof(u32);
+                       data += sizeof(u32);
+               }
+               return ret;
+       }
+
+       ce_diag = ar_pci->ce_diag;
+
+       /*
+        * Allocate a temporary bounce buffer to hold caller's data
+        * to be DMA'ed from Target. This guarantees
+        *   1) 4-byte alignment
+        *   2) Buffer in DMA-able space
+        */
+       orig_nbytes = nbytes;
+       data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
+                                                        orig_nbytes,
+                                                        &ce_data_base);
+
+       if (!data_buf) {
+               ret = -ENOMEM;
+               goto done;
+       }
+       memset(data_buf, 0, orig_nbytes);
+
+       remaining_bytes = orig_nbytes;
+       ce_data = ce_data_base;
+       while (remaining_bytes) {
+               nbytes = min_t(unsigned int, remaining_bytes,
+                              DIAG_TRANSFER_LIMIT);
+
+               ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
+               if (ret != 0)
+                       goto done;
+
+               /* Request CE to send from Target(!) address to Host buffer */
+               /*
+                * The address supplied by the caller is in the
+                * Target CPU virtual address space.
+                *
+                * In order to use this address with the diagnostic CE,
+                * convert it from Target CPU virtual address space
+                * to CE address space
+                */
+               ath10k_pci_wake(ar);
+               address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
+                                                    address);
+               ath10k_pci_sleep(ar);
+
+               ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
+                                0);
+               if (ret)
+                       goto done;
+
+               i = 0;
+               while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
+                                                    &completed_nbytes,
+                                                    &id) != 0) {
+                       mdelay(1);
+                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                               ret = -EBUSY;
+                               goto done;
+                       }
+               }
+
+               if (nbytes != completed_nbytes) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               if (buf != (u32) address) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               i = 0;
+               while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
+                                                    &completed_nbytes,
+                                                    &id, &flags) != 0) {
+                       mdelay(1);
+
+                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                               ret = -EBUSY;
+                               goto done;
+                       }
+               }
+
+               if (nbytes != completed_nbytes) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               if (buf != ce_data) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               remaining_bytes -= nbytes;
+               address += nbytes;
+               ce_data += nbytes;
+       }
+
+done:
+       if (ret == 0) {
+               /* Copy data from allocated DMA buf to caller's buf */
+               WARN_ON_ONCE(orig_nbytes & 3);
+               for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
+                       ((u32 *)data)[i] =
+                               __le32_to_cpu(((__le32 *)data_buf)[i]);
+               }
+       } else {
+               ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
+                          __func__, address);
+       }
+
+       if (data_buf)
+               pci_free_consistent(ar_pci->pdev, orig_nbytes,
+                                   data_buf, ce_data_base);
+
+       return ret;
+}
+
+/* Read 4-byte aligned data from Target memory or register */
+static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
+                                      u32 *data)
+{
+       /* Assume range doesn't cross this boundary */
+       if (address >= DRAM_BASE_ADDRESS)
+               return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
+
+       ath10k_pci_wake(ar);
+       *data = ath10k_pci_read32(ar, address);
+       ath10k_pci_sleep(ar);
+       return 0;
+}
+
+static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+                                    const void *data, int nbytes)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret = 0;
+       u32 buf;
+       unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+       unsigned int id;
+       unsigned int flags;
+       struct ce_state *ce_diag;
+       void *data_buf = NULL;
+       u32 ce_data;    /* Host buffer address in CE space */
+       dma_addr_t ce_data_base = 0;
+       int i;
+
+       ce_diag = ar_pci->ce_diag;
+
+       /*
+        * Allocate a temporary bounce buffer to hold caller's data
+        * to be DMA'ed to Target. This guarantees
+        *   1) 4-byte alignment
+        *   2) Buffer in DMA-able space
+        */
+       orig_nbytes = nbytes;
+       data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
+                                                        orig_nbytes,
+                                                        &ce_data_base);
+       if (!data_buf) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       /* Copy caller's data to allocated DMA buf */
+       WARN_ON_ONCE(orig_nbytes & 3);
+       for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
+               ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
+
+       /*
+        * The address supplied by the caller is in the
+        * Target CPU virtual address space.
+        *
+        * In order to use this address with the diagnostic CE,
+        * convert it from
+        *    Target CPU virtual address space
+        * to
+        *    CE address space
+        */
+       ath10k_pci_wake(ar);
+       address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+       ath10k_pci_sleep(ar);
+
+       remaining_bytes = orig_nbytes;
+       ce_data = ce_data_base;
+       while (remaining_bytes) {
+               /* FIXME: check cast */
+               nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+
+               /* Set up to receive directly into Target(!) address */
+               ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
+               if (ret != 0)
+                       goto done;
+
+               /*
+                * Request CE to send caller-supplied data that
+                * was copied to bounce buffer to Target(!) address.
+                */
+               ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
+                                    nbytes, 0, 0);
+               if (ret != 0)
+                       goto done;
+
+               i = 0;
+               while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
+                                                    &completed_nbytes,
+                                                    &id) != 0) {
+                       mdelay(1);
+
+                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                               ret = -EBUSY;
+                               goto done;
+                       }
+               }
+
+               if (nbytes != completed_nbytes) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               if (buf != ce_data) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               i = 0;
+               while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
+                                                    &completed_nbytes,
+                                                    &id, &flags) != 0) {
+                       mdelay(1);
+
+                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                               ret = -EBUSY;
+                               goto done;
+                       }
+               }
+
+               if (nbytes != completed_nbytes) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               if (buf != address) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               remaining_bytes -= nbytes;
+               address += nbytes;
+               ce_data += nbytes;
+       }
+
+done:
+       if (data_buf) {
+               pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
+                                   ce_data_base);
+       }
+
+       if (ret != 0)
+               ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
+                          address);
+
+       return ret;
+}
+
+/* Write 4-byte aligned data to Target memory or register */
+static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
+                                       u32 data)
+{
+       /* Assume range doesn't cross this boundary */
+       if (address >= DRAM_BASE_ADDRESS)
+               return ath10k_pci_diag_write_mem(ar, address, &data,
+                                                sizeof(u32));
+
+       ath10k_pci_wake(ar);
+       ath10k_pci_write32(ar, address, data);
+       ath10k_pci_sleep(ar);
+       return 0;
+}
+
+static bool ath10k_pci_target_is_awake(struct ath10k *ar)
+{
+       void __iomem *mem = ath10k_pci_priv(ar)->mem;
+       u32 val;
+
+       val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
+                      RTC_STATE_ADDRESS);
+       return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
+}
+
+static void ath10k_pci_wait(struct ath10k *ar)
+{
+       int n = 100;
+
+       while (n-- && !ath10k_pci_target_is_awake(ar))
+               msleep(10);
+
+       if (n < 0)
+               ath10k_warn("Unable to wakeup target\n");
+}
+
+void ath10k_do_pci_wake(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       void __iomem *pci_addr = ar_pci->mem;
+       int tot_delay = 0;
+       int curr_delay = 5;
+
+       if (atomic_read(&ar_pci->keep_awake_count) == 0) {
+               /* Force AWAKE */
+               iowrite32(PCIE_SOC_WAKE_V_MASK,
+                         pci_addr + PCIE_LOCAL_BASE_ADDRESS +
+                         PCIE_SOC_WAKE_ADDRESS);
+       }
+       atomic_inc(&ar_pci->keep_awake_count);
+
+       if (ar_pci->verified_awake)
+               return;
+
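+       /* Poll for the awake state, starting at 5 us per try and backing
+        * off in 5 us steps up to 50 us, until PCIE_WAKE_TIMEOUT total
+        * delay is exceeded. */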
+       for (;;) {
+               if (ath10k_pci_target_is_awake(ar)) {
+                       ar_pci->verified_awake = true;
+                       break;
+               }
+
+               if (tot_delay > PCIE_WAKE_TIMEOUT) {
+                       ath10k_warn("target takes too long to wake up (awake count %d)\n",
+                                   atomic_read(&ar_pci->keep_awake_count));
+                       break;
+               }
+
+               udelay(curr_delay);
+               tot_delay += curr_delay;
+
+               if (curr_delay < 50)
+                       curr_delay += 5;
+       }
+}
+
+void ath10k_do_pci_sleep(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       void __iomem *pci_addr = ar_pci->mem;
+
+       if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
+               /* Allow sleep */
+               ar_pci->verified_awake = false;
+               iowrite32(PCIE_SOC_WAKE_RESET,
+                         pci_addr + PCIE_LOCAL_BASE_ADDRESS +
+                         PCIE_SOC_WAKE_ADDRESS);
+       }
+}
+
+/*
+ * FIXME: Handle OOM properly.
+ */
+static inline
+struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+{
+       struct ath10k_pci_compl *compl = NULL;
+
+       spin_lock_bh(&pipe_info->pipe_lock);
+       if (list_empty(&pipe_info->compl_free)) {
+               ath10k_warn("Completion buffers are full\n");
+               goto exit;
+       }
+       compl = list_first_entry(&pipe_info->compl_free,
+                                struct ath10k_pci_compl, list);
+       list_del(&compl->list);
+exit:
+       spin_unlock_bh(&pipe_info->pipe_lock);
+       return compl;
+}
+
+/* Called by lower (CE) layer when a send to Target completes. */
+static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
+                                   void *transfer_context,
+                                   u32 ce_data,
+                                   unsigned int nbytes,
+                                   unsigned int transfer_id)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_pci_compl *compl;
+       bool process = false;
+
+       do {
+               /*
+                * For the send completion of an item in sendlist, just
+                * increment num_sends_allowed. The upper layer callback will
+                * be triggered when last fragment is done with send.
+                */
+               if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
+                       spin_lock_bh(&pipe_info->pipe_lock);
+                       pipe_info->num_sends_allowed++;
+                       spin_unlock_bh(&pipe_info->pipe_lock);
+                       continue;
+               }
+
+               compl = get_free_compl(pipe_info);
+               if (!compl)
+                       break;
+
+               compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+               compl->ce_state = ce_state;
+               compl->pipe_info = pipe_info;
+               compl->transfer_context = transfer_context;
+               compl->nbytes = nbytes;
+               compl->transfer_id = transfer_id;
+               compl->flags = 0;
+
+               /*
+                * Add the completion to the processing queue.
+                */
+               spin_lock_bh(&ar_pci->compl_lock);
+               list_add_tail(&compl->list, &ar_pci->compl_process);
+               spin_unlock_bh(&ar_pci->compl_lock);
+
+               process = true;
+       } while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
+                                              &ce_data, &nbytes,
+                                              &transfer_id) == 0);
+
+       /*
+        * If only some of the items within a sendlist have completed,
+        * don't invoke completion processing until the entire sendlist
+        * has been sent.
+        */
+       if (!process)
+               return;
+
+       ath10k_pci_process_ce(ar);
+}
+
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
+                                   void *transfer_context, u32 ce_data,
+                                   unsigned int nbytes,
+                                   unsigned int transfer_id,
+                                   unsigned int flags)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_pci_compl *compl;
+       struct sk_buff *skb;
+
+       do {
+               compl = get_free_compl(pipe_info);
+               if (!compl)
+                       break;
+
+               compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+               compl->ce_state = ce_state;
+               compl->pipe_info = pipe_info;
+               compl->transfer_context = transfer_context;
+               compl->nbytes = nbytes;
+               compl->transfer_id = transfer_id;
+               compl->flags = flags;
+
+               skb = transfer_context;
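+               /*
+                * The buffer was DMA-mapped when it was posted to the copy
+                * engine in ath10k_pci_post_rx_pipe(). Unmap it before the
+                * host touches the received data.
+                */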
+               dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+                                skb->len + skb_tailroom(skb),
+                                DMA_FROM_DEVICE);
+               /*
+                * Add the completion to the processing queue.
+                */
+               spin_lock_bh(&ar_pci->compl_lock);
+               list_add_tail(&compl->list, &ar_pci->compl_process);
+               spin_unlock_bh(&ar_pci->compl_lock);
+
+       } while (ath10k_ce_completed_recv_next(ce_state,
+                                              &transfer_context,
+                                              &ce_data, &nbytes,
+                                              &transfer_id,
+                                              &flags) == 0);
+
+       ath10k_pci_process_ce(ar);
+}
+
+/* Send the first nbytes bytes of the buffer */
+static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
+                                   unsigned int transfer_id,
+                                   unsigned int bytes, struct sk_buff *nbuf)
+{
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+       struct ce_state *ce_hdl = pipe_info->ce_hdl;
+       struct ce_sendlist sendlist;
+       unsigned int len;
+       u32 flags = 0;
+       int ret;
+
+       memset(&sendlist, 0, sizeof(struct ce_sendlist));
+
+       len = min(bytes, nbuf->len);
+       bytes -= len;
+
+       if (len & 3)
+               ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
+
+       ath10k_dbg(ATH10K_DBG_PCI,
+                  "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
+                  nbuf->data, (unsigned long long) skb_cb->paddr,
+                  nbuf->len, len);
+       ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
+                       "ath10k tx: data: ",
+                       nbuf->data, nbuf->len);
+
+       ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
+
+       /* Make sure we have resources to handle this request */
+       spin_lock_bh(&pipe_info->pipe_lock);
+       if (!pipe_info->num_sends_allowed) {
+               ath10k_warn("Pipe: %d is full\n", pipe_id);
+               spin_unlock_bh(&pipe_info->pipe_lock);
+               return -ENOSR;
+       }
+       pipe_info->num_sends_allowed--;
+       spin_unlock_bh(&pipe_info->pipe_lock);
+
+       ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+       if (ret)
+               ath10k_warn("CE send failed: %p\n", nbuf);
+
+       return ret;
+}
+
+static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
+       int ret;
+
+       spin_lock_bh(&pipe_info->pipe_lock);
+       ret = pipe_info->num_sends_allowed;
+       spin_unlock_bh(&pipe_info->pipe_lock);
+
+       return ret;
+}
+
+static void ath10k_pci_hif_dump_area(struct ath10k *ar)
+{
+       u32 reg_dump_area = 0;
+       u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+       u32 host_addr;
+       int ret;
+       u32 i;
+
+       ath10k_err("firmware crashed!\n");
+       ath10k_err("hardware name %s version 0x%x\n",
+                  ar->hw_params.name, ar->target_version);
+       ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
+                  ar->fw_version_minor, ar->fw_version_release,
+                  ar->fw_version_build);
+
+       host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
+       if (ath10k_pci_diag_read_mem(ar, host_addr,
+                                    &reg_dump_area, sizeof(u32)) != 0) {
+               ath10k_warn("could not read hi_failure_state\n");
+               return;
+       }
+
+       ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
+
+       ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
+                                      &reg_dump_values[0],
+                                      REG_DUMP_COUNT_QCA988X * sizeof(u32));
+       if (ret != 0) {
+               ath10k_err("could not dump FW Dump Area\n");
+               return;
+       }
+
+       BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
+
+       ath10k_err("target Register Dump\n");
+       for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
+               ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+                          i,
+                          reg_dump_values[i],
+                          reg_dump_values[i + 1],
+                          reg_dump_values[i + 2],
+                          reg_dump_values[i + 3]);
+}
+
+static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+                                              int force)
+{
+       if (!force) {
+               int resources;
+               /*
+                * Decide whether to actually poll for completions, or just
+                * wait for a later chance.
+                * If there seem to be plenty of resources left, then just wait
+                * since checking involves reading a CE register, which is a
+                * relatively expensive operation.
+                */
+               resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
+
+               /*
+                * If at least 50% of the total resources are still available,
+                * don't bother checking again yet.
+                */
+               if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+                       return;
+       }
+       ath10k_ce_per_engine_service(ar, pipe);
+}
+
+static void ath10k_pci_hif_post_init(struct ath10k *ar,
+                                    struct ath10k_hif_cb *callbacks)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       memcpy(&ar_pci->msg_callbacks_current, callbacks,
+              sizeof(ar_pci->msg_callbacks_current));
+}
+
+static int ath10k_pci_start_ce(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_diag = ar_pci->ce_diag;
+       const struct ce_attr *attr;
+       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_compl *compl;
+       int i, pipe_num, completions, disable_interrupts;
+
+       spin_lock_init(&ar_pci->compl_lock);
+       INIT_LIST_HEAD(&ar_pci->compl_process);
+
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+
+               spin_lock_init(&pipe_info->pipe_lock);
+               INIT_LIST_HEAD(&pipe_info->compl_free);
+
+               /* Handle Diagnostic CE specially */
+               if (pipe_info->ce_hdl == ce_diag)
+                       continue;
+
+               attr = &host_ce_config_wlan[pipe_num];
+               completions = 0;
+
+               if (attr->src_nentries) {
+                       disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
+                       ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+                                                  ath10k_pci_ce_send_done,
+                                                  disable_interrupts);
+                       completions += attr->src_nentries;
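+                       /* Allow one send fewer than there are source ring
+                        * entries; the last slot is held in reserve. */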
+                       pipe_info->num_sends_allowed = attr->src_nentries - 1;
+               }
+
+               if (attr->dest_nentries) {
+                       ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+                                                  ath10k_pci_ce_recv_data);
+                       completions += attr->dest_nentries;
+               }
+
+               if (completions == 0)
+                       continue;
+
+               for (i = 0; i < completions; i++) {
+                       compl = kmalloc(sizeof(struct ath10k_pci_compl),
+                                       GFP_KERNEL);
+                       if (!compl) {
+                               ath10k_warn("No memory for completion state\n");
+                               ath10k_pci_stop_ce(ar);
+                               return -ENOMEM;
+                       }
+
+                       compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+                       list_add_tail(&compl->list, &pipe_info->compl_free);
+               }
+       }
+
+       return 0;
+}
+
+static void ath10k_pci_stop_ce(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_pci_compl *compl;
+       struct sk_buff *skb;
+       int i;
+
+       ath10k_ce_disable_interrupts(ar);
+
+       /* Cancel the pending tasklet */
+       tasklet_kill(&ar_pci->intr_tq);
+
+       for (i = 0; i < CE_COUNT; i++)
+               tasklet_kill(&ar_pci->pipe_info[i].intr);
+
+       /* Mark pending completions as aborted, so that upper layers free up
+        * their associated resources */
+       spin_lock_bh(&ar_pci->compl_lock);
+       list_for_each_entry(compl, &ar_pci->compl_process, list) {
+               skb = (struct sk_buff *)compl->transfer_context;
+               ATH10K_SKB_CB(skb)->is_aborted = true;
+       }
+       spin_unlock_bh(&ar_pci->compl_lock);
+}
+
+static void ath10k_pci_cleanup_ce(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_pci_compl *compl, *tmp;
+       struct hif_ce_pipe_info *pipe_info;
+       struct sk_buff *netbuf;
+       int pipe_num;
+
+       /* Free pending completions. */
+       spin_lock_bh(&ar_pci->compl_lock);
+       if (!list_empty(&ar_pci->compl_process))
+               ath10k_warn("pending completions still present! possible memory leaks.\n");
+
+       list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
+               list_del(&compl->list);
+               netbuf = (struct sk_buff *)compl->transfer_context;
+               dev_kfree_skb_any(netbuf);
+               kfree(compl);
+       }
+       spin_unlock_bh(&ar_pci->compl_lock);
+
+       /* Free unused completions for each pipe. */
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+
+               spin_lock_bh(&pipe_info->pipe_lock);
+               list_for_each_entry_safe(compl, tmp,
+                                        &pipe_info->compl_free, list) {
+                       list_del(&compl->list);
+                       kfree(compl);
+               }
+               spin_unlock_bh(&pipe_info->pipe_lock);
+       }
+}
+
+static void ath10k_pci_process_ce(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
+       struct ath10k_pci_compl *compl;
+       struct sk_buff *skb;
+       unsigned int nbytes;
+       int ret, send_done = 0;
+
+       /* Upper layers aren't ready to handle tx/rx completions in parallel so
+        * we must serialize all completion processing. */
+
+       spin_lock_bh(&ar_pci->compl_lock);
+       if (ar_pci->compl_processing) {
+               spin_unlock_bh(&ar_pci->compl_lock);
+               return;
+       }
+       ar_pci->compl_processing = true;
+       spin_unlock_bh(&ar_pci->compl_lock);
+
+       for (;;) {
+               spin_lock_bh(&ar_pci->compl_lock);
+               if (list_empty(&ar_pci->compl_process)) {
+                       spin_unlock_bh(&ar_pci->compl_lock);
+                       break;
+               }
+               compl = list_first_entry(&ar_pci->compl_process,
+                                        struct ath10k_pci_compl, list);
+               list_del(&compl->list);
+               spin_unlock_bh(&ar_pci->compl_lock);
+
+               if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+                       cb->tx_completion(ar,
+                                         compl->transfer_context,
+                                         compl->transfer_id);
+                       send_done = 1;
+               } else {
+                       ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
+                       if (ret) {
+                               ath10k_warn("Unable to post recv buffer for pipe: %d\n",
+                                           compl->pipe_info->pipe_num);
+                               break;
+                       }
+
+                       skb = (struct sk_buff *)compl->transfer_context;
+                       nbytes = compl->nbytes;
+
+                       ath10k_dbg(ATH10K_DBG_PCI,
+                                  "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
+                                  skb, nbytes);
+                       ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
+                                       "ath10k rx: ", skb->data, nbytes);
+
+                       if (skb->len + skb_tailroom(skb) >= nbytes) {
+                               skb_trim(skb, 0);
+                               skb_put(skb, nbytes);
+                               cb->rx_completion(ar, skb,
+                                                 compl->pipe_info->pipe_num);
+                       } else {
+                               ath10k_warn("rxed more than expected (nbytes %d, max %d)",
+                                           nbytes,
+                                           skb->len + skb_tailroom(skb));
+                       }
+               }
+
+               compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+
+               /*
+                * Add completion back to the pipe's free list.
+                */
+               spin_lock_bh(&compl->pipe_info->pipe_lock);
+               list_add_tail(&compl->list, &compl->pipe_info->compl_free);
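+               /* A send completion returns the send credit consumed in
+                * ath10k_pci_hif_send_head(). */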
+               compl->pipe_info->num_sends_allowed += send_done;
+               spin_unlock_bh(&compl->pipe_info->pipe_lock);
+       }
+
+       spin_lock_bh(&ar_pci->compl_lock);
+       ar_pci->compl_processing = false;
+       spin_unlock_bh(&ar_pci->compl_lock);
+}
+
+/* TODO - temporary mapping while we have too few CEs */
+static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
+                                             u16 service_id, u8 *ul_pipe,
+                                             u8 *dl_pipe, int *ul_is_polled,
+                                             int *dl_is_polled)
+{
+       int ret = 0;
+
+       /* polling for received messages not supported */
+       *dl_is_polled = 0;
+
+       switch (service_id) {
+       case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+               /*
+                * Host->target HTT gets its own pipe, so it can be polled
+                * while other pipes are interrupt driven.
+                */
+               *ul_pipe = 4;
+               /*
+                * Use the same target->host pipe for HTC ctrl, HTC raw
+                * streams, and HTT.
+                */
+               *dl_pipe = 1;
+               break;
+
+       case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+       case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+               /*
+                * Note: HTC_RAW_STREAMS_SVC is currently unused, and
+                * HTC_CTRL_RSVD_SVC could share the same pipe as the
+                * WMI services.  So, if another CE is needed, change
+                * this to *ul_pipe = 3, which frees up CE 0.
+                */
+               /* *ul_pipe = 3; */
+               *ul_pipe = 0;
+               *dl_pipe = 1;
+               break;
+
+       case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+       case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+       case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+       case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+
+       case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+               *ul_pipe = 3;
+               *dl_pipe = 2;
+               break;
+
+               /* pipe 5 unused   */
+               /* pipe 6 reserved */
+               /* pipe 7 reserved */
+
+       default:
+               ret = -1;
+               break;
+       }
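+       /* A pipe is polled on the host->target side iff its copy engine
+        * has interrupts disabled (CE_ATTR_DIS_INTR). */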
+       *ul_is_polled =
+               (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
+
+       return ret;
+}
+
+static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+                                               u8 *ul_pipe, u8 *dl_pipe)
+{
+       int ul_is_polled, dl_is_polled;
+
+       (void)ath10k_pci_hif_map_service_to_pipe(ar,
+                                                ATH10K_HTC_SVC_ID_RSVD_CTRL,
+                                                ul_pipe,
+                                                dl_pipe,
+                                                &ul_is_polled,
+                                                &dl_is_polled);
+}
+
+static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+                                  int num)
+{
+       struct ath10k *ar = pipe_info->hif_ce_state;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_state = pipe_info->ce_hdl;
+       struct sk_buff *skb;
+       dma_addr_t ce_data;
+       int i, ret = 0;
+
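+       /* A buf_sz of zero denotes a pipe with no receive buffers to post. */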
+       if (pipe_info->buf_sz == 0)
+               return 0;
+
+       for (i = 0; i < num; i++) {
+               skb = dev_alloc_skb(pipe_info->buf_sz);
+               if (!skb) {
+                       ath10k_warn("could not allocate skbuff for pipe %d\n",
+                                   num);
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+               ce_data = dma_map_single(ar->dev, skb->data,
+                                        skb->len + skb_tailroom(skb),
+                                        DMA_FROM_DEVICE);
+
+               if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
+                       ath10k_warn("could not dma map skbuff\n");
+                       dev_kfree_skb_any(skb);
+                       ret = -EIO;
+                       goto err;
+               }
+
+               ATH10K_SKB_CB(skb)->paddr = ce_data;
+
+               pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
+                                              pipe_info->buf_sz,
+                                              PCI_DMA_FROMDEVICE);
+
+               ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
+                                                ce_data);
+               if (ret) {
+                       ath10k_warn("could not enqueue to pipe %d (%d)\n",
+                                   num, ret);
+                       goto err;
+               }
+       }
+
+       return ret;
+
+err:
+       ath10k_pci_rx_pipe_cleanup(pipe_info);
+       return ret;
+}
+
+static int ath10k_pci_post_rx(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info;
+       const struct ce_attr *attr;
+       int pipe_num, ret = 0;
+
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+               attr = &host_ce_config_wlan[pipe_num];
+
+               if (attr->dest_nentries == 0)
+                       continue;
+
+               ret = ath10k_pci_post_rx_pipe(pipe_info,
+                                             attr->dest_nentries - 1);
+               if (ret) {
+                       ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
+                                   pipe_num);
+
+                       for (; pipe_num >= 0; pipe_num--) {
+                               pipe_info = &ar_pci->pipe_info[pipe_num];
+                               ath10k_pci_rx_pipe_cleanup(pipe_info);
+                       }
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ath10k_pci_hif_start(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       ret = ath10k_pci_start_ce(ar);
+       if (ret) {
+               ath10k_warn("could not start CE (%d)\n", ret);
+               return ret;
+       }
+
+       /* Post buffers once to start things off. */
+       ret = ath10k_pci_post_rx(ar);
+       if (ret) {
+               ath10k_warn("could not post rx pipes (%d)\n", ret);
+               return ret;
+       }
+
+       ar_pci->started = 1;
+       return 0;
+}
+
+static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+{
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+       struct ce_state *ce_hdl;
+       u32 buf_sz;
+       struct sk_buff *netbuf;
+       u32 ce_data;
+
+       buf_sz = pipe_info->buf_sz;
+
+       /* Unused Copy Engine */
+       if (buf_sz == 0)
+               return;
+
+       ar = pipe_info->hif_ce_state;
+       ar_pci = ath10k_pci_priv(ar);
+
+       if (!ar_pci->started)
+               return;
+
+       ce_hdl = pipe_info->ce_hdl;
+
+       while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
+                                         &ce_data) == 0) {
+               dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
+                                netbuf->len + skb_tailroom(netbuf),
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(netbuf);
+       }
+}
+
+static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+{
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+       struct ce_state *ce_hdl;
+       struct sk_buff *netbuf;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int id;
+       u32 buf_sz;
+
+       buf_sz = pipe_info->buf_sz;
+
+       /* Unused Copy Engine */
+       if (buf_sz == 0)
+               return;
+
+       ar = pipe_info->hif_ce_state;
+       ar_pci = ath10k_pci_priv(ar);
+
+       if (!ar_pci->started)
+               return;
+
+       ce_hdl = pipe_info->ce_hdl;
+
+       while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
+                                         &ce_data, &nbytes, &id) == 0) {
+               if (netbuf != CE_SENDLIST_ITEM_CTXT) {
+                       /*
+                        * Indicate the completion to higher layer to free
+                        * the buffer.
+                        */
+                       ATH10K_SKB_CB(netbuf)->is_aborted = true;
+                       ar_pci->msg_callbacks_current.tx_completion(ar,
+                                                                   netbuf,
+                                                                   id);
+               }
+       }
+}
+
+/*
+ * Cleanup residual buffers for device shutdown:
+ *    buffers that were enqueued for receive
+ *    buffers that were to be sent
+ * Note: Buffers that had completed but which were
+ * not yet processed are on a completion queue. They
+ * are handled when the completion thread shuts down.
+ */
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int pipe_num;
+
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               struct hif_ce_pipe_info *pipe_info;
+
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+               ath10k_pci_rx_pipe_cleanup(pipe_info);
+               ath10k_pci_tx_pipe_cleanup(pipe_info);
+       }
+}
+
+static void ath10k_pci_ce_deinit(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info;
+       int pipe_num;
+
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+               if (pipe_info->ce_hdl) {
+                       ath10k_ce_deinit(pipe_info->ce_hdl);
+                       pipe_info->ce_hdl = NULL;
+                       pipe_info->buf_sz = 0;
+               }
+       }
+}
+
+static void ath10k_pci_hif_stop(struct ath10k *ar)
+{
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       ath10k_pci_stop_ce(ar);
+
+       /* At this point, asynchronous threads are stopped, the target should
+        * not DMA nor interrupt. We process the leftovers and then free
+        * everything else up. */
+
+       ath10k_pci_process_ce(ar);
+       ath10k_pci_cleanup_ce(ar);
+       ath10k_pci_buffer_cleanup(ar);
+       ath10k_pci_ce_deinit(ar);
+}
+
+static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+                                          void *req, u32 req_len,
+                                          void *resp, u32 *resp_len)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
+       struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+       dma_addr_t req_paddr = 0;
+       dma_addr_t resp_paddr = 0;
+       struct bmi_xfer xfer = {};
+       void *treq, *tresp = NULL;
+       int ret = 0;
+
+       if (resp && !resp_len)
+               return -EINVAL;
+
+       if (resp && resp_len && *resp_len == 0)
+               return -EINVAL;
+
+       treq = kmemdup(req, req_len, GFP_KERNEL);
+       if (!treq)
+               return -ENOMEM;
+
+       req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+       ret = dma_mapping_error(ar->dev, req_paddr);
+       if (ret)
+               goto err_dma;
+
+       if (resp && resp_len) {
+               tresp = kzalloc(*resp_len, GFP_KERNEL);
+               if (!tresp) {
+                       ret = -ENOMEM;
+                       goto err_req;
+               }
+
+               resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+                                           DMA_FROM_DEVICE);
+               ret = dma_mapping_error(ar->dev, resp_paddr);
+               if (ret)
+                       goto err_req;
+
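+               /* The response is delivered by ath10k_pci_bmi_recv_data(),
+                * which records resp_len and completes xfer.done. */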
+               xfer.wait_for_resp = true;
+               xfer.resp_len = 0;
+
+               ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
+       }
+
+       init_completion(&xfer.done);
+
+       ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
+       if (ret)
+               goto err_resp;
+
+       ret = wait_for_completion_timeout(&xfer.done,
+                                         BMI_COMMUNICATION_TIMEOUT_HZ);
+       if (ret <= 0) {
+               u32 unused_buffer;
+               unsigned int unused_nbytes;
+               unsigned int unused_id;
+
+               ret = -ETIMEDOUT;
+               ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
+                                          &unused_nbytes, &unused_id);
+       } else {
+               /* non-zero means we did not time out */
+               ret = 0;
+       }
+
+err_resp:
+       if (resp) {
+               u32 unused_buffer;
+
+               ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
+               dma_unmap_single(ar->dev, resp_paddr,
+                                *resp_len, DMA_FROM_DEVICE);
+       }
+err_req:
+       dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
+
+       if (ret == 0 && resp_len) {
+               *resp_len = min(*resp_len, xfer.resp_len);
+               memcpy(resp, tresp, *resp_len);
+       }
+err_dma:
+       kfree(treq);
+       kfree(tresp);
+
+       return ret;
+}
+
+static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
+                                    void *transfer_context,
+                                    u32 data,
+                                    unsigned int nbytes,
+                                    unsigned int transfer_id)
+{
+       struct bmi_xfer *xfer = transfer_context;
+
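+       /* When a response is expected, completion is signalled from the
+        * receive path instead. */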
+       if (xfer->wait_for_resp)
+               return;
+
+       complete(&xfer->done);
+}
+
+static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
+                                    void *transfer_context,
+                                    u32 data,
+                                    unsigned int nbytes,
+                                    unsigned int transfer_id,
+                                    unsigned int flags)
+{
+       struct bmi_xfer *xfer = transfer_context;
+
+       if (!xfer->wait_for_resp) {
+               ath10k_warn("unexpected: BMI data received; ignoring\n");
+               return;
+       }
+
+       xfer->resp_len = nbytes;
+       complete(&xfer->done);
+}
+
+/*
+ * Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_VO,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_VO,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_BK,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_BK,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_BE,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_BE,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_VI,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_VI,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_CONTROL,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_CONTROL,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_RSVD_CTRL,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                0,             /* could be moved to 3 (share with WMI) */
+       },
+       {
+                ATH10K_HTC_SVC_ID_RSVD_CTRL,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                1,
+       },
+       {
+                ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                0,
+       },
+       {
+                ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                1,
+       },
+       {
+                ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                4,
+       },
+       {
+                ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                1,
+       },
+
+       /* (Additions here) */
+
+       {                               /* Must be last */
+                0,
+                0,
+                0,
+       },
+};
+
+/*
+ * Send an interrupt to the device to wake up the Target CPU
+ * so it has an opportunity to notice any changed state.
+ */
+static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
+{
+       int ret;
+       u32 core_ctrl;
+
+       ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
+                                             CORE_CTRL_ADDRESS,
+                                         &core_ctrl);
+       if (ret) {
+               ath10k_warn("Unable to read core ctrl\n");
+               return ret;
+       }
+
+       /* A_INUM_FIRMWARE interrupt to Target CPU */
+       core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
+
+       ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
+                                              CORE_CTRL_ADDRESS,
+                                          core_ctrl);
+       if (ret)
+               ath10k_warn("Unable to set interrupt mask\n");
+
+       return ret;
+}
+
+static int ath10k_pci_init_config(struct ath10k *ar)
+{
+       u32 interconnect_targ_addr;
+       u32 pcie_state_targ_addr = 0;
+       u32 pipe_cfg_targ_addr = 0;
+       u32 svc_to_pipe_map = 0;
+       u32 pcie_config_flags = 0;
+       u32 ealloc_value;
+       u32 ealloc_targ_addr;
+       u32 flag2_value;
+       u32 flag2_targ_addr;
+       int ret = 0;
+
+       /* Download to Target the CE Config and the service-to-CE map */
+       interconnect_targ_addr =
+               host_interest_item_address(HI_ITEM(hi_interconnect_state));
+
+       /* Supply Target-side CE configuration */
+       ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
+                                         &pcie_state_targ_addr);
+       if (ret != 0) {
+               ath10k_err("Failed to get pcie state addr: %d\n", ret);
+               return ret;
+       }
+
+       if (pcie_state_targ_addr == 0) {
+               ret = -EIO;
+               ath10k_err("Invalid pcie state addr\n");
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+                                         offsetof(struct pcie_state,
+                                                  pipe_cfg_addr),
+                                         &pipe_cfg_targ_addr);
+       if (ret != 0) {
+               ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
+               return ret;
+       }
+
+       if (pipe_cfg_targ_addr == 0) {
+               ret = -EIO;
+               ath10k_err("Invalid pipe cfg addr\n");
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
+                                target_ce_config_wlan,
+                                sizeof(target_ce_config_wlan));
+
+       if (ret != 0) {
+               ath10k_err("Failed to write pipe cfg: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+                                         offsetof(struct pcie_state,
+                                                  svc_to_pipe_map),
+                                         &svc_to_pipe_map);
+       if (ret != 0) {
+               ath10k_err("Failed to get svc/pipe map: %d\n", ret);
+               return ret;
+       }
+
+       if (svc_to_pipe_map == 0) {
+               ret = -EIO;
+               ath10k_err("Invalid svc_to_pipe map\n");
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
+                                target_service_to_ce_map_wlan,
+                                sizeof(target_service_to_ce_map_wlan));
+       if (ret != 0) {
+               ath10k_err("Failed to write svc/pipe map: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+                                         offsetof(struct pcie_state,
+                                                  config_flags),
+                                         &pcie_config_flags);
+       if (ret != 0) {
+               ath10k_err("Failed to get pcie config_flags: %d\n", ret);
+               return ret;
+       }
+
+       pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
+
+       ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
+                                offsetof(struct pcie_state, config_flags),
+                                &pcie_config_flags,
+                                sizeof(pcie_config_flags));
+       if (ret != 0) {
+               ath10k_err("Failed to write pcie config_flags: %d\n", ret);
+               return ret;
+       }
+
+       /* configure early allocation */
+       ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
+
+       ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
+       if (ret != 0) {
+               ath10k_err("Faile to get early alloc val: %d\n", ret);
+               return ret;
+       }
+
+       /* first bank is switched to IRAM */
+       ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
+                        HI_EARLY_ALLOC_MAGIC_MASK);
+       ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+                        HI_EARLY_ALLOC_IRAM_BANKS_MASK);
+
+       ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
+       if (ret != 0) {
+               ath10k_err("Failed to set early alloc val: %d\n", ret);
+               return ret;
+       }
+
+       /* Tell Target to proceed with initialization */
+       flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
+
+       ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
+       if (ret != 0) {
+               ath10k_err("Failed to get option val: %d\n", ret);
+               return ret;
+       }
+
+       flag2_value |= HI_OPTION_EARLY_CFG_DONE;
+
+       ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
+       if (ret != 0) {
+               ath10k_err("Failed to set option val: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int ath10k_pci_ce_init(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info;
+       const struct ce_attr *attr;
+       int pipe_num;
+
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+               pipe_info->pipe_num = pipe_num;
+               pipe_info->hif_ce_state = ar;
+               attr = &host_ce_config_wlan[pipe_num];
+
+               pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
+               if (pipe_info->ce_hdl == NULL) {
+                       ath10k_err("Unable to initialize CE for pipe: %d\n",
+                                  pipe_num);
+
+                       /* It is safe to call it here. It checks if ce_hdl is
+                        * valid for each pipe */
+                       ath10k_pci_ce_deinit(ar);
+                       return -1;
+               }
+
+               if (pipe_num == ar_pci->ce_count - 1) {
+                       /*
+                        * Reserve the last CE for diagnostic window
+                        * support.
+                        */
+                       ar_pci->ce_diag =
+                               ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
+                       continue;
+               }
+
+               pipe_info->buf_sz = (size_t) (attr->src_sz_max);
+       }
+
+       /*
+        * Initially, establish CE completion handlers for use with BMI.
+        * These are overwritten with generic handlers after we exit BMI phase.
+        */
+       pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+       ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+                                  ath10k_pci_bmi_send_done, 0);
+
+       pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+       ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+                                  ath10k_pci_bmi_recv_data);
+
+       return 0;
+}
+
+static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       u32 fw_indicator_address, fw_indicator;
+
+       ath10k_pci_wake(ar);
+
+       fw_indicator_address = ar_pci->fw_indicator_address;
+       fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
+
+       if (fw_indicator & FW_IND_EVENT_PENDING) {
+               /* ACK: clear Target-side pending event */
+               ath10k_pci_write32(ar, fw_indicator_address,
+                                  fw_indicator & ~FW_IND_EVENT_PENDING);
+
+               if (ar_pci->started) {
+                       ath10k_pci_hif_dump_area(ar);
+               } else {
+                       /*
+                        * Probable Target failure before we're prepared
+                        * to handle it.  Generally unexpected.
+                        */
+                       ath10k_warn("early firmware event indicated\n");
+               }
+       }
+
+       ath10k_pci_sleep(ar);
+}
+
+static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+       .send_head              = ath10k_pci_hif_send_head,
+       .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
+       .start                  = ath10k_pci_hif_start,
+       .stop                   = ath10k_pci_hif_stop,
+       .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
+       .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
+       .send_complete_check    = ath10k_pci_hif_send_complete_check,
+       .init                   = ath10k_pci_hif_post_init,
+       .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
+};
+
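+/* Per-pipe bottom half: service the copy engine that raised the interrupt. */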
+static void ath10k_pci_ce_tasklet(unsigned long ptr)
+{
+       struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+       struct ath10k_pci *ar_pci = pipe->ar_pci;
+
+       ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
+}
+
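+/* Bottom half for the dedicated firmware error/event MSI vector. */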
+static void ath10k_msi_err_tasklet(unsigned long data)
+{
+       struct ath10k *ar = (struct ath10k *)data;
+
+       ath10k_pci_fw_interrupt_handler(ar);
+}
+
+/*
+ * Handler for a per-engine interrupt on a PARTICULAR CE.
+ * This is used in cases where each CE has a private MSI interrupt.
+ */
+static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
+{
+       struct ath10k *ar = arg;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
+
+       if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
+               ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
+               return IRQ_HANDLED;
+       }
+
+       /*
+        * NOTE: We are able to derive ce_id from irq because we
+        * use a one-to-one mapping for CEs 0..5.
+        * CEs 6 & 7 do not use interrupts at all.
+        *
+        * This mapping must be kept in sync with the mapping
+        * used by firmware.
+        */
+       tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
+{
+       struct ath10k *ar = arg;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       tasklet_schedule(&ar_pci->msi_fw_err);
+       return IRQ_HANDLED;
+}
+
+/*
+ * Top-level interrupt handler for all PCI interrupts from a Target.
+ * When a block of MSI interrupts is allocated, this top-level handler
+ * is not used; instead, we directly call the correct sub-handler.
+ */
+static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
+{
+       struct ath10k *ar = arg;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       if (ar_pci->num_msi_intrs == 0) {
+               /*
+                * IMPORTANT: the INTR_CLR register has to be written after
+                * INTR_ENABLE is set to 0; otherwise the interrupt cannot
+                * really be cleared.
+                */
+               iowrite32(0, ar_pci->mem +
+                         (SOC_CORE_BASE_ADDRESS |
+                          PCIE_INTR_ENABLE_ADDRESS));
+               iowrite32(PCIE_INTR_FIRMWARE_MASK |
+                         PCIE_INTR_CE_MASK_ALL,
+                         ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+                                        PCIE_INTR_CLR_ADDRESS));
+               /*
+                * IMPORTANT: this extra read transaction is required to
+                * flush the posted write buffer.
+                */
+               (void) ioread32(ar_pci->mem +
+                               (SOC_CORE_BASE_ADDRESS |
+                                PCIE_INTR_ENABLE_ADDRESS));
+       }
+
+       tasklet_schedule(&ar_pci->intr_tq);
+
+       return IRQ_HANDLED;
+}
+
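+/*
+ * Bottom half shared by all interrupt modes: handle firmware events,
+ * service every copy engine and, for legacy interrupts, re-enable the
+ * interrupt sources that the hard-irq handler masked.
+ */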
+static void ath10k_pci_tasklet(unsigned long data)
+{
+       struct ath10k *ar = (struct ath10k *)data;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
+       ath10k_ce_per_engine_service_any(ar);
+
+       if (ar_pci->num_msi_intrs == 0) {
+               /* Enable Legacy PCI line interrupts */
+               iowrite32(PCIE_INTR_FIRMWARE_MASK |
+                         PCIE_INTR_CE_MASK_ALL,
+                         ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+                                        PCIE_INTR_ENABLE_ADDRESS));
+               /*
+                * IMPORTANT: this extra read transaction is required to
+                * flush the posted write buffer
+                */
+               (void) ioread32(ar_pci->mem +
+                               (SOC_CORE_BASE_ADDRESS |
+                                PCIE_INTR_ENABLE_ADDRESS));
+       }
+}
+
+static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+       int i;
+
+       ret = pci_enable_msi_block(ar_pci->pdev, num);
+       if (ret)
+               return ret;
+
+       ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
+                         ath10k_pci_msi_fw_handler,
+                         IRQF_SHARED, "ath10k_pci", ar);
+       if (ret)
+               return ret;
+
+       for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
+               ret = request_irq(ar_pci->pdev->irq + i,
+                                 ath10k_pci_per_engine_handler,
+                                 IRQF_SHARED, "ath10k_pci", ar);
+               if (ret) {
+                       ath10k_warn("request_irq(%d) failed %d\n",
+                                   ar_pci->pdev->irq + i, ret);
+
+                       for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
+                               free_irq(ar_pci->pdev->irq + i, ar);
+                       free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
+
+                       pci_disable_msi(ar_pci->pdev);
+                       return ret;
+               }
+       }
+
+       ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
+       return 0;
+}
+
+static int ath10k_pci_start_intr_msi(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       ret = pci_enable_msi(ar_pci->pdev);
+       if (ret < 0)
+               return ret;
+
+       ret = request_irq(ar_pci->pdev->irq,
+                         ath10k_pci_interrupt_handler,
+                         IRQF_SHARED, "ath10k_pci", ar);
+       if (ret < 0) {
+               pci_disable_msi(ar_pci->pdev);
+               return ret;
+       }
+
+       ath10k_info("MSI interrupt handling\n");
+       return 0;
+}
+
+static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       ret = request_irq(ar_pci->pdev->irq,
+                         ath10k_pci_interrupt_handler,
+                         IRQF_SHARED, "ath10k_pci", ar);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Make sure to wake the Target before enabling Legacy
+        * Interrupt.
+        */
+       iowrite32(PCIE_SOC_WAKE_V_MASK,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+
+       ath10k_pci_wait(ar);
+
+       /*
+        * A potential race occurs here: The CORE_BASE write
+        * depends on target correctly decoding AXI address but
+        * host won't know when target writes BAR to CORE_CTRL.
+        * This write might get lost if target has NOT written BAR.
+        * For now, fix the race by repeating the write in below
+        * synchronization checking.
+        */
+       iowrite32(PCIE_INTR_FIRMWARE_MASK |
+                 PCIE_INTR_CE_MASK_ALL,
+                 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+                                PCIE_INTR_ENABLE_ADDRESS));
+       iowrite32(PCIE_SOC_WAKE_RESET,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+
+       ath10k_info("legacy interrupt handling\n");
+       return 0;
+}
+
+static int ath10k_pci_start_intr(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int num = MSI_NUM_REQUEST;
+       int ret;
+       int i;
+
+       tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
+       tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
+                    (unsigned long) ar);
+
+       for (i = 0; i < CE_COUNT; i++) {
+               ar_pci->pipe_info[i].ar_pci = ar_pci;
+               tasklet_init(&ar_pci->pipe_info[i].intr,
+                            ath10k_pci_ce_tasklet,
+                            (unsigned long)&ar_pci->pipe_info[i]);
+       }
+
+       if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
+               num = 1;
+
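+       /* Try MSI-X first, fall back to a single MSI and finally to legacy
+        * interrupts. */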
+       if (num > 1) {
+               ret = ath10k_pci_start_intr_msix(ar, num);
+               if (ret == 0)
+                       goto exit;
+
+               ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
+               num = 1;
+       }
+
+       if (num == 1) {
+               ret = ath10k_pci_start_intr_msi(ar);
+               if (ret == 0)
+                       goto exit;
+
+               ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
+                           ret);
+               num = 0;
+       }
+
+       ret = ath10k_pci_start_intr_legacy(ar);
+
+exit:
+       ar_pci->num_msi_intrs = num;
+       ar_pci->ce_count = CE_COUNT;
+       return ret;
+}
+
+static void ath10k_pci_stop_intr(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int i;
+
+       /* There's at least one interrupt regardless of whether it's a legacy
+        * INTR, MSI or MSI-X */
+       for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+               free_irq(ar_pci->pdev->irq + i, ar);
+
+       if (ar_pci->num_msi_intrs > 0)
+               pci_disable_msi(ar_pci->pdev);
+}
+
+static int ath10k_pci_reset_target(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int wait_limit = 300; /* 3 sec */
+
+       /* Wait for Target to finish initialization before we proceed. */
+       iowrite32(PCIE_SOC_WAKE_V_MASK,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+
+       ath10k_pci_wait(ar);
+
+       while (wait_limit-- &&
+              !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
+                FW_IND_INITIALIZED)) {
+               if (ar_pci->num_msi_intrs == 0)
+                       /* Fix potential race by repeating CORE_BASE writes */
+                       iowrite32(PCIE_INTR_FIRMWARE_MASK |
+                                 PCIE_INTR_CE_MASK_ALL,
+                                 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+                                                PCIE_INTR_ENABLE_ADDRESS));
+               mdelay(10);
+       }
+
+       if (wait_limit < 0) {
+               ath10k_err("Target stalled\n");
+               iowrite32(PCIE_SOC_WAKE_RESET,
+                         ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                         PCIE_SOC_WAKE_ADDRESS);
+               return -EIO;
+       }
+
+       iowrite32(PCIE_SOC_WAKE_RESET,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+
+       return 0;
+}
+
+static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
+{
+       struct ath10k *ar = ar_pci->ar;
+       void __iomem *mem = ar_pci->mem;
+       int i;
+       u32 val;
+
+       if (!SOC_GLOBAL_RESET_ADDRESS)
+               return;
+
+       if (!mem)
+               return;
+
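+       /* Wake the target first; the reset registers below are accessible
+        * only while the SoC is awake. */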
+       ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+                              PCIE_SOC_WAKE_V_MASK);
+       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+               if (ath10k_pci_target_is_awake(ar))
+                       break;
+               msleep(1);
+       }
+
+       /* Put Target, including PCIe, into RESET. */
+       val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+       val |= 1;
+       ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+
+       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+               if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+                                         RTC_STATE_COLD_RESET_MASK)
+                       break;
+               msleep(1);
+       }
+
+       /* Pull Target, including PCIe, out of RESET. */
+       val &= ~1;
+       ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+
+       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+               if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+                                           RTC_STATE_COLD_RESET_MASK))
+                       break;
+               msleep(1);
+       }
+
+       ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+}
+
+static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
+{
+       int i;
+
+       for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
+               if (!test_bit(i, ar_pci->features))
+                       continue;
+
+               switch (i) {
+               case ATH10K_PCI_FEATURE_MSI_X:
+                       ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
+                       break;
+               case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
+                       ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+                       break;
+               }
+       }
+}
+
+static int ath10k_pci_probe(struct pci_dev *pdev,
+                           const struct pci_device_id *pci_dev)
+{
+       void __iomem *mem;
+       int ret = 0;
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+       u32 lcr_val;
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
+       if (ar_pci == NULL)
+               return -ENOMEM;
+
+       ar_pci->pdev = pdev;
+       ar_pci->dev = &pdev->dev;
+
+       switch (pci_dev->device) {
+       case QCA988X_1_0_DEVICE_ID:
+               set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
+               break;
+       case QCA988X_2_0_DEVICE_ID:
+               set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
+               break;
+       default:
+               ret = -ENODEV;
+               ath10k_err("Unkown device ID: %d\n", pci_dev->device);
+               goto err_ar_pci;
+       }
+
+       ath10k_pci_dump_features(ar_pci);
+
+       ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
+                               &ath10k_pci_hif_ops);
+       if (!ar) {
+               ath10k_err("ath10k_core_create failed!\n");
+               ret = -EINVAL;
+               goto err_ar_pci;
+       }
+
+       /* Enable QCA988X_1.0 HW workarounds */
+       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
+               spin_lock_init(&ar_pci->hw_v1_workaround_lock);
+
+       ar_pci->ar = ar;
+       ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
+       atomic_set(&ar_pci->keep_awake_count, 0);
+
+       pci_set_drvdata(pdev, ar);
+
+       /*
+        * Without any knowledge of the Host, the Target may have been reset or
+        * power cycled and its Config Space may no longer reflect the PCI
+        * address space that was assigned earlier by the PCI infrastructure.
+        * Refresh it now.
+        */
+       ret = pci_assign_resource(pdev, BAR_NUM);
+       if (ret) {
+               ath10k_err("cannot assign PCI space: %d\n", ret);
+               goto err_ar;
+       }
+
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               ath10k_err("cannot enable PCI device: %d\n", ret);
+               goto err_ar;
+       }
+
+       /* Request MMIO resources */
+       ret = pci_request_region(pdev, BAR_NUM, "ath");
+       if (ret) {
+               ath10k_err("PCI MMIO reservation error: %d\n", ret);
+               goto err_device;
+       }
+
+       /*
+        * Target structures have a limit of 32 bit DMA pointers.
+        * DMA pointers can be wider than 32 bits by default on some systems.
+        */
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (ret) {
+               ath10k_err("32-bit DMA not available: %d\n", ret);
+               goto err_region;
+       }
+
+       ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (ret) {
+               ath10k_err("cannot enable 32-bit consistent DMA\n");
+               goto err_region;
+       }
+
+       /* Set bus master bit in PCI_COMMAND to enable DMA */
+       pci_set_master(pdev);
+
+       /*
+        * Temporary FIX: disable ASPM
+        * Will be removed after the OTP is programmed
+        */
+       pci_read_config_dword(pdev, 0x80, &lcr_val);
+       pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
+
+       /* Arrange for access to Target SoC registers. */
+       mem = pci_iomap(pdev, BAR_NUM, 0);
+       if (!mem) {
+               ath10k_err("PCI iomap error\n");
+               ret = -EIO;
+               goto err_master;
+       }
+
+       ar_pci->mem = mem;
+
+       spin_lock_init(&ar_pci->ce_lock);
+
+       ar_pci->cacheline_sz = dma_get_cache_alignment();
+
+       ret = ath10k_pci_start_intr(ar);
+       if (ret) {
+               ath10k_err("could not start interrupt handling (%d)\n", ret);
+               goto err_iomap;
+       }
+
+       /*
+        * Bring the target up cleanly.
+        *
+        * The target may be in an undefined state with an AUX-powered Target
+        * and a Host in WoW mode. If the Host crashes, loses power, or is
+        * restarted (without unloading the driver) then the Target is left
+        * (aux) powered and running. On a subsequent driver load, the Target
+        * is in an unexpected state. We try to catch that here in order to
+        * reset the Target and retry the probe.
+        */
+       ath10k_pci_device_reset(ar_pci);
+
+       ret = ath10k_pci_reset_target(ar);
+       if (ret)
+               goto err_intr;
+
+       if (ath10k_target_ps) {
+               ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
+       } else {
+               /* Force AWAKE forever */
+               ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
+               ath10k_do_pci_wake(ar);
+       }
+
+       ret = ath10k_pci_ce_init(ar);
+       if (ret)
+               goto err_intr;
+
+       ret = ath10k_pci_init_config(ar);
+       if (ret)
+               goto err_ce;
+
+       ret = ath10k_pci_wake_target_cpu(ar);
+       if (ret) {
+               ath10k_err("could not wake up target CPU (%d)\n", ret);
+               goto err_ce;
+       }
+
+       ret = ath10k_core_register(ar);
+       if (ret) {
+               ath10k_err("could not register driver core (%d)\n", ret);
+               goto err_ce;
+       }
+
+       return 0;
+
+err_ce:
+       ath10k_pci_ce_deinit(ar);
+err_intr:
+       ath10k_pci_stop_intr(ar);
+err_iomap:
+       pci_iounmap(pdev, mem);
+err_master:
+       pci_clear_master(pdev);
+err_region:
+       pci_release_region(pdev, BAR_NUM);
+err_device:
+       pci_disable_device(pdev);
+err_ar:
+       pci_set_drvdata(pdev, NULL);
+       ath10k_core_destroy(ar);
+err_ar_pci:
+       /* call HIF PCI free here */
+       kfree(ar_pci);
+
+       return ret;
+}
+
+static void ath10k_pci_remove(struct pci_dev *pdev)
+{
+       struct ath10k *ar = pci_get_drvdata(pdev);
+       struct ath10k_pci *ar_pci;
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       if (!ar)
+               return;
+
+       ar_pci = ath10k_pci_priv(ar);
+
+       if (!ar_pci)
+               return;
+
+       tasklet_kill(&ar_pci->msi_fw_err);
+
+       ath10k_core_unregister(ar);
+       ath10k_pci_stop_intr(ar);
+
+       pci_set_drvdata(pdev, NULL);
+       pci_iounmap(pdev, ar_pci->mem);
+       pci_release_region(pdev, BAR_NUM);
+       pci_clear_master(pdev);
+       pci_disable_device(pdev);
+
+       ath10k_core_destroy(ar);
+       kfree(ar_pci);
+}
+
+#if defined(CONFIG_PM_SLEEP)
+
+#define ATH10K_PCI_PM_CONTROL 0x44
+
+static int ath10k_pci_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct ath10k *ar = pci_get_drvdata(pdev);
+       struct ath10k_pci *ar_pci;
+       u32 val;
+       int ret, retval;
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       if (!ar)
+               return -ENODEV;
+
+       ar_pci = ath10k_pci_priv(ar);
+       if (!ar_pci)
+               return -ENODEV;
+
+       if (ath10k_core_target_suspend(ar))
+               return -EBUSY;
+
+       ret = wait_event_interruptible_timeout(ar->event_queue,
+                                               ar->is_target_paused == true,
+                                               1 * HZ);
+       if (ret < 0) {
+               ath10k_warn("suspend interrupted (%d)\n", ret);
+               retval = ret;
+               goto resume;
+       } else if (ret == 0) {
+               ath10k_warn("suspend timed out - target pause event never came\n");
+               retval = -EIO;
+               goto resume;
+       }
+
+       /*
+        * Reset is_target_paused so that a future suspend waits for a
+        * fresh pause event.  If it stayed true, the host would skip the
+        * wait above and the target would assert because the host is
+        * already suspended.
+        */
+       ar->is_target_paused = false;
+
+       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+       if ((val & 0x000000ff) != 0x3) {
+               pci_save_state(pdev);
+               pci_disable_device(pdev);
+               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+                                      (val & 0xffffff00) | 0x03);
+       }
+
+       return 0;
+resume:
+       ret = ath10k_core_target_resume(ar);
+       if (ret)
+               ath10k_warn("could not resume (%d)\n", ret);
+
+       return retval;
+}
+
+static int ath10k_pci_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct ath10k *ar = pci_get_drvdata(pdev);
+       struct ath10k_pci *ar_pci;
+       int ret;
+       u32 val;
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       if (!ar)
+               return -ENODEV;
+       ar_pci = ath10k_pci_priv(ar);
+
+       if (!ar_pci)
+               return -ENODEV;
+
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               ath10k_warn("cannot enable PCI device: %d\n", ret);
+               return ret;
+       }
+
+       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+       if ((val & 0x000000ff) != 0) {
+               pci_restore_state(pdev);
+               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+                                      val & 0xffffff00);
+               /*
+                * Suspend/Resume resets the PCI configuration space,
+                * so we have to re-disable the RETRY_TIMEOUT register (0x41)
+                * to keep PCI Tx retries from interfering with C3 CPU state
+                */
+               pci_read_config_dword(pdev, 0x40, &val);
+
+               if ((val & 0x0000ff00) != 0)
+                       pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+       }
+
+       ret = ath10k_core_target_resume(ar);
+       if (ret)
+               ath10k_warn("target resume failed: %d\n", ret);
+
+       return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
+                        ath10k_pci_suspend,
+                        ath10k_pci_resume);
+
+#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
+
+#else
+
+#define ATH10K_PCI_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
+
+static struct pci_driver ath10k_pci_driver = {
+       .name = "ath10k_pci",
+       .id_table = ath10k_pci_id_table,
+       .probe = ath10k_pci_probe,
+       .remove = ath10k_pci_remove,
+       .driver.pm = ATH10K_PCI_PM_OPS,
+};
+
+static int __init ath10k_pci_init(void)
+{
+       int ret;
+
+       ret = pci_register_driver(&ath10k_pci_driver);
+       if (ret)
+               ath10k_err("pci_register_driver failed [%d]\n", ret);
+
+       return ret;
+}
+module_init(ath10k_pci_init);
+
+static void __exit ath10k_pci_exit(void)
+{
+       pci_unregister_driver(&ath10k_pci_driver);
+}
+
+module_exit(ath10k_pci_exit);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
+MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
new file mode 100644 (file)
index 0000000..d2a055a
--- /dev/null
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#include <linux/interrupt.h>
+
+#include "hw.h"
+#include "ce.h"
+
+/* FW dump area */
+#define REG_DUMP_COUNT_QCA988X 60
+
+/*
+ * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
+ */
+#define DIAG_TRANSFER_LIMIT 2048
+
+struct bmi_xfer {
+       struct completion done;
+       bool wait_for_resp;
+       u32 resp_len;
+};
+
+struct ath10k_pci_compl {
+       struct list_head list;
+       int send_or_recv;
+       struct ce_state *ce_state;
+       struct hif_ce_pipe_info *pipe_info;
+       void *transfer_context;
+       unsigned int nbytes;
+       unsigned int transfer_id;
+       unsigned int flags;
+};
+
+/* ath10k_pci_compl.send_or_recv */
+#define HIF_CE_COMPLETE_FREE 0
+#define HIF_CE_COMPLETE_SEND 1
+#define HIF_CE_COMPLETE_RECV 2
+
+/*
+ * PCI-specific Target state
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ *
+ * Much of this may be of interest to the Host so
+ * HOST_INTEREST->hi_interconnect_state points here
+ * (and all members are 32-bit quantities in order to
+ * facilitate Host access). In particular, Host software is
+ * required to initialize pipe_cfg_addr and svc_to_pipe_map.
+ */
+struct pcie_state {
+       /* Pipe configuration Target address */
+       /* NB: ce_pipe_config[CE_COUNT] */
+       u32 pipe_cfg_addr;
+
+       /* Service to pipe map Target address */
+       /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
+       u32 svc_to_pipe_map;
+
+       /* number of MSI interrupts requested */
+       u32 msi_requested;
+
+       /* number of MSI interrupts granted */
+       u32 msi_granted;
+
+       /* Message Signalled Interrupt address */
+       u32 msi_addr;
+
+       /* Base data */
+       u32 msi_data;
+
+       /*
+        * Data for firmware interrupt;
+        * MSI data for other interrupts are
+        * in various SoC registers
+        */
+       u32 msi_fw_intr_data;
+
+       /* PCIE_PWR_METHOD_* */
+       u32 power_mgmt_method;
+
+       /* PCIE_CONFIG_FLAG_* */
+       u32 config_flags;
+};
+
+/* PCIE_CONFIG_FLAG definitions */
+#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+       u32 pipenum;
+       u32 pipedir;
+       u32 nentries;
+       u32 nbytes_max;
+       u32 flags;
+       u32 reserved;
+};
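+
+/*
+ * Illustrative example (hypothetical values, not part of this patch):
+ * a Host->Target pipe with 32 entries of up to 2048 bytes each could
+ * be described to the Target as
+ *
+ *   struct ce_pipe_config cfg = {
+ *           .pipenum    = 1,
+ *           .pipedir    = PIPEDIR_OUT,
+ *           .nentries   = 32,
+ *           .nbytes_max = 2048,
+ *           .flags      = CE_ATTR_FLAGS,
+ *   };
+ */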
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE    0
+#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT     2  /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT   3  /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+       u32 service_id;
+       u32 pipedir;
+       u32 pipenum;
+};
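+
+/*
+ * Illustrative example (hypothetical values; the service ID name is
+ * assumed rather than defined in this header): mapping the WMI control
+ * service onto CE pipe 3 in the Host->Target direction might look like
+ *
+ *   struct service_to_pipe ent = {
+ *           .service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL,
+ *           .pipedir    = PIPEDIR_OUT,
+ *           .pipenum    = 3,
+ *   };
+ */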
+
+enum ath10k_pci_features {
+       ATH10K_PCI_FEATURE_MSI_X                = 0,
+       ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND    = 1,
+
+       /* keep last */
+       ATH10K_PCI_FEATURE_COUNT
+};
+
+/* Per-pipe state. */
+struct hif_ce_pipe_info {
+       /* Handle of underlying Copy Engine */
+       struct ce_state *ce_hdl;
+
+       /* Our pipe number; facilitates use of pipe_info ptrs. */
+       u8 pipe_num;
+
+       /* Convenience back pointer to hif_ce_state. */
+       struct ath10k *hif_ce_state;
+
+       size_t buf_sz;
+
+       /* protects compl_free and num_send_allowed */
+       spinlock_t pipe_lock;
+
+       /* List of free CE completion slots */
+       struct list_head compl_free;
+
+       /* Limit the number of outstanding send requests. */
+       int num_sends_allowed;
+
+       struct ath10k_pci *ar_pci;
+       struct tasklet_struct intr;
+};
+
+struct ath10k_pci {
+       struct pci_dev *pdev;
+       struct device *dev;
+       struct ath10k *ar;
+       void __iomem *mem;
+       int cacheline_sz;
+
+       DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
+
+       /*
+        * Number of MSI interrupts granted, 0 --> using legacy PCI line
+        * interrupts.
+        */
+       int num_msi_intrs;
+
+       struct tasklet_struct intr_tq;
+       struct tasklet_struct msi_fw_err;
+
+       /* Number of Copy Engines supported */
+       unsigned int ce_count;
+
+       int started;
+
+       atomic_t keep_awake_count;
+       bool verified_awake;
+
+       /* List of CE completions to be processed */
+       struct list_head compl_process;
+
+       /* protects compl_processing and compl_process */
+       spinlock_t compl_lock;
+
+       bool compl_processing;
+
+       struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+
+       struct ath10k_hif_cb msg_callbacks_current;
+
+       /* Target address used to signal a pending firmware event */
+       u32 fw_indicator_address;
+
+       /* Copy Engine used for Diagnostic Accesses */
+       struct ce_state *ce_diag;
+
+       /* FIXME: document what this really protects */
+       spinlock_t ce_lock;
+
+       /* Map CE id to ce_state */
+       struct ce_state *ce_id_to_state[CE_COUNT_MAX];
+
+       /* makes sure that dummy reads are atomic */
+       spinlock_t hw_v1_workaround_lock;
+};
+
+static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
+{
+       return ar->hif.priv;
+}
+
+static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
+{
+       return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+{
+       iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
+#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
+
+#define BAR_NUM 0
+
+#define CDC_WAR_MAGIC_STR   0xceef0000
+#define CDC_WAR_DATA_CE     4
+
+/*
+ * TODO: Should be a function call specific to each Target-type.
+ * This convoluted macro converts from Target CPU Virtual Address Space to CE
+ * Address Space. As part of this process, we conservatively fetch the current
+ * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
+ * for this device; but that's not guaranteed.
+ */
+#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)                 \
+       (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS|                  \
+         CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |                         \
+        0x100000 | ((addr) & 0xfffff))
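+
+/*
+ * Illustrative usage (hypothetical address, not part of this patch):
+ * translating a Target CPU virtual address before a diagnostic CE
+ * transfer could look like
+ *
+ *   u32 ce_addr = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
+ *                                            0x00400800);
+ */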
+
+/* Wait up to this many ms for a Diagnostic Access CE operation to complete */
+#define DIAG_ACCESS_CE_TIMEOUT_MS 10
+
+/*
+ * This API allows the Host to access Target registers directly
+ * and relatively efficiently over PCIe.
+ * This allows the Host to avoid extra overhead associated with
+ * sending a message to firmware and waiting for a response message
+ * from firmware, as is done on other interconnects.
+ *
+ * Yet there is some complexity with direct accesses because the
+ * Target's power state is not known a priori. The Host must issue
+ * special PCIe reads/writes in order to explicitly wake the Target
+ * and to verify that it is awake and will remain awake.
+ *
+ * Usage:
+ *
+ *   Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
+ *   These calls must be bracketed by ath10k_pci_wake and
+ *   ath10k_pci_sleep.  A single BEGIN/END pair is adequate for
+ *   multiple READ/WRITE operations.
+ *
+ *   Use ath10k_pci_wake to put the Target in a state in
+ *   which it is legal for the Host to directly access it. This
+ *   may involve waking the Target from a low power state, which
+ *   may take up to 2 ms!
+ *
+ *   Use ath10k_pci_sleep to tell the Target that as far as
+ *   this code path is concerned, it no longer needs to remain
+ *   directly accessible.  BEGIN/END is under a reference counter;
+ *   multiple code paths may issue BEGIN/END on a single targid.
+ */
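+
+/*
+ * Illustrative sketch (not part of this patch), assuming target power
+ * save is enabled: a bracketed direct-access sequence would look like
+ *
+ *   ath10k_pci_wake(ar);
+ *   val = ath10k_pci_read32(ar, offset);
+ *   ath10k_pci_write32(ar, offset, val);
+ *   ath10k_pci_sleep(ar);
+ */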
+static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
+                                     u32 value)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       void __iomem *addr = ar_pci->mem;
+
+       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
+               unsigned long irq_flags;
+
+               spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
+
+               ioread32(addr+offset+4); /* 3rd read prior to write */
+               ioread32(addr+offset+4); /* 2nd read prior to write */
+               ioread32(addr+offset+4); /* 1st read prior to write */
+               iowrite32(value, addr+offset);
+
+               spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
+                                      irq_flags);
+       } else {
+               iowrite32(value, addr+offset);
+       }
+}
+
+static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       return ioread32(ar_pci->mem + offset);
+}
+
+extern unsigned int ath10k_target_ps;
+
+void ath10k_do_pci_wake(struct ath10k *ar);
+void ath10k_do_pci_sleep(struct ath10k *ar);
+
+static inline void ath10k_pci_wake(struct ath10k *ar)
+{
+       if (ath10k_target_ps)
+               ath10k_do_pci_wake(ar);
+}
+
+static inline void ath10k_pci_sleep(struct ath10k *ar)
+{
+       if (ath10k_target_ps)
+               ath10k_do_pci_sleep(ar);
+}
+
+#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
new file mode 100644 (file)
index 0000000..bfec6c8
--- /dev/null
@@ -0,0 +1,990 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _RX_DESC_H_
+#define _RX_DESC_H_
+
+enum rx_attention_flags {
+       RX_ATTENTION_FLAGS_FIRST_MPDU          = 1 << 0,
+       RX_ATTENTION_FLAGS_LAST_MPDU           = 1 << 1,
+       RX_ATTENTION_FLAGS_MCAST_BCAST         = 1 << 2,
+       RX_ATTENTION_FLAGS_PEER_IDX_INVALID    = 1 << 3,
+       RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT    = 1 << 4,
+       RX_ATTENTION_FLAGS_POWER_MGMT          = 1 << 5,
+       RX_ATTENTION_FLAGS_NON_QOS             = 1 << 6,
+       RX_ATTENTION_FLAGS_NULL_DATA           = 1 << 7,
+       RX_ATTENTION_FLAGS_MGMT_TYPE           = 1 << 8,
+       RX_ATTENTION_FLAGS_CTRL_TYPE           = 1 << 9,
+       RX_ATTENTION_FLAGS_MORE_DATA           = 1 << 10,
+       RX_ATTENTION_FLAGS_EOSP                = 1 << 11,
+       RX_ATTENTION_FLAGS_U_APSD_TRIGGER      = 1 << 12,
+       RX_ATTENTION_FLAGS_FRAGMENT            = 1 << 13,
+       RX_ATTENTION_FLAGS_ORDER               = 1 << 14,
+       RX_ATTENTION_FLAGS_CLASSIFICATION      = 1 << 15,
+       RX_ATTENTION_FLAGS_OVERFLOW_ERR        = 1 << 16,
+       RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR     = 1 << 17,
+       RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18,
+       RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL      = 1 << 19,
+       RX_ATTENTION_FLAGS_SA_IDX_INVALID      = 1 << 20,
+       RX_ATTENTION_FLAGS_DA_IDX_INVALID      = 1 << 21,
+       RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT      = 1 << 22,
+       RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT      = 1 << 23,
+       RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED    = 1 << 24,
+       RX_ATTENTION_FLAGS_DIRECTED            = 1 << 25,
+       RX_ATTENTION_FLAGS_BUFFER_FRAGMENT     = 1 << 26,
+       RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR     = 1 << 27,
+       RX_ATTENTION_FLAGS_TKIP_MIC_ERR        = 1 << 28,
+       RX_ATTENTION_FLAGS_DECRYPT_ERR         = 1 << 29,
+       RX_ATTENTION_FLAGS_FCS_ERR             = 1 << 30,
+       RX_ATTENTION_FLAGS_MSDU_DONE           = 1 << 31,
+};
+
+struct rx_attention {
+       __le32 flags; /* %RX_ATTENTION_FLAGS_ */
+} __packed;
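+
+/*
+ * Illustrative sketch (not part of this patch): the flags word is
+ * little-endian on the wire, so a host-side flag test would be
+ *
+ *   if (__le32_to_cpu(attn->flags) & RX_ATTENTION_FLAGS_FCS_ERR)
+ *           fcs_failed = true;
+ */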
+
+/*
+ * first_mpdu
+ *             Indicates the first MSDU of the PPDU.  If both first_mpdu
+ *             and last_mpdu are set in the MSDU then this is not an
+ *             A-MPDU frame but a standalone MPDU.  Interior MPDU in an
+ *             A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ *             0.  The PPDU start status will only be valid when this bit
+ *             is set.
+ *
+ * last_mpdu
+ *             Indicates the last MSDU of the last MPDU of the PPDU.  The
+ *             PPDU end status will only be valid when this bit is set.
+ *
+ * mcast_bcast
+ *             Multicast / broadcast indicator.  Only set when the MAC
+ *             address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ *             matches one of the 4 BSSID registers. Only set when
+ *             first_msdu is set.
+ *
+ * peer_idx_invalid
+ *             Indicates no matching entries within the max search
+ *             count.  Only set when first_msdu is set.
+ *
+ * peer_idx_timeout
+ *             Indicates an unsuccessful search for the peer index due to
+ *             timeout.  Only set when first_msdu is set.
+ *
+ * power_mgmt
+ *             Power management bit set in the 802.11 header.  Only set
+ *             when first_msdu is set.
+ *
+ * non_qos
+ *             Set if packet is not a QoS data frame.  Only set when
+ *             first_msdu is set.
+ *
+ * null_data
+ *             Set if frame type indicates either null data or QoS null
+ *             data format.  Only set when first_msdu is set.
+ *
+ * mgmt_type
+ *             Set if packet is a management packet.  Only set when
+ *             first_msdu is set.
+ *
+ * ctrl_type
+ *             Set if packet is a control packet.  Only set when first_msdu
+ *             is set.
+ *
+ * more_data
+ *             Set if more bit in frame control is set.  Only set when
+ *             first_msdu is set.
+ *
+ * eosp
+ *             Set if the EOSP (end of service period) bit in the QoS
+ *             control field is set.  Only set when first_msdu is set.
+ *
+ * u_apsd_trigger
+ *             Set if packet is U-APSD trigger.  Key table will have bits
+ *             per TID to indicate U-APSD trigger.
+ *
+ * fragment
+ *             Indicates that this is an 802.11 fragment frame.  This is
+ *             set when either the more_frag bit is set in the frame
+ *             control or the fragment number is not zero.  Only set when
+ *             first_msdu is set.
+ *
+ * order
+ *             Set if the order bit in the frame control is set.  Only set
+ *             when first_msdu is set.
+ *
+ * classification
+ *             Indicates that this status has a corresponding MSDU that
+ *             requires FW processing.  The OLE will have classification
+ *             ring mask registers which will indicate the ring(s) for
+ *             packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ *             PCU Receive FIFO does not have enough space to store the
+ *             full receive packet.  Enough space is reserved in the
+ *             receive FIFO for the status to be written.  This MPDU and
+ *             the remaining packets in the PPDU will be filtered and no
+ *             Ack response will be transmitted.
+ *
+ * msdu_length_err
+ *             Indicates that the MSDU length from the 802.3 encapsulated
+ *             length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ *             Indicates that the computed checksum (tcp_udp_chksum) did
+ *             not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ *             Indicates that the computed checksum did not match the
+ *             checksum in the IP header.
+ *
+ * sa_idx_invalid
+ *             Indicates no matching entry was found in the address search
+ *             table for the source MAC address.
+ *
+ * da_idx_invalid
+ *             Indicates no matching entry was found in the address search
+ *             table for the destination MAC address.
+ *
+ * sa_idx_timeout
+ *             Indicates an unsuccessful search for the source MAC address
+ *             due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ *             Indicates an unsuccessful search for the destination MAC
+ *             address due to the expiring of the search timer.
+ *
+ * encrypt_required
+ *             Indicates that this data type frame is not encrypted even if
+ *             the policy for this MPDU requires encryption as indicated in
+ *             the peer table key type.
+ *
+ * directed
+ *             MPDU is a directed packet which means that the RA matched
+ *             our STA addresses.  In proxySTA it means that the TA matched
+ *             an entry in our address search table with the corresponding
+ *             'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ *             Indicates that at least one of the rx buffers has been
+ *             fragmented.  If set the FW should look at the rx_frag_info
+ *             descriptor described below.
+ *
+ * mpdu_length_err
+ *             Indicates that the MPDU was prematurely terminated
+ *             resulting in a truncated MPDU.  Don't trust the MPDU length
+ *             field.
+ *
+ * tkip_mic_err
+ *             Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ *             Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ *             Indicates that the MPDU FCS check failed
+ *
+ * msdu_done
+ *             If set indicates that the RX packet data, RX header data, RX
+ *             PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ *             start/end descriptors and RX Attention descriptor are all
+ *             valid.  This bit must be in the last octet of the
+ *             descriptor.
+ */
+
+struct rx_frag_info {
+       u8 ring0_more_count;
+       u8 ring1_more_count;
+       u8 ring2_more_count;
+       u8 ring3_more_count;
+} __packed;
+
+/*
+ * ring0_more_count
+ *             Indicates the number of more buffers associated with RX DMA
+ *             ring 0.  Field is filled in by the RX_DMA.
+ *
+ * ring1_more_count
+ *             Indicates the number of more buffers associated with RX DMA
+ *             ring 1. Field is filled in by the RX_DMA.
+ *
+ * ring2_more_count
+ *             Indicates the number of more buffers associated with RX DMA
+ *             ring 2. Field is filled in by the RX_DMA.
+ *
+ * ring3_more_count
+ *             Indicates the number of more buffers associated with RX DMA
+ *             ring 3. Field is filled in by the RX_DMA.
+ */
+
+enum htt_rx_mpdu_encrypt_type {
+       HTT_RX_MPDU_ENCRYPT_WEP40            = 0,
+       HTT_RX_MPDU_ENCRYPT_WEP104           = 1,
+       HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2,
+       HTT_RX_MPDU_ENCRYPT_WEP128           = 3,
+       HTT_RX_MPDU_ENCRYPT_TKIP_WPA         = 4,
+       HTT_RX_MPDU_ENCRYPT_WAPI             = 5,
+       HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2     = 6,
+       HTT_RX_MPDU_ENCRYPT_NONE             = 7,
+};
+
+#define RX_MPDU_START_INFO0_PEER_IDX_MASK     0x000007ff
+#define RX_MPDU_START_INFO0_PEER_IDX_LSB      0
+#define RX_MPDU_START_INFO0_SEQ_NUM_MASK      0x0fff0000
+#define RX_MPDU_START_INFO0_SEQ_NUM_LSB       16
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB  28
+#define RX_MPDU_START_INFO0_FROM_DS           (1 << 11)
+#define RX_MPDU_START_INFO0_TO_DS             (1 << 12)
+#define RX_MPDU_START_INFO0_ENCRYPTED         (1 << 13)
+#define RX_MPDU_START_INFO0_RETRY             (1 << 14)
+#define RX_MPDU_START_INFO0_TXBF_H_INFO       (1 << 15)
+
+#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000
+#define RX_MPDU_START_INFO1_TID_LSB  28
+#define RX_MPDU_START_INFO1_DIRECTED (1 << 16)
+
+struct rx_mpdu_start {
+       __le32 info0;
+       union {
+               struct {
+                       __le32 pn31_0;
+                       __le32 info1; /* %RX_MPDU_START_INFO1_ */
+               } __packed;
+               struct {
+                       u8 pn[6];
+               } __packed;
+       } __packed;
+} __packed;
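+
+/*
+ * Illustrative sketch (not part of this patch): fields packed into
+ * info0 are extracted with the _MASK/_LSB pairs above, e.g. the peer
+ * index:
+ *
+ *   u32 info0 = __le32_to_cpu(mpdu_start->info0);
+ *   u32 peer_idx = (info0 & RX_MPDU_START_INFO0_PEER_IDX_MASK) >>
+ *                  RX_MPDU_START_INFO0_PEER_IDX_LSB;
+ */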
+
+/*
+ * peer_idx
+ *             The index of the address search table which associated with
+ *             the peer table entry corresponding to this MPDU.  Only valid
+ *             when first_msdu is set.
+ *
+ * fr_ds
+ *             Set if the from DS bit is set in the frame control.  Only
+ *             valid when first_msdu is set.
+ *
+ * to_ds
+ *             Set if the to DS bit is set in the frame control.  Only
+ *             valid when first_msdu is set.
+ *
+ * encrypted
+ *             Protected bit from the frame control.  Only valid when
+ *             first_msdu is set.
+ *
+ * retry
+ *             Retry bit from the frame control.  Only valid when
+ *             first_msdu is set.
+ *
+ * txbf_h_info
+ *             The MPDU data will contain H information.  Primarily used
+ *             for debug.
+ *
+ * seq_num
+ *             The sequence number from the 802.11 header.  Only valid when
+ *             first_msdu is set.
+ *
+ * encrypt_type
+ *             Indicates type of decrypt cipher used (as defined in the
+ *             peer table)
+ *             0: WEP40
+ *             1: WEP104
+ *             2: TKIP without MIC
+ *             3: WEP128
+ *             4: TKIP (WPA)
+ *             5: WAPI
+ *             6: AES-CCM (WPA2)
+ *             7: No cipher
+ *             Only valid when first_msdu is set.
+ *
+ * pn_31_0
+ *             Bits [31:0] of the PN number extracted from the IV field
+ *             WEP: IV = {key_id_octet, pn2, pn1, pn0}.  Only pn[23:0] is
+ *             valid.
+ *             TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0,
+ *             WEPSeed[1], pn1}.  Only pn[47:0] is valid.
+ *             AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1,
+ *             pn0}.  Only pn[47:0] is valid.
+ *             WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11,
+ *             pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}.
+ *             The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and
+ *             pn[47:0] are valid.
+ *             Only valid when first_msdu is set.
+ *
+ * pn_47_32
+ *             Bits [47:32] of the PN number.   See description for
+ *             pn_31_0.  The remaining PN fields are in the rx_msdu_end
+ *             descriptor
+ *
+ * pn
+ *             Use this field to access the pn without worrying about
+ *             byte-order and bitmasking/bitshifting.
+ *
+ * directed
+ *             See definition in RX attention descriptor
+ *
+ * reserved_2
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ *
+ * tid
+ *             The TID field in the QoS control field
+ */
+
+#define RX_MPDU_END_INFO0_RESERVED_0_MASK     0x00001fff
+#define RX_MPDU_END_INFO0_RESERVED_0_LSB      0
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB  16
+#define RX_MPDU_END_INFO0_OVERFLOW_ERR        (1 << 13)
+#define RX_MPDU_END_INFO0_LAST_MPDU           (1 << 14)
+#define RX_MPDU_END_INFO0_POST_DELIM_ERR      (1 << 15)
+#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR     (1 << 28)
+#define RX_MPDU_END_INFO0_TKIP_MIC_ERR        (1 << 29)
+#define RX_MPDU_END_INFO0_DECRYPT_ERR         (1 << 30)
+#define RX_MPDU_END_INFO0_FCS_ERR             (1 << 31)
+
+struct rx_mpdu_end {
+       __le32 info0;
+} __packed;
+
+/*
+ * reserved_0
+ *             Reserved
+ *
+ * overflow_err
+ *             PCU Receive FIFO does not have enough space to store the
+ *             full receive packet.  Enough space is reserved in the
+ *             receive FIFO for the status to be written.  This MPDU and
+ *             the remaining packets in the PPDU will be filtered and no
+ *             Ack response will be transmitted.
+ *
+ * last_mpdu
+ *             Indicates that this is the last MPDU of a PPDU.
+ *
+ * post_delim_err
+ *             Indicates that a delimiter FCS error occurred after this
+ *             MPDU before the next MPDU.  Only valid when last_msdu is
+ *             set.
+ *
+ * post_delim_cnt
+ *             Count of the delimiters after this MPDU.  This requires the
+ *             last MPDU to be held until all the EOF descriptors have been
+ *             received.  This may be inefficient in the future when
+ *             ML-MIMO is used.  Only valid when last_mpdu is set.
+ *
+ * mpdu_length_err
+ *             See definition in RX attention descriptor
+ *
+ * tkip_mic_err
+ *             See definition in RX attention descriptor
+ *
+ * decrypt_err
+ *             See definition in RX attention descriptor
+ *
+ * fcs_err
+ *             See definition in RX attention descriptor
+ */
+
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK    0x00003fff
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB     0
+#define RX_MSDU_START_INFO0_IP_OFFSET_MASK      0x000fc000
+#define RX_MSDU_START_INFO0_IP_OFFSET_LSB       14
+#define RX_MSDU_START_INFO0_RING_MASK_MASK      0x00f00000
+#define RX_MSDU_START_INFO0_RING_MASK_LSB       20
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB  24
+
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK    0x000000ff
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB     0
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK   0x00000300
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB    8
+#define RX_MSDU_START_INFO1_SA_IDX_MASK         0x07ff0000
+#define RX_MSDU_START_INFO1_SA_IDX_LSB          16
+#define RX_MSDU_START_INFO1_IPV4_PROTO          (1 << 10)
+#define RX_MSDU_START_INFO1_IPV6_PROTO          (1 << 11)
+#define RX_MSDU_START_INFO1_TCP_PROTO           (1 << 12)
+#define RX_MSDU_START_INFO1_UDP_PROTO           (1 << 13)
+#define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)
+#define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15)
+
+enum rx_msdu_decap_format {
+       RX_MSDU_DECAP_RAW           = 0,
+       RX_MSDU_DECAP_NATIVE_WIFI   = 1,
+       RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+       RX_MSDU_DECAP_8023_SNAP_LLC = 3
+};
+
+struct rx_msdu_start {
+       __le32 info0; /* %RX_MSDU_START_INFO0_ */
+       __le32 flow_id_crc;
+       __le32 info1; /* %RX_MSDU_START_INFO1_ */
+} __packed;
+
+/*
+ * msdu_length
+ *             MSDU length in bytes after decapsulation.  This field is
+ *             still valid for MPDU frames without A-MSDU.  It still
+ *             represents MSDU length after decapsulation
+ *
+ * ip_offset
+ *             Indicates the IP offset in bytes from the start of the
+ *             packet after decapsulation.  Only valid if ipv4_proto or
+ *             ipv6_proto is set.
+ *
+ * ring_mask
+ *             Indicates the destination RX rings for this MSDU.
+ *
+ * tcp_udp_offset
+ *             Indicates the offset in bytes to the start of TCP or UDP
+ *             header from the start of the IP header after decapsulation.
+ *             Only valid if tcp_proto or udp_proto is set.  The value 0
+ *             indicates that the offset is longer than 127 bytes.
+ *
+ * reserved_0c
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ *
+ * flow_id_crc
+ *             The flow_id_crc runs CRC32 on the following information:
+ *             IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0,
+ *             protocol[7:0]}.
+ *             IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0,
+ *             next_header[7:0]}
+ *             UDP case: src_port[15:0], dest_port[15:0]
+ *             TCP case: src_port[15:0], dest_port[15:0],
+ *             {header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
+ *             {16'b0, urgent_ptr[15:0]}, all options except 32-bit
+ *             timestamp.
+ *
+ * msdu_number
+ *             Indicates the MSDU number within an MPDU.  This value is
+ *             reset to zero at the start of each MPDU.  If the number of
+ *             MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_format
+ *             Indicates the format after decapsulation:
+ *             0: RAW: No decapsulation
+ *             1: Native WiFi
+ *             2: Ethernet 2 (DIX)
+ *             3: 802.3 (SNAP/LLC)
+ *
+ * ipv4_proto
+ *             Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ *             Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ *             Set if the ipv4_proto or ipv6_proto are set and the IP
+ *             protocol indicates TCP.
+ *
+ * udp_proto
+ *             Set if the ipv4_proto or ipv6_proto are set and the IP
+ *             protocol indicates UDP.
+ *
+ * ip_frag
+ *             Indicates that either the IP More frag bit is set or IP frag
+ *             number is non-zero.  If set indicates that this is a
+ *             fragmented IP packet.
+ *
+ * tcp_only_ack
+ *             Set if only the TCP Ack bit is set in the TCP flags and if
+ *             the TCP payload is 0.
+ *
+ * sa_idx
+ *             The offset in the address table which matches the MAC source
+ *             address.
+ *
+ * reserved_2b
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ */
+
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB  0
+#define RX_MSDU_END_INFO0_FIRST_MSDU                (1 << 14)
+#define RX_MSDU_END_INFO0_LAST_MSDU                 (1 << 15)
+#define RX_MSDU_END_INFO0_PRE_DELIM_ERR             (1 << 30)
+#define RX_MSDU_END_INFO0_RESERVED_3B               (1 << 31)
+
+struct rx_msdu_end {
+       __le16 ip_hdr_cksum;
+       __le16 tcp_hdr_cksum;
+       u8 key_id_octet;
+       u8 classification_filter;
+       u8 wapi_pn[10];
+       __le32 info0;
+} __packed;
+
+/*
+ * ip_hdr_chksum
+ *             This can include the IP header checksum or the pseudo header
+ *             checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_chksum
+ *             The value of the computed TCP/UDP checksum.  A mode bit
+ *             selects whether this checksum is the full checksum or the
+ *             partial checksum which does not include the pseudo header.
+ *
+ * key_id_octet
+ *             The key ID octet from the IV.  Only valid when first_msdu is
+ *             set.
+ *
+ * classification_filter
+ *             Indicates the number of the classification filter rule.
+ *
+ * ext_wapi_pn_63_48
+ *             Extension PN (packet number) which is only used by WAPI.
+ *             This corresponds to WAPI PN bits [63:48] (pn6 and pn7).  The
+ *             WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
+ *             descriptor.
+ *
+ * ext_wapi_pn_95_64
+ *             Extension PN (packet number) which is only used by WAPI.
+ *             This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and
+ *             pn11).
+ *
+ * ext_wapi_pn_127_96
+ *             Extension PN (packet number) which is only used by WAPI.
+ *             This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14,
+ *             pn15).
+ *
+ * reported_mpdu_length
+ *             MPDU length before decapsulation.  Only valid when
+ *             first_msdu is set.  This field is taken directly from the
+ *             length field of the A-MPDU delimiter or the preamble length
+ *             field for non-A-MPDU frames.
+ *
+ * first_msdu
+ *             Indicates the first MSDU of an A-MSDU.  If both first_msdu
+ *             and last_msdu are set in the MSDU then this is a
+ *             non-aggregated MSDU frame: a normal MPDU.  Interior MSDUs in
+ *             an A-MSDU shall have both first_msdu and last_msdu bits set
+ *             to 0.
+ *
+ * last_msdu
+ *             Indicates the last MSDU of the A-MSDU.  MPDU end status is
+ *             only valid when last_msdu is set.
+ *
+ * reserved_3a
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ *
+ * pre_delim_err
+ *             Indicates that the first delimiter had a FCS failure.  Only
+ *             valid when first_mpdu and first_msdu are set.
+ *
+ * reserved_3b
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ */
+
+#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
+#define RX_PPDU_START_SIG_RATE_SELECT_CCK  1
+
+#define RX_PPDU_START_SIG_RATE_OFDM_48 0
+#define RX_PPDU_START_SIG_RATE_OFDM_24 1
+#define RX_PPDU_START_SIG_RATE_OFDM_12 2
+#define RX_PPDU_START_SIG_RATE_OFDM_6  3
+#define RX_PPDU_START_SIG_RATE_OFDM_54 4
+#define RX_PPDU_START_SIG_RATE_OFDM_36 5
+#define RX_PPDU_START_SIG_RATE_OFDM_18 6
+#define RX_PPDU_START_SIG_RATE_OFDM_9  7
+
+#define RX_PPDU_START_SIG_RATE_CCK_LP_11  0
+#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
+#define RX_PPDU_START_SIG_RATE_CCK_LP_2   2
+#define RX_PPDU_START_SIG_RATE_CCK_LP_1   3
+#define RX_PPDU_START_SIG_RATE_CCK_SP_11  4
+#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
+#define RX_PPDU_START_SIG_RATE_CCK_SP_2   6
+
+#define HTT_RX_PPDU_START_PREAMBLE_LEGACY        0x04
+#define HTT_RX_PPDU_START_PREAMBLE_HT            0x08
+#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF  0x09
+#define HTT_RX_PPDU_START_PREAMBLE_VHT           0x0C
+#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D
+
+#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0)
+
+#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK    0x0000000f
+#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB     0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK  0x0001ffe0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB   5
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK    0x00fc0000
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB     18
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB  24
+#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT  (1 << 4)
+#define RX_PPDU_START_INFO1_L_SIG_PARITY       (1 << 17)
+
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB  0
+
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB  0
+#define RX_PPDU_START_INFO3_TXBF_H_INFO             (1 << 24)
+
+#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff
+#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB  0
+
+#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
+#define RX_PPDU_START_INFO5_SERVICE_LSB  0
+
+struct rx_ppdu_start {
+       struct {
+               u8 pri20_mhz;
+               u8 ext20_mhz;
+               u8 ext40_mhz;
+               u8 ext80_mhz;
+       } rssi_chains[4];
+       u8 rssi_comb;
+       __le16 rsvd0;
+       u8 info0; /* %RX_PPDU_START_INFO0_ */
+       __le32 info1; /* %RX_PPDU_START_INFO1_ */
+       __le32 info2; /* %RX_PPDU_START_INFO2_ */
+       __le32 info3; /* %RX_PPDU_START_INFO3_ */
+       __le32 info4; /* %RX_PPDU_START_INFO4_ */
+       __le32 info5; /* %RX_PPDU_START_INFO5_ */
+} __packed;
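+
+/*
+ * Illustrative sketch (not part of this patch): per-chain RSSI values
+ * use 0x80 as the "invalid" marker, so a consumer would check e.g.
+ *
+ *   u8 rssi = ppdu_start->rssi_chains[0].pri20_mhz;
+ *   bool valid = (rssi != 0x80);
+ */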
+
+/*
+ * rssi_chain0_pri20
+ *             RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec20
+ *             RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec40
+ *             RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec80
+ *             RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_pri20
+ *             RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec20
+ *             RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec40
+ *             RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec80
+ *             RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_pri20
+ *             RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec20
+ *             RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec40
+ *             RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec80
+ *             RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_pri20
+ *             RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec20
+ *             RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec40
+ *             RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec80
+ *             RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_comb
+ *             The combined RSSI of RX PPDU of all active chains and
+ *             bandwidths.  Value of 0x80 indicates invalid.
+ *
+ * reserved_4a
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * is_greenfield
+ *             Do we really support this?
+ *
+ * reserved_4b
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * l_sig_rate
+ *             If l_sig_rate_select is 0:
+ *             0x8: OFDM 48 Mbps
+ *             0x9: OFDM 24 Mbps
+ *             0xA: OFDM 12 Mbps
+ *             0xB: OFDM 6 Mbps
+ *             0xC: OFDM 54 Mbps
+ *             0xD: OFDM 36 Mbps
+ *             0xE: OFDM 18 Mbps
+ *             0xF: OFDM 9 Mbps
+ *             If l_sig_rate_select is 1:
+ *             0x8: CCK 11 Mbps long preamble
+ *             0x9: CCK 5.5 Mbps long preamble
+ *             0xA: CCK 2 Mbps long preamble
+ *             0xB: CCK 1 Mbps long preamble
+ *             0xC: CCK 11 Mbps short preamble
+ *             0xD: CCK 5.5 Mbps short preamble
+ *             0xE: CCK 2 Mbps short preamble
+ *
+ * l_sig_rate_select
+ *             Legacy signal rate select.  If set then l_sig_rate indicates
+ *             CCK rates.  If clear then l_sig_rate indicates OFDM rates.
+ *
+ * l_sig_length
+ *             Length of legacy frame in octets.
+ *
+ * l_sig_parity
+ *             Odd parity over l_sig_rate and l_sig_length
+ *
+ * l_sig_tail
+ *             Tail bits for Viterbi decoder
+ *
+ * preamble_type
+ *             Indicates the type of preamble ahead:
+ *             0x4: Legacy (OFDM/CCK)
+ *             0x8: HT
+ *             0x9: HT with TxBF
+ *             0xC: VHT
+ *             0xD: VHT with TxBF
+ *             0x80 - 0xFF: Reserved for special baseband data types such
+ *             as radar and spectral scan.
+ *
+ * ht_sig_vht_sig_a_1
+ *             If preamble_type == 0x8 or 0x9
+ *             HT-SIG (first 24 bits)
+ *             If preamble_type == 0xC or 0xD
+ *             VHT-SIG A (first 24 bits)
+ *             Else
+ *             Reserved
+ *
+ * reserved_6
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ht_sig_vht_sig_a_2
+ *             If preamble_type == 0x8 or 0x9
+ *             HT-SIG (last 24 bits)
+ *             If preamble_type == 0xC or 0xD
+ *             VHT-SIG A (last 24 bits)
+ *             Else
+ *             Reserved
+ *
+ * txbf_h_info
+ *             Indicates that the packet data carries H information which
+ *             is used for TxBF debug.
+ *
+ * reserved_7
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * vht_sig_b
+ *             WiFi 1.0 and WiFi 2.0 will likely leave this field as all
+ *             0s since the BB does not plan on decoding VHT SIG-B.
+ *
+ * reserved_8
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * service
+ *             Service field from BB for OFDM, HT and VHT packets.  CCK
+ *             packets will have service field of 0.
+ *
+ * reserved_9
+ *             Reserved: HW should fill with 0, FW should ignore.
+ */
+
+#define RX_PPDU_END_FLAGS_PHY_ERR             (1 << 0)
+#define RX_PPDU_END_FLAGS_RX_LOCATION         (1 << 1)
+#define RX_PPDU_END_FLAGS_TXBF_H_INFO         (1 << 2)
+
+#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK     0x00ffffff
+#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB      0
+#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
+#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
+
+#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+
+struct rx_ppdu_end {
+       __le32 evm_p0;
+       __le32 evm_p1;
+       __le32 evm_p2;
+       __le32 evm_p3;
+       __le32 evm_p4;
+       __le32 evm_p5;
+       __le32 evm_p6;
+       __le32 evm_p7;
+       __le32 evm_p8;
+       __le32 evm_p9;
+       __le32 evm_p10;
+       __le32 evm_p11;
+       __le32 evm_p12;
+       __le32 evm_p13;
+       __le32 evm_p14;
+       __le32 evm_p15;
+       __le32 tsf_timestamp;
+       __le32 wb_timestamp;
+       u8 locationing_timestamp;
+       u8 phy_err_code;
+       __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+       __le32 info0; /* %RX_PPDU_END_INFO0_ */
+       __le16 bb_length;
+       __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+/*
+ * evm_p0
+ *             EVM for pilot 0.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p1
+ *             EVM for pilot 1.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p2
+ *             EVM for pilot 2.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p3
+ *             EVM for pilot 3.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p4
+ *             EVM for pilot 4.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p5
+ *             EVM for pilot 5.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p6
+ *             EVM for pilot 6.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p7
+ *             EVM for pilot 7.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p8
+ *             EVM for pilot 8.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p9
+ *             EVM for pilot 9.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p10
+ *             EVM for pilot 10.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p11
+ *             EVM for pilot 11.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p12
+ *             EVM for pilot 12.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p13
+ *             EVM for pilot 13.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p14
+ *             EVM for pilot 14.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p15
+ *             EVM for pilot 15.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * tsf_timestamp
+ *             Receive TSF timestamp sampled on the rising edge of
+ *             rx_clear.  For PHY errors this may be the current TSF when
+ *             phy_error is asserted if the rx_clear does not assert before
+ *             the end of the PHY error.
+ *
+ * wb_timestamp
+ *             WLAN/BT timestamp is a 1 usec resolution timestamp which
+ *             does not get updated based on receive beacon like TSF.  The
+ *             same rules for capturing tsf_timestamp are used to capture
+ *             the wb_timestamp.
+ *
+ * locationing_timestamp
+ *             Timestamp used for locationing.  This timestamp is used to
+ *             indicate fractions of usec.  For example if the MAC clock is
+ *             running at 80 MHz, the timestamp will increment every 12.5
+ *             nsec.  The value starts at 0 and increments to 79 and
+ *             returns to 0 and repeats.  This information is valid for
+ *             every PPDU.  This information can be used in conjunction
+ *             with wb_timestamp to capture large delta times.
+ *
+ * phy_err_code
+ *             See section 1.10.8.1.2 for the list of PHY error codes.
+ *
+ * phy_err
+ *             Indicates a PHY error was detected for this PPDU.
+ *
+ * rx_location
+ *             Indicates that location information was requested.
+ *
+ * txbf_h_info
+ *             Indicates that the packet data carries H information which
+ *             is used for TxBF debug.
+ *
+ * reserved_18
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * rx_antenna
+ *             Receive antenna value
+ *
+ * tx_ht_vht_ack
+ *             Indicates that a HT or VHT Ack/BA frame was transmitted in
+ *             response to this receive packet.
+ *
+ * bb_captured_channel
+ *             Indicates that the BB has captured a channel dump.  FW can
+ *             then read the channel dump memory.  This may indicate that
+ *             the channel was captured either based on PCU setting the
+ *             capture_channel bit in the BB descriptor or FW setting the
+ *             capture_channel mode bit.
+ *
+ * reserved_19
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * bb_length
+ *             Indicates the number of bytes of baseband information for
+ *             PPDUs where the BB descriptor preamble type is 0x80 to 0xFF
+ *             which indicates that this is not a normal PPDU but rather
+ *             contains baseband debug information.
+ *
+ * reserved_20
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ppdu_done
+ *             PPDU end status is only valid when ppdu_done bit is set.
+ *             Every time HW sets this bit in memory, FW/SW must clear it
+ *             in memory.  FW will initialize all the ppdu_done dwords
+ *             to 0.
+ */
+
+#define FW_RX_DESC_INFO0_DISCARD  (1 << 0)
+#define FW_RX_DESC_INFO0_FORWARD  (1 << 1)
+#define FW_RX_DESC_INFO0_INSPECT  (1 << 5)
+#define FW_RX_DESC_INFO0_EXT_MASK 0xC0
+#define FW_RX_DESC_INFO0_EXT_LSB  6
+
+struct fw_rx_desc_base {
+       u8 info0;
+} __packed;
+
+#endif /* _RX_DESC_H_ */
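
To make the info0 layout concrete, here is a minimal host-side sketch (an
editor's illustration, not part of this patch; the helper name is made up)
that decodes the flag bits and the two-bit EXT field with the masks above:

    #include <stdint.h>
    #include <stdio.h>

    /* Same values as the FW_RX_DESC_INFO0_* macros above, repeated so
     * the example is self-contained. */
    #define INFO0_DISCARD  (1 << 0)
    #define INFO0_FORWARD  (1 << 1)
    #define INFO0_INSPECT  (1 << 5)
    #define INFO0_EXT_MASK 0xC0
    #define INFO0_EXT_LSB  6

    static void dump_fw_rx_info0(uint8_t info0)
    {
            printf("discard=%d forward=%d inspect=%d ext=%u\n",
                   !!(info0 & INFO0_DISCARD),
                   !!(info0 & INFO0_FORWARD),
                   !!(info0 & INFO0_INSPECT),
                   (info0 & INFO0_EXT_MASK) >> INFO0_EXT_LSB);
    }
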
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
new file mode 100644 (file)
index 0000000..be7ba1e
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __TARGADDRS_H__
+#define __TARGADDRS_H__
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure.  It must match the address of the _host_interest
+ * symbol (see linker script).
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end) across software releases.
+ *
+ * All addresses are available here so that it's possible to
+ * write a single binary that works with all Target Types.
+ * May be used in assembler code as well as C.
+ */
+#define QCA988X_HOST_INTEREST_ADDRESS    0x00400800
+#define HOST_INTEREST_MAX_SIZE          0x200
+
+/*
+ * These are items that the Host may need to access via BMI or via the
+ * Diagnostic Window. The position of items in this structure must remain
+ * constant across firmware revisions! Types for each item must be fixed
+ * size across target and host platforms. More items may be added at the end.
+ */
+struct host_interest {
+       /*
+        * Pointer to application-defined area, if any.
+        * Set by Target application during startup.
+        */
+       u32 hi_app_host_interest;                       /* 0x00 */
+
+       /* Pointer to register dump area, valid after Target crash. */
+       u32 hi_failure_state;                           /* 0x04 */
+
+       /* Pointer to debug logging header */
+       u32 hi_dbglog_hdr;                              /* 0x08 */
+
+       u32 hi_unused0c;                                /* 0x0c */
+
+       /*
+        * General-purpose flag bits, similar to SOC_OPTION_* flags.
+        * Can be used by application rather than by OS.
+        */
+       u32 hi_option_flag;                             /* 0x10 */
+
+       /*
+        * Boolean that determines whether or not to
+        * display messages on the serial port.
+        */
+       u32 hi_serial_enable;                           /* 0x14 */
+
+       /* Start address of DataSet index, if any */
+       u32 hi_dset_list_head;                          /* 0x18 */
+
+       /* Override Target application start address */
+       u32 hi_app_start;                               /* 0x1c */
+
+       /* Clock and voltage tuning */
+       u32 hi_skip_clock_init;                         /* 0x20 */
+       u32 hi_core_clock_setting;                      /* 0x24 */
+       u32 hi_cpu_clock_setting;                       /* 0x28 */
+       u32 hi_system_sleep_setting;                    /* 0x2c */
+       u32 hi_xtal_control_setting;                    /* 0x30 */
+       u32 hi_pll_ctrl_setting_24ghz;                  /* 0x34 */
+       u32 hi_pll_ctrl_setting_5ghz;                   /* 0x38 */
+       u32 hi_ref_voltage_trim_setting;                /* 0x3c */
+       u32 hi_clock_info;                              /* 0x40 */
+
+       /* Host uses BE CPU or not */
+       u32 hi_be;                                      /* 0x44 */
+
+       u32 hi_stack;   /* normal stack */                      /* 0x48 */
+       u32 hi_err_stack; /* error stack */             /* 0x4c */
+       u32 hi_desired_cpu_speed_hz;                    /* 0x50 */
+
+       /* Pointer to Board Data  */
+       u32 hi_board_data;                              /* 0x54 */
+
+       /*
+        * Indication of Board Data state:
+        *    0: board data is not yet initialized.
+        *    1: board data is initialized; unknown size
+        *   >1: number of bytes of initialized board data
+        */
+       u32 hi_board_data_initialized;                  /* 0x58 */
+
+       u32 hi_dset_ram_index_table;                    /* 0x5c */
+
+       u32 hi_desired_baud_rate;                       /* 0x60 */
+       u32 hi_dbglog_config;                           /* 0x64 */
+       u32 hi_end_ram_reserve_sz;                      /* 0x68 */
+       u32 hi_mbox_io_block_sz;                        /* 0x6c */
+
+       u32 hi_num_bpatch_streams;                      /* 0x70 -- unused */
+       u32 hi_mbox_isr_yield_limit;                    /* 0x74 */
+
+       u32 hi_refclk_hz;                               /* 0x78 */
+       u32 hi_ext_clk_detected;                        /* 0x7c */
+       u32 hi_dbg_uart_txpin;                          /* 0x80 */
+       u32 hi_dbg_uart_rxpin;                          /* 0x84 */
+       u32 hi_hci_uart_baud;                           /* 0x88 */
+       u32 hi_hci_uart_pin_assignments;                /* 0x8C */
+
+       u32 hi_hci_uart_baud_scale_val;                 /* 0x90 */
+       u32 hi_hci_uart_baud_step_val;                  /* 0x94 */
+
+       u32 hi_allocram_start;                          /* 0x98 */
+       u32 hi_allocram_sz;                             /* 0x9c */
+       u32 hi_hci_bridge_flags;                        /* 0xa0 */
+       u32 hi_hci_uart_support_pins;                   /* 0xa4 */
+
+       u32 hi_hci_uart_pwr_mgmt_params;                /* 0xa8 */
+
+       /*
+        * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+        *        [31:16]: wakeup timeout in ms
+        */
+       /* Pointer to extended board Data  */
+       u32 hi_board_ext_data;                          /* 0xac */
+       u32 hi_board_ext_data_config;                   /* 0xb0 */
+       /*
+        * Bit [0]    : valid
+        * Bit [31:16]: size
+        */
+       /*
+        * hi_reset_flag controls what is preserved across a target reset,
+        * such as restoring app_start after a warm reset, or preserving the
+        * host interest area, ROM data, literals etc.
+        */
+       u32  hi_reset_flag;                             /* 0xb4 */
+       /* indicate hi_reset_flag is valid */
+       u32  hi_reset_flag_valid;                       /* 0xb8 */
+       u32 hi_hci_uart_pwr_mgmt_params_ext;            /* 0xbc */
+       /* 0xbc - [31:0]: idle timeout in ms */
+       /* ACS flags */
+       u32 hi_acs_flags;                               /* 0xc0 */
+       u32 hi_console_flags;                           /* 0xc4 */
+       u32 hi_nvram_state;                             /* 0xc8 */
+       u32 hi_option_flag2;                            /* 0xcc */
+
+       /* If non-zero, override values sent to Host in WMI_READY event. */
+       u32 hi_sw_version_override;                     /* 0xd0 */
+       u32 hi_abi_version_override;                    /* 0xd4 */
+
+       /*
+        * Percentage of high priority RX traffic to total expected RX traffic
+        * applicable only to ar6004
+        */
+       u32 hi_hp_rx_traffic_ratio;                     /* 0xd8 */
+
+       /* test applications flags */
+       u32 hi_test_apps_related;                       /* 0xdc */
+       /* location of test script */
+       u32 hi_ota_testscript;                          /* 0xe0 */
+       /* location of CAL data */
+       u32 hi_cal_data;                                /* 0xe4 */
+
+       /* Number of packet log buffers */
+       u32 hi_pktlog_num_buffers;                      /* 0xe8 */
+
+       /* wow extension configuration */
+       u32 hi_wow_ext_config;                          /* 0xec */
+       u32 hi_pwr_save_flags;                          /* 0xf0 */
+
+       /* Spatial Multiplexing Power Save (SMPS) options */
+       u32 hi_smps_options;                            /* 0xf4 */
+
+       /* Interconnect-specific state */
+       u32 hi_interconnect_state;                      /* 0xf8 */
+
+       /* Coex configuration flags */
+       u32 hi_coex_config;                             /* 0xfc */
+
+       /* Early allocation support */
+       u32 hi_early_alloc;                             /* 0x100 */
+       /* FW swap field */
+       /*
+        * Bits of this 32bit word will be used to pass specific swap
+        * instruction to FW
+        */
+       /*
+        * Bit 0 -- AP Nart descriptor no swap. When this bit is set
+        * FW will not swap the TX descriptor, meaning packets are formed
+        * on the target processor.
+        */
+       /* Bit 1 - unused */
+       u32 hi_fw_swap;                                 /* 0x104 */
+} __packed;
+
+#define HI_ITEM(item)  offsetof(struct host_interest, item)
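
As a worked example (illustrative, not part of the patch), HI_ITEM()
combined with the base address above yields the target RAM address of any
host_interest field, e.g. for a BMI or diagnostic-window read:

    /* hi_board_data sits at offset 0x54 (see the struct above), so: */
    u32 addr = QCA988X_HOST_INTEREST_ADDRESS + HI_ITEM(hi_board_data);
    /* addr == 0x00400800 + 0x54 == 0x00400854 */
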
+
+/* Bits defined in hi_option_flag */
+
+/* Enable timer workaround */
+#define HI_OPTION_TIMER_WAR         0x01
+/* Limit BMI command credits */
+#define HI_OPTION_BMI_CRED_LIMIT    0x02
+/* Relay Dot11 hdr to/from host */
+#define HI_OPTION_RELAY_DOT11_HDR   0x04
+/* MAC addr method 0-locally administered 1-globally unique addrs */
+#define HI_OPTION_MAC_ADDR_METHOD   0x08
+/* Firmware Bridging */
+#define HI_OPTION_FW_BRIDGE         0x10
+/* Enable CPU profiling */
+#define HI_OPTION_ENABLE_PROFILE    0x20
+/* Disable debug logging */
+#define HI_OPTION_DISABLE_DBGLOG    0x40
+/* Skip Era Tracking */
+#define HI_OPTION_SKIP_ERA_TRACKING 0x80
+/* Disable PAPRD (debug) */
+#define HI_OPTION_PAPRD_DISABLE     0x100
+#define HI_OPTION_NUM_DEV_LSB       0x200
+#define HI_OPTION_NUM_DEV_MSB       0x800
+#define HI_OPTION_DEV_MODE_LSB      0x1000
+#define HI_OPTION_DEV_MODE_MSB      0x8000000
+/* Disable LowFreq Timer Stabilization */
+#define HI_OPTION_NO_LFT_STBL       0x10000000
+/* Skip regulatory scan */
+#define HI_OPTION_SKIP_REG_SCAN     0x20000000
+/*
+ * Do regulatory scan during init before
+ * sending WMI ready event to host
+ */
+#define HI_OPTION_INIT_REG_SCAN     0x40000000
+
+/* REV6: Do not adjust memory map */
+#define HI_OPTION_SKIP_MEMMAP       0x80000000
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+/* 2 bits of hi_option_flag are used to represent 4 modes */
+#define HI_OPTION_FW_MODE_IBSS    0x0 /* IBSS Mode */
+#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
+#define HI_OPTION_FW_MODE_AP      0x2 /* AP Mode */
+#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
+
+/* 2 bits of hi_option_flag are used to represent 4 submodes */
+#define HI_OPTION_FW_SUBMODE_NONE    0x0  /* Normal mode */
+#define HI_OPTION_FW_SUBMODE_P2PDEV  0x1  /* p2p device mode */
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
+#define HI_OPTION_FW_SUBMODE_P2PGO   0x3 /* p2p go mode */
+
+/* Num dev Mask */
+#define HI_OPTION_NUM_DEV_MASK    0x7
+#define HI_OPTION_NUM_DEV_SHIFT   0x9
+
+/* firmware bridging */
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/*
+Fw Mode/SubMode Mask
+|-----------------------------------------------------------------------------|
+|  SUB   |   SUB   |   SUB   |  SUB    |         |         |         |        |
+|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
+|  (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)  |
+|-----------------------------------------------------------------------------|
+*/
+#define HI_OPTION_FW_MODE_BITS         0x2
+#define HI_OPTION_FW_MODE_MASK         0x3
+#define HI_OPTION_FW_MODE_SHIFT        0xC
+#define HI_OPTION_ALL_FW_MODE_MASK     0xFF
+
+#define HI_OPTION_FW_SUBMODE_BITS      0x2
+#define HI_OPTION_FW_SUBMODE_MASK      0x3
+#define HI_OPTION_FW_SUBMODE_SHIFT     0x14
+#define HI_OPTION_ALL_FW_SUBMODE_MASK  0xFF00
+#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
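
A short sketch (illustrative, not part of the patch) of composing the
device-0 mode and submode bits of hi_option_flag with the masks above;
each further device's fields shift up by HI_OPTION_FW_MODE_BITS and
HI_OPTION_FW_SUBMODE_BITS respectively:

    /* One device, AP mode, no P2P submode. */
    u32 flags = 0;

    flags |= (1 & HI_OPTION_NUM_DEV_MASK) << HI_OPTION_NUM_DEV_SHIFT;
    flags |= (HI_OPTION_FW_MODE_AP & HI_OPTION_FW_MODE_MASK)
             << HI_OPTION_FW_MODE_SHIFT;
    flags |= (HI_OPTION_FW_SUBMODE_NONE & HI_OPTION_FW_SUBMODE_MASK)
             << HI_OPTION_FW_SUBMODE_SHIFT;
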
+
+
+/* hi_option_flag2 options */
+#define HI_OPTION_OFFLOAD_AMSDU     0x01
+#define HI_OPTION_DFS_SUPPORT       0x02 /* Enable DFS support */
+#define HI_OPTION_ENABLE_RFKILL     0x04 /* RFKill Enable Feature*/
+#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
+#define HI_OPTION_EARLY_CFG_DONE    0x10 /* Early configuration is complete */
+
+#define HI_OPTION_RF_KILL_SHIFT     0x2
+#define HI_OPTION_RF_KILL_MASK      0x1
+
+/* hi_reset_flag */
+/* preserve App Start address */
+#define HI_RESET_FLAG_PRESERVE_APP_START         0x01
+/* preserve host interest */
+#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST     0x02
+/* preserve ROM data */
+#define HI_RESET_FLAG_PRESERVE_ROMDATA           0x04
+#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE       0x08
+#define HI_RESET_FLAG_PRESERVE_BOOT_INFO         0x10
+#define HI_RESET_FLAG_WARM_RESET       0x20
+
+/* define hi_fw_swap bits */
+#define HI_DESC_IN_FW_BIT      0x01
+
+/* indicate the reset flag is valid */
+#define HI_RESET_FLAG_IS_VALID  0x12345678
+
+/* ACS is enabled */
+#define HI_ACS_FLAGS_ENABLED        (1 << 0)
+/* Use physical WWAN device */
+#define HI_ACS_FLAGS_USE_WWAN       (1 << 1)
+/* Use test VAP */
+#define HI_ACS_FLAGS_TEST_VAP       (1 << 2)
+
+/*
+ * CONSOLE FLAGS
+ *
+ * Bit Range  Meaning
+ * ---------  --------------------------------
+ *   2..0     UART ID (0 = Default)
+ *    3       Baud Select (0 = 9600, 1 = 115200)
+ *   30..4    Reserved
+ *    31      Enable Console
+ *
+ */
+
+#define HI_CONSOLE_FLAGS_ENABLE       (1 << 31)
+#define HI_CONSOLE_FLAGS_UART_MASK    (0x7)
+#define HI_CONSOLE_FLAGS_UART_SHIFT   0
+#define HI_CONSOLE_FLAGS_BAUD_SELECT  (1 << 3)
+
+/* SM power save options */
+#define HI_SMPS_ALLOW_MASK            (0x00000001)
+#define HI_SMPS_MODE_MASK             (0x00000002)
+#define HI_SMPS_MODE_STATIC           (0x00000000)
+#define HI_SMPS_MODE_DYNAMIC          (0x00000002)
+#define HI_SMPS_DISABLE_AUTO_MODE     (0x00000004)
+#define HI_SMPS_DATA_THRESH_MASK      (0x000007f8)
+#define HI_SMPS_DATA_THRESH_SHIFT     (3)
+#define HI_SMPS_RSSI_THRESH_MASK      (0x0007f800)
+#define HI_SMPS_RSSI_THRESH_SHIFT     (11)
+#define HI_SMPS_LOWPWR_CM_MASK        (0x00380000)
+#define HI_SMPS_LOWPWR_CM_SHIFT       (15)
+#define HI_SMPS_HIPWR_CM_MASK         (0x03c00000)
+#define HI_SMPS_HIPWR_CM_SHIFT        (19)
+
+/*
+ * WOW Extension configuration
+ *
+ * Bit Range  Meaning
+ * ---------  --------------------------------
+ *   8..0     Size of each WOW pattern (max 511)
+ *   15..9    Number of patterns per list (max 127)
+ *   17..16   Number of lists (max 4)
+ *   30..18   Reserved
+ *   31       Enabled
+ *
+ *  set values (except enable) to zeros for default settings
+ */
+
+#define HI_WOW_EXT_ENABLED_MASK        (1 << 31)
+#define HI_WOW_EXT_NUM_LIST_SHIFT      16
+#define HI_WOW_EXT_NUM_LIST_MASK       (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_NUM_PATTERNS_SHIFT  9
+#define HI_WOW_EXT_NUM_PATTERNS_MASK   (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_PATTERN_SIZE_SHIFT  0
+#define HI_WOW_EXT_PATTERN_SIZE_MASK   (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
+#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
+       ((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
+               HI_WOW_EXT_NUM_LIST_MASK) | \
+       (((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
+               HI_WOW_EXT_NUM_PATTERNS_MASK) | \
+       (((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
+               HI_WOW_EXT_PATTERN_SIZE_MASK))
+
+#define HI_WOW_EXT_GET_NUM_LISTS(config) \
+       (((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
+       (((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
+               HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
+       (((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
+               HI_WOW_EXT_PATTERN_SIZE_SHIFT)
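
The MAKE/GET macros are symmetric, so any in-range configuration survives
a round trip; a quick sketch with illustrative values:

    /* 2 lists, 16 patterns per list, 256-byte patterns, enabled. */
    u32 cfg = HI_WOW_EXT_MAKE_CONFIG(2, 16, 256) | HI_WOW_EXT_ENABLED_MASK;

    WARN_ON(HI_WOW_EXT_GET_NUM_LISTS(cfg) != 2);
    WARN_ON(HI_WOW_EXT_GET_NUM_PATTERNS(cfg) != 16);
    WARN_ON(HI_WOW_EXT_GET_PATTERN_SIZE(cfg) != 256);
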
+
+/*
+ * Early allocation configuration
+ * Support RAM bank configuration before BMI done and this eases the memory
+ * allocation at very early stage
+ * Bit Range  Meaning
+ * ---------  ----------------------------------
+ * [0:3]      number of bank assigned to be IRAM
+ * [4:15]     reserved
+ * [16:31]    magic number
+ *
+ * Note:
+ * 1. target firmware checks the magic number and, if it matches, considers
+ *    bits [0:15] valid and uses them to calculate the end of DRAM.  Early
+ *    allocation is then located in that area and may be reclaimed when
+ *    necessary.
+ * 2. if no magic number is found, early allocation happens at the "_end"
+ *    symbol of ROM, which is located before the app-data and might NOT be
+ *    reclaimable.  If this is adopted, the linker script should keep this
+ *    in mind to avoid data corruption.
+ */
+#define HI_EARLY_ALLOC_MAGIC           0x6d8a
+#define HI_EARLY_ALLOC_MAGIC_MASK      0xffff0000
+#define HI_EARLY_ALLOC_MAGIC_SHIFT     16
+#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
+#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT        0
+
+#define HI_EARLY_ALLOC_VALID() \
+       ((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
+       HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
+#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
+       (((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
+       >> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
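
The two macros above dereference HOST_INTEREST on the target side; a
host-side equivalent operating on a hi_early_alloc word fetched through
the diagnostic window could look like this (a sketch, helper name made up):

    static int early_alloc_iram_banks(u32 hi_early_alloc)
    {
            u32 magic = (hi_early_alloc & HI_EARLY_ALLOC_MAGIC_MASK) >>
                        HI_EARLY_ALLOC_MAGIC_SHIFT;

            if (magic != HI_EARLY_ALLOC_MAGIC)
                    return -EINVAL; /* bits [0:15] are not valid */

            return (hi_early_alloc & HI_EARLY_ALLOC_IRAM_BANKS_MASK) >>
                   HI_EARLY_ALLOC_IRAM_BANKS_SHIFT;
    }
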
+
+/* power save flag bit definitions */
+#define HI_PWR_SAVE_LPL_ENABLED   0x1
+/* b1-b3 reserved */
+/* b4-b5 : dev0 LPL type : 0 - none
+ *                         1 - Reduce Pwr Search
+ *                         2 - Reduce Pwr Listen */
+/* b6-b7 : dev1 LPL type, and so on for max 8 devices */
+#define HI_PWR_SAVE_LPL_DEV0_LSB   4
+#define HI_PWR_SAVE_LPL_DEV_MASK   0x3
+/* power save related utility macros */
+#define HI_LPL_ENABLED() \
+       ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
+#define HI_DEV_LPL_TYPE_GET(_devix) \
+       (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
+        (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
+
+#define HOST_INTEREST_SMPS_IS_ALLOWED() \
+       ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
+
+/* Reserve 1024 bytes for extended board data */
+#define QCA988X_BOARD_DATA_SZ     7168
+#define QCA988X_BOARD_EXT_DATA_SZ 0
+
+#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
new file mode 100644 (file)
index 0000000..4a31e2c
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
new file mode 100644 (file)
index 0000000..85e806b
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH10K_TRACING) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH10K_TRACING || __CHECKER__ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath10k
+
+#define ATH10K_MSG_MAX 200
+
+DECLARE_EVENT_CLASS(ath10k_log_event,
+       TP_PROTO(struct va_format *vaf),
+       TP_ARGS(vaf),
+       TP_STRUCT__entry(
+               __dynamic_array(char, msg, ATH10K_MSG_MAX)
+       ),
+       TP_fast_assign(
+               WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+                                      ATH10K_MSG_MAX,
+                                      vaf->fmt,
+                                      *vaf->va) >= ATH10K_MSG_MAX);
+       ),
+       TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
+            TP_PROTO(struct va_format *vaf),
+            TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
+            TP_PROTO(struct va_format *vaf),
+            TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
+            TP_PROTO(struct va_format *vaf),
+            TP_ARGS(vaf)
+);
+
+TRACE_EVENT(ath10k_log_dbg,
+       TP_PROTO(unsigned int level, struct va_format *vaf),
+       TP_ARGS(level, vaf),
+       TP_STRUCT__entry(
+               __field(unsigned int, level)
+               __dynamic_array(char, msg, ATH10K_MSG_MAX)
+       ),
+       TP_fast_assign(
+               __entry->level = level;
+               WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+                                      ATH10K_MSG_MAX,
+                                      vaf->fmt,
+                                      *vaf->va) >= ATH10K_MSG_MAX);
+       ),
+       TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(ath10k_log_dbg_dump,
+       TP_PROTO(const char *msg, const char *prefix,
+                const void *buf, size_t buf_len),
+
+       TP_ARGS(msg, prefix, buf, buf_len),
+
+       TP_STRUCT__entry(
+               __string(msg, msg)
+               __string(prefix, prefix)
+               __field(size_t, buf_len)
+               __dynamic_array(u8, buf, buf_len)
+       ),
+
+       TP_fast_assign(
+               __assign_str(msg, msg);
+               __assign_str(prefix, prefix);
+               __entry->buf_len = buf_len;
+               memcpy(__get_dynamic_array(buf), buf, buf_len);
+       ),
+
+       TP_printk(
+               "%s/%s\n", __get_str(prefix), __get_str(msg)
+       )
+);
+
+TRACE_EVENT(ath10k_wmi_cmd,
+       TP_PROTO(int id, void *buf, size_t buf_len),
+
+       TP_ARGS(id, buf, buf_len),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, id)
+               __field(size_t, buf_len)
+               __dynamic_array(u8, buf, buf_len)
+       ),
+
+       TP_fast_assign(
+               __entry->id = id;
+               __entry->buf_len = buf_len;
+               memcpy(__get_dynamic_array(buf), buf, buf_len);
+       ),
+
+       TP_printk(
+               "id %d len %zu",
+               __entry->id,
+               __entry->buf_len
+       )
+);
+
+TRACE_EVENT(ath10k_wmi_event,
+       TP_PROTO(int id, void *buf, size_t buf_len),
+
+       TP_ARGS(id, buf, buf_len),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, id)
+               __field(size_t, buf_len)
+               __dynamic_array(u8, buf, buf_len)
+       ),
+
+       TP_fast_assign(
+               __entry->id = id;
+               __entry->buf_len = buf_len;
+               memcpy(__get_dynamic_array(buf), buf, buf_len);
+       ),
+
+       TP_printk(
+               "id %d len %zu",
+               __entry->id,
+               __entry->buf_len
+       )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
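
Usage sketch: with CONFIG_ATH10K_TRACING enabled, a call such as the one
below (made from wmi.c later in this patch) emits an event under the
ath10k trace system, visible through the kernel's tracing debugfs; with
tracing disabled it compiles down to the empty inline stub defined above.

    /* Compiles either way; a no-op without CONFIG_ATH10K_TRACING. */
    trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
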
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
new file mode 100644 (file)
index 0000000..68b6fae
--- /dev/null
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "txrx.h"
+#include "htt.h"
+#include "mac.h"
+#include "debug.h"
+
+static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+       if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
+               return;
+
+       /* If the original wait_for_completion() timed out before
+        * {data,mgmt}_tx_completed() was called then we could complete
+        * offchan_tx_completed for a different skb. Prevent this by using
+        * offchan_tx_skb. */
+       spin_lock_bh(&ar->data_lock);
+       if (ar->offchan_tx_skb != skb) {
+               ath10k_warn("completed old offchannel frame\n");
+               goto out;
+       }
+
+       complete(&ar->offchan_tx_completed);
+       ar->offchan_tx_skb = NULL; /* just for sanity */
+
+       ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+out:
+       spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+{
+       struct device *dev = htt->ar->dev;
+       struct ieee80211_tx_info *info;
+       struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
+       struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
+       int ret;
+
+       if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
+               return;
+
+       ATH10K_SKB_CB(txdesc)->htt.refcount--;
+
+       if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+               return;
+
+       if (txfrag) {
+               ret = ath10k_skb_unmap(dev, txfrag);
+               if (ret)
+                       ath10k_warn("txfrag unmap failed (%d)\n", ret);
+
+               dev_kfree_skb_any(txfrag);
+       }
+
+       ret = ath10k_skb_unmap(dev, msdu);
+       if (ret)
+               ath10k_warn("data skb unmap failed (%d)\n", ret);
+
+       ath10k_report_offchan_tx(htt->ar, msdu);
+
+       info = IEEE80211_SKB_CB(msdu);
+       memset(&info->status, 0, sizeof(info->status));
+
+       if (ATH10K_SKB_CB(txdesc)->htt.discard) {
+               ieee80211_free_txskb(htt->ar->hw, msdu);
+               goto exit;
+       }
+
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+               info->flags |= IEEE80211_TX_STAT_ACK;
+
+       if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
+               info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+       ieee80211_tx_status(htt->ar->hw, msdu);
+       /* we do not own the msdu anymore */
+
+exit:
+       spin_lock_bh(&htt->tx_lock);
+       htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
+       ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
+       __ath10k_htt_tx_dec_pending(htt);
+       if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
+               wake_up(&htt->empty_tx_wq);
+       spin_unlock_bh(&htt->tx_lock);
+
+       dev_kfree_skb_any(txdesc);
+}
+
+void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
+                             const struct htt_tx_done *tx_done)
+{
+       struct sk_buff *txdesc;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+                  tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+
+       if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+               ath10k_warn("warning: msdu_id %d too big, ignoring\n",
+                           tx_done->msdu_id);
+               return;
+       }
+
+       txdesc = htt->pending_tx[tx_done->msdu_id];
+
+       ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
+       ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
+
+       ath10k_txrx_tx_unref(htt, txdesc);
+}
+
+static const u8 rx_legacy_rate_idx[] = {
+       3,      /* 0x00  - 11Mbps  */
+       2,      /* 0x01  - 5.5Mbps */
+       1,      /* 0x02  - 2Mbps   */
+       0,      /* 0x03  - 1Mbps   */
+       3,      /* 0x04  - 11Mbps  */
+       2,      /* 0x05  - 5.5Mbps */
+       1,      /* 0x06  - 2Mbps   */
+       0,      /* 0x07  - 1Mbps   */
+       10,     /* 0x08  - 48Mbps  */
+       8,      /* 0x09  - 24Mbps  */
+       6,      /* 0x0A  - 12Mbps  */
+       4,      /* 0x0B  - 6Mbps   */
+       11,     /* 0x0C  - 54Mbps  */
+       9,      /* 0x0D  - 36Mbps  */
+       7,      /* 0x0E  - 18Mbps  */
+       5,      /* 0x0F  - 9Mbps   */
+};
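
Two example lookups through the table (illustrative): process_rx_rates()
below only reaches it with codes 0x08-0x0F and clears bit 3 first for
2 GHz CCK, folding those codes into the 0x00-0x07 rows:

    u8 ofdm_idx = rx_legacy_rate_idx[0x0C];           /* 11 -> 54Mbps */
    u8 cck_idx  = rx_legacy_rate_idx[0x0C & ~BIT(3)]; /*  3 -> 11Mbps */
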
+
+static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
+                            enum ieee80211_band band,
+                            struct ieee80211_rx_status *status)
+{
+       u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+       u8 info0 = info->rate.info0;
+       u32 info1 = info->rate.info1;
+       u32 info2 = info->rate.info2;
+       u8 preamble = 0;
+
+       /* Check if valid fields */
+       if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+               return;
+
+       preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+
+       switch (preamble) {
+       case HTT_RX_LEGACY:
+               cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+               rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+               rate_idx = 0;
+
+               if (rate < 0x08 || rate > 0x0F)
+                       break;
+
+               switch (band) {
+               case IEEE80211_BAND_2GHZ:
+                       if (cck)
+                               rate &= ~BIT(3);
+                       rate_idx = rx_legacy_rate_idx[rate];
+                       break;
+               case IEEE80211_BAND_5GHZ:
+                       rate_idx = rx_legacy_rate_idx[rate];
+                       /* We use the same rate table as registered
+                          with mac80211 - ath10k_rates[]. On 5GHz the
+                          CCK rates are skipped, hence the -4 here */
+                       rate_idx -= 4;
+                       break;
+               default:
+                       break;
+               }
+
+               status->rate_idx = rate_idx;
+               break;
+       case HTT_RX_HT:
+       case HTT_RX_HT_WITH_TXBF:
+               /* HT-SIG - Table 20-11 in info1 and info2 */
+               mcs = info1 & 0x1F;
+               nss = mcs >> 3;
+               bw = (info1 >> 7) & 1;
+               sgi = (info2 >> 7) & 1;
+
+               status->rate_idx = mcs;
+               status->flag |= RX_FLAG_HT;
+               if (sgi)
+                       status->flag |= RX_FLAG_SHORT_GI;
+               if (bw)
+                       status->flag |= RX_FLAG_40MHZ;
+               break;
+       case HTT_RX_VHT:
+       case HTT_RX_VHT_WITH_TXBF:
+               /* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2;
+                  TODO: check this */
+               mcs = (info2 >> 4) & 0x0F;
+               nss = (info1 >> 10) & 0x07;
+               bw = info1 & 3;
+               sgi = info2 & 1;
+
+               status->rate_idx = mcs;
+               status->vht_nss = nss;
+
+               if (sgi)
+                       status->flag |= RX_FLAG_SHORT_GI;
+
+               switch (bw) {
+               /* 20MHZ */
+               case 0:
+                       break;
+               /* 40MHZ */
+               case 1:
+                       status->flag |= RX_FLAG_40MHZ;
+                       break;
+               /* 80MHZ */
+               case 2:
+                       status->flag |= RX_FLAG_80MHZ;
+               }
+
+               status->flag |= RX_FLAG_VHT;
+               break;
+       default:
+               break;
+       }
+}
+
+void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
+{
+       struct ieee80211_rx_status *status;
+       struct ieee80211_channel *ch;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
+
+       status = IEEE80211_SKB_RXCB(info->skb);
+       memset(status, 0, sizeof(*status));
+
+       if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+               status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
+                               RX_FLAG_MMIC_STRIPPED;
+               hdr->frame_control = __cpu_to_le16(
+                               __le16_to_cpu(hdr->frame_control) &
+                               ~IEEE80211_FCTL_PROTECTED);
+       }
+
+       if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
+               status->flag |= RX_FLAG_MMIC_ERROR;
+
+       if (info->fcs_err)
+               status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+       status->signal = info->signal;
+
+       spin_lock_bh(&ar->data_lock);
+       ch = ar->scan_channel;
+       if (!ch)
+               ch = ar->rx_channel;
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!ch) {
+               ath10k_warn("no channel configured; ignoring frame!\n");
+               dev_kfree_skb_any(info->skb);
+               return;
+       }
+
+       process_rx_rates(ar, info, ch->band, status);
+       status->band = ch->band;
+       status->freq = ch->center_freq;
+
+       ath10k_dbg(ATH10K_DBG_DATA,
+                  "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
+                  info->skb,
+                  info->skb->len,
+                  status->flag == 0 ? "legacy" : "",
+                  status->flag & RX_FLAG_HT ? "ht" : "",
+                  status->flag & RX_FLAG_VHT ? "vht" : "",
+                  status->flag & RX_FLAG_40MHZ ? "40" : "",
+                  status->flag & RX_FLAG_80MHZ ? "80" : "",
+                  status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+                  status->rate_idx,
+                  status->vht_nss,
+                  status->freq,
+                  status->band);
+
+       ieee80211_rx(ar->hw, info->skb);
+}
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+                                    const u8 *addr)
+{
+       struct ath10k_peer *peer;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       list_for_each_entry(peer, &ar->peers, list) {
+               if (peer->vdev_id != vdev_id)
+                       continue;
+               if (memcmp(peer->addr, addr, ETH_ALEN))
+                       continue;
+
+               return peer;
+       }
+
+       return NULL;
+}
+
+static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
+                                                 int peer_id)
+{
+       struct ath10k_peer *peer;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       list_for_each_entry(peer, &ar->peers, list)
+               if (test_bit(peer_id, peer->peer_ids))
+                       return peer;
+
+       return NULL;
+}
+
+static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
+                                      const u8 *addr, bool expect_mapped)
+{
+       int ret;
+
+       ret = wait_event_timeout(ar->peer_mapping_wq, ({
+                       bool mapped;
+
+                       spin_lock_bh(&ar->data_lock);
+                       mapped = !!ath10k_peer_find(ar, vdev_id, addr);
+                       spin_unlock_bh(&ar->data_lock);
+
+                       mapped == expect_mapped;
+               }), 3*HZ);
+
+       if (ret <= 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+       return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
+}
+
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+       return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
+}
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+                          struct htt_peer_map_event *ev)
+{
+       struct ath10k *ar = htt->ar;
+       struct ath10k_peer *peer;
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
+       if (!peer) {
+               peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+               if (!peer)
+                       goto exit;
+
+               peer->vdev_id = ev->vdev_id;
+               memcpy(peer->addr, ev->addr, ETH_ALEN);
+               list_add(&peer->list, &ar->peers);
+               wake_up(&ar->peer_mapping_wq);
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
+                  ev->vdev_id, ev->addr, ev->peer_id);
+
+       set_bit(ev->peer_id, peer->peer_ids);
+exit:
+       spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+                            struct htt_peer_unmap_event *ev)
+{
+       struct ath10k *ar = htt->ar;
+       struct ath10k_peer *peer;
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find_by_id(ar, ev->peer_id);
+       if (!peer) {
+               ath10k_warn("unknown peer id %d\n", ev->peer_id);
+               goto exit;
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+                  peer->vdev_id, peer->addr, ev->peer_id);
+
+       clear_bit(ev->peer_id, peer->peer_ids);
+
+       if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
+               list_del(&peer->list);
+               kfree(peer);
+               wake_up(&ar->peer_mapping_wq);
+       }
+
+exit:
+       spin_unlock_bh(&ar->data_lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
new file mode 100644 (file)
index 0000000..e78632a
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include "htt.h"
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
+void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
+                             const struct htt_tx_done *tx_done);
+void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+                                    const u8 *addr);
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
+                                const u8 *addr);
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
+                                const u8 *addr);
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+                          struct htt_peer_map_event *ev);
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+                            struct htt_peer_unmap_event *ev);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
new file mode 100644 (file)
index 0000000..7d4b798
--- /dev/null
@@ -0,0 +1,2081 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "core.h"
+#include "htc.h"
+#include "debug.h"
+#include "wmi.h"
+#include "mac.h"
+
+void ath10k_wmi_flush_tx(struct ath10k *ar)
+{
+       int ret;
+
+       ret = wait_event_timeout(ar->wmi.wq,
+                                atomic_read(&ar->wmi.pending_tx_count) == 0,
+                                5*HZ);
+       if (atomic_read(&ar->wmi.pending_tx_count) == 0)
+               return;
+
+       if (ret == 0)
+               ret = -ETIMEDOUT;
+
+       if (ret < 0)
+               ath10k_warn("wmi flush failed (%d)\n", ret);
+}
+
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+{
+       int ret;
+       ret = wait_for_completion_timeout(&ar->wmi.service_ready,
+                                         WMI_SERVICE_READY_TIMEOUT_HZ);
+       return ret;
+}
+
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+{
+       int ret;
+       ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
+                                         WMI_UNIFIED_READY_TIMEOUT_HZ);
+       return ret;
+}
+
+static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
+{
+       struct sk_buff *skb;
+       u32 round_len = roundup(len, 4);
+
+       skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
+       if (!skb)
+               return NULL;
+
+       skb_reserve(skb, WMI_SKB_HEADROOM);
+       if (!IS_ALIGNED((unsigned long)skb->data, 4))
+               ath10k_warn("Unaligned WMI skb\n");
+
+       skb_put(skb, round_len);
+       memset(skb->data, 0, round_len);
+
+       return skb;
+}
+
+static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+       dev_kfree_skb(skb);
+
+       if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
+               wake_up(&ar->wmi.wq);
+}
+
+/* WMI command API */
+static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+                              enum wmi_cmd_id cmd_id)
+{
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+       struct wmi_cmd_hdr *cmd_hdr;
+       int status;
+       u32 cmd = 0;
+
+       if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+               return -ENOMEM;
+
+       cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
+
+       cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+       cmd_hdr->cmd_id = __cpu_to_le32(cmd);
+
+       if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
+           WMI_MAX_PENDING_TX_COUNT) {
+               /* avoid using up memory when FW hangs */
+               atomic_dec(&ar->wmi.pending_tx_count);
+               return -EBUSY;
+       }
+
+       memset(skb_cb, 0, sizeof(*skb_cb));
+
+       trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
+
+       status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
+       if (status) {
+               dev_kfree_skb_any(skb);
+               atomic_dec(&ar->wmi.pending_tx_count);
+               return status;
+       }
+
+       return 0;
+}
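
A caller sketch (the command struct and id are hypothetical, shown only to
illustrate the flow): allocate a zeroed, 4-byte aligned buffer with
ath10k_wmi_alloc_skb(), fill the payload, and hand it to
ath10k_wmi_cmd_send(), which prepends the wmi_cmd_hdr:

    struct wmi_foo_cmd *cmd;        /* hypothetical payload struct */
    struct sk_buff *skb;

    skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
    if (!skb)
            return -ENOMEM;

    cmd = (struct wmi_foo_cmd *)skb->data;
    cmd->vdev_id = __cpu_to_le32(vdev_id);

    return ath10k_wmi_cmd_send(ar, skb, WMI_FOO_CMDID); /* hypothetical id */
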
+
+static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
+       enum wmi_scan_event_type event_type;
+       enum wmi_scan_completion_reason reason;
+       u32 freq;
+       u32 req_id;
+       u32 scan_id;
+       u32 vdev_id;
+
+       event_type = __le32_to_cpu(event->event_type);
+       reason     = __le32_to_cpu(event->reason);
+       freq       = __le32_to_cpu(event->channel_freq);
+       req_id     = __le32_to_cpu(event->scan_req_id);
+       scan_id    = __le32_to_cpu(event->scan_id);
+       vdev_id    = __le32_to_cpu(event->vdev_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "scan event type %d reason %d freq %d req_id %d "
+                  "scan_id %d vdev_id %d\n",
+                  event_type, reason, freq, req_id, scan_id, vdev_id);
+
+       spin_lock_bh(&ar->data_lock);
+
+       switch (event_type) {
+       case WMI_SCAN_EVENT_STARTED:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
+               if (ar->scan.in_progress && ar->scan.is_roc)
+                       ieee80211_ready_on_channel(ar->hw);
+
+               complete(&ar->scan.started);
+               break;
+       case WMI_SCAN_EVENT_COMPLETED:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
+               switch (reason) {
+               case WMI_SCAN_REASON_COMPLETED:
+                       ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
+                       break;
+               case WMI_SCAN_REASON_CANCELLED:
+                       ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
+                       break;
+               case WMI_SCAN_REASON_PREEMPTED:
+                       ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
+                       break;
+               case WMI_SCAN_REASON_TIMEDOUT:
+                       ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
+                       break;
+               default:
+                       break;
+               }
+
+               ar->scan_channel = NULL;
+               if (!ar->scan.in_progress) {
+                       ath10k_warn("no scan requested, ignoring\n");
+                       break;
+               }
+
+               if (ar->scan.is_roc) {
+                       ath10k_offchan_tx_purge(ar);
+
+                       if (!ar->scan.aborting)
+                               ieee80211_remain_on_channel_expired(ar->hw);
+               } else {
+                       ieee80211_scan_completed(ar->hw, ar->scan.aborting);
+               }
+
+               del_timer(&ar->scan.timeout);
+               complete_all(&ar->scan.completed);
+               ar->scan.in_progress = false;
+               break;
+       case WMI_SCAN_EVENT_BSS_CHANNEL:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
+               ar->scan_channel = NULL;
+               break;
+       case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
+               ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+               if (ar->scan.in_progress && ar->scan.is_roc &&
+                   ar->scan.roc_freq == freq) {
+                       complete(&ar->scan.on_channel);
+               }
+               break;
+       case WMI_SCAN_EVENT_DEQUEUED:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
+               break;
+       case WMI_SCAN_EVENT_PREEMPTED:
+               ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
+               break;
+       case WMI_SCAN_EVENT_START_FAILED:
+               ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
+               break;
+       default:
+               break;
+       }
+
+       spin_unlock_bh(&ar->data_lock);
+       return 0;
+}
+
+static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
+{
+       enum ieee80211_band band;
+
+       switch (phy_mode) {
+       case MODE_11A:
+       case MODE_11NA_HT20:
+       case MODE_11NA_HT40:
+       case MODE_11AC_VHT20:
+       case MODE_11AC_VHT40:
+       case MODE_11AC_VHT80:
+               band = IEEE80211_BAND_5GHZ;
+               break;
+       case MODE_11G:
+       case MODE_11B:
+       case MODE_11GONLY:
+       case MODE_11NG_HT20:
+       case MODE_11NG_HT40:
+       case MODE_11AC_VHT20_2G:
+       case MODE_11AC_VHT40_2G:
+       case MODE_11AC_VHT80_2G:
+       default:
+               band = IEEE80211_BAND_2GHZ;
+       }
+
+       return band;
+}
+
+static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
+{
+       u8 rate_idx = 0;
+
+       /* rate in Kbps */
+       switch (rate) {
+       case 1000:
+               rate_idx = 0;
+               break;
+       case 2000:
+               rate_idx = 1;
+               break;
+       case 5500:
+               rate_idx = 2;
+               break;
+       case 11000:
+               rate_idx = 3;
+               break;
+       case 6000:
+               rate_idx = 4;
+               break;
+       case 9000:
+               rate_idx = 5;
+               break;
+       case 12000:
+               rate_idx = 6;
+               break;
+       case 18000:
+               rate_idx = 7;
+               break;
+       case 24000:
+               rate_idx = 8;
+               break;
+       case 36000:
+               rate_idx = 9;
+               break;
+       case 48000:
+               rate_idx = 10;
+               break;
+       case 54000:
+               rate_idx = 11;
+               break;
+       default:
+               break;
+       }
+
+       if (band == IEEE80211_BAND_5GHZ) {
+               if (rate_idx > 3)
+                       /* Omit CCK rates */
+                       rate_idx -= 4;
+               else
+                       rate_idx = 0;
+       }
+
+       return rate_idx;
+}
+
+static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_hdr *hdr;
+       u32 rx_status;
+       u32 channel;
+       u32 phy_mode;
+       u32 snr;
+       u32 rate;
+       u32 buf_len;
+       u16 fc;
+
+       channel   = __le32_to_cpu(event->hdr.channel);
+       buf_len   = __le32_to_cpu(event->hdr.buf_len);
+       rx_status = __le32_to_cpu(event->hdr.status);
+       snr       = __le32_to_cpu(event->hdr.snr);
+       phy_mode  = __le32_to_cpu(event->hdr.phy_mode);
+       rate      = __le32_to_cpu(event->hdr.rate);
+
+       memset(status, 0, sizeof(*status));
+
+       ath10k_dbg(ATH10K_DBG_MGMT,
+                  "event mgmt rx status %08x\n", rx_status);
+
+       if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
+               dev_kfree_skb(skb);
+               return 0;
+       }
+
+       if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
+               dev_kfree_skb(skb);
+               return 0;
+       }
+
+       if (rx_status & WMI_RX_STATUS_ERR_CRC)
+               status->flag |= RX_FLAG_FAILED_FCS_CRC;
+       if (rx_status & WMI_RX_STATUS_ERR_MIC)
+               status->flag |= RX_FLAG_MMIC_ERROR;
+
+       status->band = phy_mode_to_band(phy_mode);
+       status->freq = ieee80211_channel_to_frequency(channel, status->band);
+       status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+       status->rate_idx = get_rate_idx(rate, status->band);
+
+       skb_pull(skb, sizeof(event->hdr));
+
+       hdr = (struct ieee80211_hdr *)skb->data;
+       fc = le16_to_cpu(hdr->frame_control);
+
+       if (fc & IEEE80211_FCTL_PROTECTED) {
+               status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
+                               RX_FLAG_MMIC_STRIPPED;
+               hdr->frame_control = __cpu_to_le16(fc &
+                                       ~IEEE80211_FCTL_PROTECTED);
+       }
+
+       ath10k_dbg(ATH10K_DBG_MGMT,
+                  "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+                  skb, skb->len,
+                  fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+       ath10k_dbg(ATH10K_DBG_MGMT,
+                  "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+                  status->freq, status->band, status->signal,
+                  status->rate_idx);
+
+       /*
+        * Packets from HTC come aligned to 4-byte boundaries
+        * because they can originally arrive along with a trailer.
+        */
+       skb_trim(skb, buf_len);
+
+       ieee80211_rx(ar->hw, skb);
+       return 0;
+}
+
+static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
+}
+
+static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+}
+
+static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
+}
+
+static void ath10k_wmi_event_update_stats(struct ath10k *ar,
+                                         struct sk_buff *skb)
+{
+       struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
+
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
+
+       ath10k_debug_read_target_stats(ar, ev);
+}
+
+static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       struct wmi_vdev_start_response_event *ev;
+
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+
+       ev = (struct wmi_vdev_start_response_event *)skb->data;
+
+       if (WARN_ON(__le32_to_cpu(ev->status)))
+               return;
+
+       complete(&ar->vdev_setup_done);
+}
+
+static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
+                                         struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
+       complete(&ar->vdev_setup_done);
+}
+
+static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
+                                             struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
+}
+
+/*
+ * FIXME
+ *
+ * We don't report the sleep state of connected stations to
+ * mac80211.  Due to this mac80211 can't fill in the TIM IE
+ * correctly.
+ *
+ * I know of no way of getting nullfunc frames that contain
+ * sleep transition from connected stations - these do not
+ * seem to be sent from the target to the host. There also
+ * doesn't seem to be a dedicated event for that. So the
+ * only way left to do this would be to read tim_bitmap
+ * during SWBA.
+ *
+ * We could probably try using tim_bitmap from SWBA to tell
+ * mac80211 which stations are asleep and which are not. The
+ * problem here is calling mac80211 functions so many times
+ * could take too long and make us miss the time to submit
+ * the beacon to the target.
+ *
+ * So as a workaround we try to extend the TIM IE if there
+ * is unicast buffered for stations with aid > 7 and fill it
+ * in ourselves.
+ */
+static void ath10k_wmi_update_tim(struct ath10k *ar,
+                                 struct ath10k_vif *arvif,
+                                 struct sk_buff *bcn,
+                                 struct wmi_bcn_info *bcn_info)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
+       struct ieee80211_tim_ie *tim;
+       u8 *ies, *ie;
+       u8 ie_len, pvm_len;
+
+       /* if an SWBA event has tim_changed unset, its tim_bitmap is
+        * garbage, so we must copy the bitmap upon change and reuse
+        * it later */
+       if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
+               int i;
+
+               BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
+                            sizeof(bcn_info->tim_info.tim_bitmap));
+
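+               /* the target reports the bitmap as __le32 words; byte i
+                * of the host copy comes from word i/4, bits
+                * (i%4)*8 .. (i%4)*8+7 (e.g. i = 5 reads bits 15:8 of
+                * word 1) */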
+               for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+                       __le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
+                       u32 v = __le32_to_cpu(t);
+                       arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
+               }
+
+               /* the FW reports a length of either 0 or 16,
+                * so we calculate it on our own */
+               arvif->u.ap.tim_len = 0;
+               for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+                       if (arvif->u.ap.tim_bitmap[i])
+                               arvif->u.ap.tim_len = i;
+
+               arvif->u.ap.tim_len++;
+       }
+
+       ies = bcn->data;
+       ies += ieee80211_hdrlen(hdr->frame_control);
+       ies += 12; /* fixed parameters */
+
+       ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
+                                   (u8 *)skb_tail_pointer(bcn) - ies);
+       if (!ie) {
+               /* highly unlikely for mac80211 */
+               ath10k_warn("no tim ie found;\n");
+               return;
+       }
+
+       tim = (void *)ie + 2;
+       ie_len = ie[1];
+       pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
+
+       if (pvm_len < arvif->u.ap.tim_len) {
+               int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+               int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
+               void *next_ie = ie + 2 + ie_len;
+
+               /* skb_put() never returns NULL (it panics on overflow),
+                * so check the available tailroom explicitly */
+               if (skb_tailroom(bcn) >= expand_size) {
+                       skb_put(bcn, expand_size);
+                       memmove(next_ie + expand_size, next_ie, move_size);
+
+                       ie[1] += expand_size;
+                       ie_len += expand_size;
+                       pvm_len += expand_size;
+               } else {
+                       ath10k_warn("tim expansion failed\n");
+               }
+       }
+
+       if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+               ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
+               return;
+       }
+
+       tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
+       memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
+
+       ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
+                  tim->dtim_count, tim->dtim_period,
+                  tim->bitmap_ctrl, pvm_len);
+}
+
+static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
+                                  struct wmi_p2p_noa_info *noa)
+{
+       struct ieee80211_p2p_noa_attr *noa_attr;
+       u8 ctwindow_oppps = noa->ctwindow_oppps;
+       u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+       bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+       __le16 *noa_attr_len;
+       u16 attr_len;
+       u8 noa_descriptors = noa->num_descriptors;
+       int i;
+
+       /* P2P IE */
+       data[0] = WLAN_EID_VENDOR_SPECIFIC;
+       data[1] = len - 2;
+       data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+       data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+       data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+       data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+       /* NOA ATTR */
+       data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+       noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+       noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+       noa_attr->index = noa->index;
+       noa_attr->oppps_ctwindow = ctwindow;
+       if (oppps)
+               noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+       for (i = 0; i < noa_descriptors; i++) {
+               noa_attr->desc[i].count =
+                       __le32_to_cpu(noa->descriptors[i].type_count);
+               noa_attr->desc[i].duration = noa->descriptors[i].duration;
+               noa_attr->desc[i].interval = noa->descriptors[i].interval;
+               noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+       }
+
+       attr_len = 2; /* index + oppps_ctwindow */
+       attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+       *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
+{
+       u32 len = 0;
+       u8 noa_descriptors = noa->num_descriptors;
+       u8 opp_ps_info = noa->ctwindow_oppps;
+       bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
+
+       if (!noa_descriptors && !opps_enabled)
+               return len;
+
+       len += 1 + 1 + 4; /* EID + len + OUI */
+       len += 1 + 2; /* noa attr  + attr len */
+       len += 1 + 1; /* index + oppps_ctwindow */
+       len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
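+
+       /* worked example: assuming struct ieee80211_p2p_noa_desc is
+        * packed into 13 bytes, two descriptors yield
+        * 6 + 3 + 2 + 2 * 13 = 37 bytes */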
+
+       return len;
+}
+
+static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
+                                 struct sk_buff *bcn,
+                                 struct wmi_bcn_info *bcn_info)
+{
+       struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
+       u8 *new_data, *old_data = arvif->u.ap.noa_data;
+       u32 new_len;
+
+       if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+               return;
+
+       ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
+       if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
+               new_len = ath10k_p2p_calc_noa_ie_len(noa);
+               if (!new_len)
+                       goto cleanup;
+
+               new_data = kmalloc(new_len, GFP_ATOMIC);
+               if (!new_data)
+                       goto cleanup;
+
+               ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
+
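+               /* publish the new IE under data_lock so readers see a
+                * consistent pointer/length pair; the old buffer is
+                * freed outside the lock to keep the critical section
+                * short */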
+               spin_lock_bh(&ar->data_lock);
+               arvif->u.ap.noa_data = new_data;
+               arvif->u.ap.noa_len = new_len;
+               spin_unlock_bh(&ar->data_lock);
+               kfree(old_data);
+       }
+
+       if (arvif->u.ap.noa_data)
+               if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
+                       memcpy(skb_put(bcn, arvif->u.ap.noa_len),
+                              arvif->u.ap.noa_data,
+                              arvif->u.ap.noa_len);
+       return;
+
+cleanup:
+       spin_lock_bh(&ar->data_lock);
+       arvif->u.ap.noa_data = NULL;
+       arvif->u.ap.noa_len = 0;
+       spin_unlock_bh(&ar->data_lock);
+       kfree(old_data);
+}
+
+static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_host_swba_event *ev;
+       u32 map;
+       int i = -1;
+       struct wmi_bcn_info *bcn_info;
+       struct ath10k_vif *arvif;
+       struct wmi_bcn_tx_arg arg;
+       struct sk_buff *bcn;
+       int vdev_id = 0;
+       int ret;
+
+       ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
+
+       ev = (struct wmi_host_swba_event *)skb->data;
+       map = __le32_to_cpu(ev->vdev_map);
+
+       ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
+                  "-vdev map 0x%x\n",
+                  ev->vdev_map);
+
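+       /* each set bit in vdev_map selects a vdev; the bcn_info entries
+        * are packed in set-bit order, e.g. map 0x5 pairs bcn_info[0]
+        * with vdev 0 and bcn_info[1] with vdev 2 */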
+       for (; map; map >>= 1, vdev_id++) {
+               if (!(map & 0x1))
+                       continue;
+
+               i++;
+
+               if (i >= WMI_MAX_AP_VDEV) {
+                       ath10k_warn("swba has corrupted vdev map\n");
+                       break;
+               }
+
+               bcn_info = &ev->bcn_info[i];
+
+               ath10k_dbg(ATH10K_DBG_MGMT,
+                          "-bcn_info[%d]:\n"
+                          "--tim_len %d\n"
+                          "--tim_mcast %d\n"
+                          "--tim_changed %d\n"
+                          "--tim_num_ps_pending %d\n"
+                          "--tim_bitmap 0x%08x%08x%08x%08x\n",
+                          i,
+                          __le32_to_cpu(bcn_info->tim_info.tim_len),
+                          __le32_to_cpu(bcn_info->tim_info.tim_mcast),
+                          __le32_to_cpu(bcn_info->tim_info.tim_changed),
+                          __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
+                          __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
+                          __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
+                          __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
+                          __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
+
+               arvif = ath10k_get_arvif(ar, vdev_id);
+               if (arvif == NULL) {
+                       ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
+                       continue;
+               }
+
+               bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+               if (!bcn) {
+                       ath10k_warn("could not get mac80211 beacon\n");
+                       continue;
+               }
+
+               ath10k_tx_h_seq_no(bcn);
+               ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
+               ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
+
+               arg.vdev_id = arvif->vdev_id;
+               arg.tx_rate = 0;
+               arg.tx_power = 0;
+               arg.bcn = bcn->data;
+               arg.bcn_len = bcn->len;
+
+               ret = ath10k_wmi_beacon_send(ar, &arg);
+               if (ret)
+                       ath10k_warn("could not send beacon (%d)\n", ret);
+
+               dev_kfree_skb_any(bcn);
+       }
+}
+
+static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
+                                              struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
+}
+
+static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+}
+
+static void ath10k_wmi_event_profile_match(struct ath10k *ar,
+                                   struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
+}
+
+static void ath10k_wmi_event_debug_print(struct ath10k *ar,
+                                 struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
+                                              struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
+}
+
+static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
+                                             struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+}
+
+static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
+                                             struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
+}
+
+static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
+                                          struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
+}
+
+static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
+                                        struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
+}
+
+static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
+                                           struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
+}
+
+static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
+                                           struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
+                                           struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+                                               struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
+}
+
+static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
+                                             struct sk_buff *skb)
+{
+       struct wmi_service_ready_event *ev = (void *)skb->data;
+
+       if (skb->len < sizeof(*ev)) {
+               ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+                           skb->len, sizeof(*ev));
+               return;
+       }
+
+       ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+       ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+       ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+       ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+       ar->fw_version_major =
+               (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+       ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+       ar->fw_version_release =
+               (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
+       ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
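+
+       /* example decode (values purely illustrative): sw_version
+        * 0x02000045 and sw_version_1 0x00030007 yield fw version
+        * "2.69.3.7" (major.minor.release.build) */
+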
+       ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+
+       ar->ath_common.regulatory.current_rd =
+               __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+
+       ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+                                     sizeof(ev->wmi_service_bitmap));
+
+       if (strlen(ar->hw->wiphy->fw_version) == 0) {
+               snprintf(ar->hw->wiphy->fw_version,
+                        sizeof(ar->hw->wiphy->fw_version),
+                        "%u.%u.%u.%u",
+                        ar->fw_version_major,
+                        ar->fw_version_minor,
+                        ar->fw_version_release,
+                        ar->fw_version_build);
+       }
+
+       /* FIXME: it would probably be better to support this */
+       if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
+               ath10k_warn("target requested %d memory chunks; ignoring\n",
+                           __le32_to_cpu(ev->num_mem_reqs));
+       }
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
+                  __le32_to_cpu(ev->sw_version),
+                  __le32_to_cpu(ev->sw_version_1),
+                  __le32_to_cpu(ev->abi_version),
+                  __le32_to_cpu(ev->phy_capability),
+                  __le32_to_cpu(ev->ht_cap_info),
+                  __le32_to_cpu(ev->vht_cap_info),
+                  __le32_to_cpu(ev->vht_supp_mcs),
+                  __le32_to_cpu(ev->sys_cap_info),
+                  __le32_to_cpu(ev->num_mem_reqs));
+
+       complete(&ar->wmi.service_ready);
+}
+
+static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
+
+       if (WARN_ON(skb->len < sizeof(*ev)))
+               return -EINVAL;
+
+       memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+                  __le32_to_cpu(ev->sw_version),
+                  __le32_to_cpu(ev->abi_version),
+                  ev->mac_addr.addr,
+                  __le32_to_cpu(ev->status));
+
+       complete(&ar->wmi.unified_ready);
+       return 0;
+}
+
+static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_cmd_hdr *cmd_hdr;
+       enum wmi_event_id id;
+
+       cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+       id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+       if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) {
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       trace_ath10k_wmi_event(id, skb->data, skb->len);
+
+       switch (id) {
+       case WMI_MGMT_RX_EVENTID:
+               ath10k_wmi_event_mgmt_rx(ar, skb);
+               /* mgmt_rx() owns the skb now! */
+               return;
+       case WMI_SCAN_EVENTID:
+               ath10k_wmi_event_scan(ar, skb);
+               break;
+       case WMI_CHAN_INFO_EVENTID:
+               ath10k_wmi_event_chan_info(ar, skb);
+               break;
+       case WMI_ECHO_EVENTID:
+               ath10k_wmi_event_echo(ar, skb);
+               break;
+       case WMI_DEBUG_MESG_EVENTID:
+               ath10k_wmi_event_debug_mesg(ar, skb);
+               break;
+       case WMI_UPDATE_STATS_EVENTID:
+               ath10k_wmi_event_update_stats(ar, skb);
+               break;
+       case WMI_VDEV_START_RESP_EVENTID:
+               ath10k_wmi_event_vdev_start_resp(ar, skb);
+               break;
+       case WMI_VDEV_STOPPED_EVENTID:
+               ath10k_wmi_event_vdev_stopped(ar, skb);
+               break;
+       case WMI_PEER_STA_KICKOUT_EVENTID:
+               ath10k_wmi_event_peer_sta_kickout(ar, skb);
+               break;
+       case WMI_HOST_SWBA_EVENTID:
+               ath10k_wmi_event_host_swba(ar, skb);
+               break;
+       case WMI_TBTTOFFSET_UPDATE_EVENTID:
+               ath10k_wmi_event_tbttoffset_update(ar, skb);
+               break;
+       case WMI_PHYERR_EVENTID:
+               ath10k_wmi_event_phyerr(ar, skb);
+               break;
+       case WMI_ROAM_EVENTID:
+               ath10k_wmi_event_roam(ar, skb);
+               break;
+       case WMI_PROFILE_MATCH:
+               ath10k_wmi_event_profile_match(ar, skb);
+               break;
+       case WMI_DEBUG_PRINT_EVENTID:
+               ath10k_wmi_event_debug_print(ar, skb);
+               break;
+       case WMI_PDEV_QVIT_EVENTID:
+               ath10k_wmi_event_pdev_qvit(ar, skb);
+               break;
+       case WMI_WLAN_PROFILE_DATA_EVENTID:
+               ath10k_wmi_event_wlan_profile_data(ar, skb);
+               break;
+       case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
+               ath10k_wmi_event_rtt_measurement_report(ar, skb);
+               break;
+       case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
+               ath10k_wmi_event_tsf_measurement_report(ar, skb);
+               break;
+       case WMI_RTT_ERROR_REPORT_EVENTID:
+               ath10k_wmi_event_rtt_error_report(ar, skb);
+               break;
+       case WMI_WOW_WAKEUP_HOST_EVENTID:
+               ath10k_wmi_event_wow_wakeup_host(ar, skb);
+               break;
+       case WMI_DCS_INTERFERENCE_EVENTID:
+               ath10k_wmi_event_dcs_interference(ar, skb);
+               break;
+       case WMI_PDEV_TPC_CONFIG_EVENTID:
+               ath10k_wmi_event_pdev_tpc_config(ar, skb);
+               break;
+       case WMI_PDEV_FTM_INTG_EVENTID:
+               ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+               break;
+       case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+               ath10k_wmi_event_gtk_offload_status(ar, skb);
+               break;
+       case WMI_GTK_REKEY_FAIL_EVENTID:
+               ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+               break;
+       case WMI_TX_DELBA_COMPLETE_EVENTID:
+               ath10k_wmi_event_delba_complete(ar, skb);
+               break;
+       case WMI_TX_ADDBA_COMPLETE_EVENTID:
+               ath10k_wmi_event_addba_complete(ar, skb);
+               break;
+       case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+               ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+               break;
+       case WMI_SERVICE_READY_EVENTID:
+               ath10k_wmi_service_ready_event_rx(ar, skb);
+               break;
+       case WMI_READY_EVENTID:
+               ath10k_wmi_ready_event_rx(ar, skb);
+               break;
+       default:
+               ath10k_warn("Unknown eventid: %d\n", id);
+               break;
+       }
+
+       dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_event_work(struct work_struct *work)
+{
+       struct ath10k *ar = container_of(work, struct ath10k,
+                                        wmi.wmi_event_work);
+       struct sk_buff *skb;
+
+       for (;;) {
+               skb = skb_dequeue(&ar->wmi.wmi_event_list);
+               if (!skb)
+                       break;
+
+               ath10k_wmi_event_process(ar, skb);
+       }
+}
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+       enum wmi_event_id event_id;
+
+       event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+       /* some events must be handled as soon as possible (e.g. beacons
+        * must reach the target before the next TBTT) and thus can't be
+        * deferred to the worker thread */
+       switch (event_id) {
+       case WMI_HOST_SWBA_EVENTID:
+       case WMI_MGMT_RX_EVENTID:
+               ath10k_wmi_event_process(ar, skb);
+               return;
+       default:
+               break;
+       }
+
+       skb_queue_tail(&ar->wmi.wmi_event_list, skb);
+       queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
+}
+
+/* WMI Initialization functions */
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+       init_completion(&ar->wmi.service_ready);
+       init_completion(&ar->wmi.unified_ready);
+       init_waitqueue_head(&ar->wmi.wq);
+
+       skb_queue_head_init(&ar->wmi.wmi_event_list);
+       INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
+
+       return 0;
+}
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+       /* HTC should've drained the packets already */
+       if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
+               ath10k_warn("there are still pending packets\n");
+
+       cancel_work_sync(&ar->wmi.wmi_event_work);
+       skb_queue_purge(&ar->wmi.wmi_event_list);
+}
+
+int ath10k_wmi_connect_htc_service(struct ath10k *ar)
+{
+       int status;
+       struct ath10k_htc_svc_conn_req conn_req;
+       struct ath10k_htc_svc_conn_resp conn_resp;
+
+       memset(&conn_req, 0, sizeof(conn_req));
+       memset(&conn_resp, 0, sizeof(conn_resp));
+
+       /* these fields are the same for all service endpoints */
+       conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
+       conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+
+       /* connect to control service */
+       conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+
+       status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
+       if (status) {
+               ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
+                           status);
+               return status;
+       }
+
+       ar->wmi.eid = conn_resp.eid;
+       return 0;
+}
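+
+/*
+ * A minimal bring-up sketch for the WMI init helpers above (assumed
+ * call order; the wait_for_*_ready() helpers are the completion
+ * waiters defined earlier in this file, error handling omitted):
+ *
+ *   ath10k_wmi_attach(ar);
+ *   ath10k_wmi_connect_htc_service(ar);
+ *   ath10k_wmi_wait_for_service_ready(ar);
+ *   ath10k_wmi_cmd_init(ar);
+ *   ath10k_wmi_wait_for_unified_ready(ar);
+ */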
+
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+                                 u16 rd5g, u16 ctl2g, u16 ctl5g)
+{
+       struct wmi_pdev_set_regdomain_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+       cmd->reg_domain = __cpu_to_le32(rd);
+       cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+       cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+       cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+       cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
+                  rd, rd2g, rd5g, ctl2g, ctl5g);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+}
+
+int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
+                               const struct wmi_channel_arg *arg)
+{
+       struct wmi_set_channel_cmd *cmd;
+       struct sk_buff *skb;
+
+       if (arg->passive)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_set_channel_cmd *)skb->data;
+       cmd->chan.mhz               = __cpu_to_le32(arg->freq);
+       cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
+       cmd->chan.mode              = arg->mode;
+       cmd->chan.min_power         = arg->min_power;
+       cmd->chan.max_power         = arg->max_power;
+       cmd->chan.reg_power         = arg->max_reg_power;
+       cmd->chan.reg_classid       = arg->reg_class_id;
+       cmd->chan.antenna_max       = arg->max_antenna_gain;
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi set channel mode %d freq %d\n",
+                  arg->mode, arg->freq);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
+}
+
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
+{
+       struct wmi_pdev_suspend_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+       cmd->suspend_opt = __cpu_to_le32(WMI_PDEV_SUSPEND);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
+}
+
+int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+{
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(0);
+       if (!skb)
+               return -ENOMEM;
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
+}
+
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
+                             u32 value)
+{
+       struct wmi_pdev_set_param_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+       cmd->param_id    = __cpu_to_le32(id);
+       cmd->param_value = __cpu_to_le32(value);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
+                  id, value);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
+}
+
+int ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+       struct wmi_init_cmd *cmd;
+       struct sk_buff *buf;
+       struct wmi_resource_config config = {};
+       u32 val;
+
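+       /* the TARGET_* values below are compile-time defaults for the
+        * firmware resource configuration (defined elsewhere in this
+        * driver) */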
+       config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
+       config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
+       config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
+
+       config.num_offload_reorder_bufs =
+               __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
+
+       config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
+       config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
+       config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
+       config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
+       config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
+       config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
+       config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
+
+       config.scan_max_pending_reqs =
+               __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
+
+       config.bmiss_offload_max_vdev =
+               __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
+
+       config.roam_offload_max_vdev =
+               __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
+
+       config.roam_offload_max_ap_profiles =
+               __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+       config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
+       config.num_mcast_table_elems =
+               __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
+
+       config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
+       config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
+       config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
+       config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
+       config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
+
+       val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+       config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+       config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
+
+       config.gtk_offload_max_vdev =
+               __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
+
+       config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
+       config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
+
+       buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!buf)
+               return -ENOMEM;
+
+       cmd = (struct wmi_init_cmd *)buf->data;
+       cmd->num_host_mem_chunks = 0;
+       memcpy(&cmd->resource_config, &config, sizeof(config));
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
+       return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
+}
+
+static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
+{
+       int len;
+
+       len = sizeof(struct wmi_start_scan_cmd);
+
+       if (arg->ie_len) {
+               if (!arg->ie)
+                       return -EINVAL;
+               if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+                       return -EINVAL;
+
+               len += sizeof(struct wmi_ie_data);
+               len += roundup(arg->ie_len, 4);
+       }
+
+       if (arg->n_channels) {
+               if (!arg->channels)
+                       return -EINVAL;
+               if (arg->n_channels > ARRAY_SIZE(arg->channels))
+                       return -EINVAL;
+
+               len += sizeof(struct wmi_chan_list);
+               len += sizeof(__le32) * arg->n_channels;
+       }
+
+       if (arg->n_ssids) {
+               if (!arg->ssids)
+                       return -EINVAL;
+               if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+                       return -EINVAL;
+
+               len += sizeof(struct wmi_ssid_list);
+               len += sizeof(struct wmi_ssid) * arg->n_ssids;
+       }
+
+       if (arg->n_bssids) {
+               if (!arg->bssids)
+                       return -EINVAL;
+               if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+                       return -EINVAL;
+
+               len += sizeof(struct wmi_bssid_list);
+               len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+       }
+
+       return len;
+}
+
+int ath10k_wmi_start_scan(struct ath10k *ar,
+                         const struct wmi_start_scan_arg *arg)
+{
+       struct wmi_start_scan_cmd *cmd;
+       struct sk_buff *skb;
+       struct wmi_ie_data *ie;
+       struct wmi_chan_list *channels;
+       struct wmi_ssid_list *ssids;
+       struct wmi_bssid_list *bssids;
+       u32 scan_id;
+       u32 scan_req_id;
+       int off;
+       int len = 0;
+       int i;
+
+       len = ath10k_wmi_start_scan_calc_len(arg);
+       if (len < 0)
+               return len; /* len contains error code here */
+
+       skb = ath10k_wmi_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
+       scan_id |= arg->scan_id;
+
+       scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+       scan_req_id |= arg->scan_req_id;
+
+       cmd = (struct wmi_start_scan_cmd *)skb->data;
+       cmd->scan_id            = __cpu_to_le32(scan_id);
+       cmd->scan_req_id        = __cpu_to_le32(scan_req_id);
+       cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
+       cmd->scan_priority      = __cpu_to_le32(arg->scan_priority);
+       cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+       cmd->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
+       cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+       cmd->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
+       cmd->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
+       cmd->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
+       cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+       cmd->idle_time          = __cpu_to_le32(arg->idle_time);
+       cmd->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
+       cmd->probe_delay        = __cpu_to_le32(arg->probe_delay);
+       cmd->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
+
+       /* TLV list starts after fields included in the struct */
+       off = sizeof(*cmd);
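+
+       /* resulting buffer layout (each optional section is present
+        * only when requested):
+        *
+        *   [start_scan_cmd][chan_list][ssid_list][bssid_list][ie_data]
+        */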
+
+       if (arg->n_channels) {
+               channels = (void *)skb->data + off;
+               channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
+               channels->num_chan = __cpu_to_le32(arg->n_channels);
+
+               for (i = 0; i < arg->n_channels; i++)
+                       channels->channel_list[i] =
+                               __cpu_to_le32(arg->channels[i]);
+
+               off += sizeof(*channels);
+               off += sizeof(__le32) * arg->n_channels;
+       }
+
+       if (arg->n_ssids) {
+               ssids = (void *)skb->data + off;
+               ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
+               ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
+
+               for (i = 0; i < arg->n_ssids; i++) {
+                       ssids->ssids[i].ssid_len =
+                               __cpu_to_le32(arg->ssids[i].len);
+                       memcpy(&ssids->ssids[i].ssid,
+                              arg->ssids[i].ssid,
+                              arg->ssids[i].len);
+               }
+
+               off += sizeof(*ssids);
+               off += sizeof(struct wmi_ssid) * arg->n_ssids;
+       }
+
+       if (arg->n_bssids) {
+               bssids = (void *)skb->data + off;
+               bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
+               bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
+
+               for (i = 0; i < arg->n_bssids; i++)
+                       memcpy(&bssids->bssid_list[i],
+                              arg->bssids[i].bssid,
+                              ETH_ALEN);
+
+               off += sizeof(*bssids);
+               off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+       }
+
+       if (arg->ie_len) {
+               ie = (void *)skb->data + off;
+               ie->tag = __cpu_to_le32(WMI_IE_TAG);
+               ie->ie_len = __cpu_to_le32(arg->ie_len);
+               memcpy(ie->ie_data, arg->ie, arg->ie_len);
+
+               off += sizeof(*ie);
+               off += roundup(arg->ie_len, 4);
+       }
+
+       if (off != skb->len) {
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
+       return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
+}
+
+void ath10k_wmi_start_scan_init(struct ath10k *ar,
+                               struct wmi_start_scan_arg *arg)
+{
+       /* setup commonly used values */
+       arg->scan_req_id = 1;
+       arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+       arg->dwell_time_active = 50;
+       arg->dwell_time_passive = 150;
+       arg->min_rest_time = 50;
+       arg->max_rest_time = 500;
+       arg->repeat_probe_time = 0;
+       arg->probe_spacing_time = 0;
+       arg->idle_time = 0;
+       arg->max_scan_time = 5000;
+       arg->probe_delay = 5;
+       arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
+               | WMI_SCAN_EVENT_COMPLETED
+               | WMI_SCAN_EVENT_BSS_CHANNEL
+               | WMI_SCAN_EVENT_FOREIGN_CHANNEL
+               | WMI_SCAN_EVENT_DEQUEUED;
+       arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+       arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+       arg->n_bssids = 1;
+       arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
+}
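+
+/*
+ * Typical scan invocation using the helper above (a sketch; the
+ * scan_id value and missing error handling are illustrative, not
+ * prescribed by this API):
+ *
+ *   struct wmi_start_scan_arg arg = {};
+ *
+ *   ath10k_wmi_start_scan_init(ar, &arg);
+ *   arg.vdev_id = vdev_id;
+ *   arg.scan_id = 1;
+ *   ret = ath10k_wmi_start_scan(ar, &arg);
+ */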
+
+int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+{
+       struct wmi_stop_scan_cmd *cmd;
+       struct sk_buff *skb;
+       u32 scan_id;
+       u32 req_id;
+
+       if (arg->req_id > 0xFFF)
+               return -EINVAL;
+       if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
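+       /* scan and requestor ids are 12-bit host-chosen values; the
+        * WMI_HOST_*_PREFIX bits mark them, presumably to tell
+        * host-initiated scans apart from target-generated ones */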
+       scan_id = arg->u.scan_id;
+       scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+       req_id = arg->req_id;
+       req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+       cmd = (struct wmi_stop_scan_cmd *)skb->data;
+       cmd->req_type    = __cpu_to_le32(arg->req_type);
+       cmd->vdev_id     = __cpu_to_le32(arg->u.vdev_id);
+       cmd->scan_id     = __cpu_to_le32(scan_id);
+       cmd->scan_req_id = __cpu_to_le32(req_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
+                  arg->req_id, arg->req_type, arg->u.scan_id);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
+}
+
+int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+                          enum wmi_vdev_type type,
+                          enum wmi_vdev_subtype subtype,
+                          const u8 macaddr[ETH_ALEN])
+{
+       struct wmi_vdev_create_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_create_cmd *)skb->data;
+       cmd->vdev_id      = __cpu_to_le32(vdev_id);
+       cmd->vdev_type    = __cpu_to_le32(type);
+       cmd->vdev_subtype = __cpu_to_le32(subtype);
+       memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
+                  vdev_id, type, subtype, macaddr);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
+}
+
+int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+       struct wmi_vdev_delete_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "WMI vdev delete id %d\n", vdev_id);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
+}
+
+static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
+                               const struct wmi_vdev_start_request_arg *arg,
+                               enum wmi_cmd_id cmd_id)
+{
+       struct wmi_vdev_start_request_cmd *cmd;
+       struct sk_buff *skb;
+       const char *cmdname;
+       u32 flags = 0;
+
+       if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
+           cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
+               return -EINVAL;
+       if (WARN_ON(arg->ssid && arg->ssid_len == 0))
+               return -EINVAL;
+       if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+               return -EINVAL;
+       if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+               return -EINVAL;
+
+       if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
+               cmdname = "start";
+       else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
+               cmdname = "restart";
+       else
+               return -EINVAL; /* should not happen, we already check cmd_id */
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       if (arg->hidden_ssid)
+               flags |= WMI_VDEV_START_HIDDEN_SSID;
+       if (arg->pmf_enabled)
+               flags |= WMI_VDEV_START_PMF_ENABLED;
+
+       cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+       cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
+       cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
+       cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
+       cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
+       cmd->flags           = __cpu_to_le32(flags);
+       cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
+       cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);
+
+       if (arg->ssid) {
+               cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+               memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+       }
+
+       cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
+
+       cmd->chan.band_center_freq1 =
+               __cpu_to_le32(arg->channel.band_center_freq1);
+
+       cmd->chan.mode = arg->channel.mode;
+       cmd->chan.min_power = arg->channel.min_power;
+       cmd->chan.max_power = arg->channel.max_power;
+       cmd->chan.reg_power = arg->channel.max_reg_power;
+       cmd->chan.reg_classid = arg->channel.reg_class_id;
+       cmd->chan.antenna_max = arg->channel.max_antenna_gain;
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi vdev %s id 0x%x freq %d mode %d ch_flags 0x%08x max_power %d\n",
+                  cmdname, arg->vdev_id, arg->channel.freq,
+                  arg->channel.mode, flags, arg->channel.max_power);
+
+       return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+int ath10k_wmi_vdev_start(struct ath10k *ar,
+                         const struct wmi_vdev_start_request_arg *arg)
+{
+       return ath10k_wmi_vdev_start_restart(ar, arg,
+                                            WMI_VDEV_START_REQUEST_CMDID);
+}
+
+int ath10k_wmi_vdev_restart(struct ath10k *ar,
+                    const struct wmi_vdev_start_request_arg *arg)
+{
+       return ath10k_wmi_vdev_start_restart(ar, arg,
+                                            WMI_VDEV_RESTART_REQUEST_CMDID);
+}
+
+int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+       struct wmi_vdev_stop_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
+}
+
+int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+       struct wmi_vdev_up_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_up_cmd *)skb->data;
+       cmd->vdev_id       = __cpu_to_le32(vdev_id);
+       cmd->vdev_assoc_id = __cpu_to_le32(aid);
+       memcpy(&cmd->vdev_bssid.addr, bssid, 6);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+                  vdev_id, aid, bssid);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
+}
+
+int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+       struct wmi_vdev_down_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_down_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi mgmt vdev down id 0x%x\n", vdev_id);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
+}
+
+int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+                             enum wmi_vdev_param param_id, u32 param_value)
+{
+       struct wmi_vdev_set_param_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+       cmd->vdev_id     = __cpu_to_le32(vdev_id);
+       cmd->param_id    = __cpu_to_le32(param_id);
+       cmd->param_value = __cpu_to_le32(param_value);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi vdev id 0x%x set param %d value %d\n",
+                  vdev_id, param_id, param_value);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
+}
+
+int ath10k_wmi_vdev_install_key(struct ath10k *ar,
+                               const struct wmi_vdev_install_key_arg *arg)
+{
+       struct wmi_vdev_install_key_cmd *cmd;
+       struct sk_buff *skb;
+
+       if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+               return -EINVAL;
+       if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+       cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
+       cmd->key_idx       = __cpu_to_le32(arg->key_idx);
+       cmd->key_flags     = __cpu_to_le32(arg->key_flags);
+       cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
+       cmd->key_len       = __cpu_to_le32(arg->key_len);
+       cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+       cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+       if (arg->macaddr)
+               memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
+       if (arg->key_data)
+               memcpy(cmd->key_data, arg->key_data, arg->key_len);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+}
+
+int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+                          const u8 peer_addr[ETH_ALEN])
+{
+       struct wmi_peer_create_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_create_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi peer create vdev_id %d peer_addr %pM\n",
+                  vdev_id, peer_addr);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
+}
+
+int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+                          const u8 peer_addr[ETH_ALEN])
+{
+       struct wmi_peer_delete_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_delete_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi peer delete vdev_id %d peer_addr %pM\n",
+                  vdev_id, peer_addr);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
+}
+
+int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+                         const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+       struct wmi_peer_flush_tids_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+       cmd->vdev_id         = __cpu_to_le32(vdev_id);
+       cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+       memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
+                  vdev_id, peer_addr, tid_bitmap);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+}
+
+int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
+                             const u8 *peer_addr, enum wmi_peer_param param_id,
+                             u32 param_value)
+{
+       struct wmi_peer_set_param_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+       cmd->vdev_id     = __cpu_to_le32(vdev_id);
+       cmd->param_id    = __cpu_to_le32(param_id);
+       cmd->param_value = __cpu_to_le32(param_value);
+       memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi vdev %d peer 0x%pM set param %d value %d\n",
+                  vdev_id, peer_addr, param_id, param_value);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
+}
+
+int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+                         enum wmi_sta_ps_mode psmode)
+{
+       struct wmi_sta_powersave_mode_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
+       cmd->vdev_id     = __cpu_to_le32(vdev_id);
+       cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi set powersave id 0x%x mode %d\n",
+                  vdev_id, psmode);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+}
+
+int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+                               enum wmi_sta_powersave_param param_id,
+                               u32 value)
+{
+       struct wmi_sta_powersave_param_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+       cmd->vdev_id     = __cpu_to_le32(vdev_id);
+       cmd->param_id    = __cpu_to_le32(param_id);
+       cmd->param_value = __cpu_to_le32(value);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi sta ps param vdev_id 0x%x param %d value %d\n",
+                  vdev_id, param_id, value);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+}
+
+int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+                              enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+       struct wmi_ap_ps_peer_cmd *cmd;
+       struct sk_buff *skb;
+
+       if (!mac)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->param_id = __cpu_to_le32(param_id);
+       cmd->param_value = __cpu_to_le32(value);
+       memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
+                  vdev_id, param_id, value, mac);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+}
+
+int ath10k_wmi_scan_chan_list(struct ath10k *ar,
+                             const struct wmi_scan_chan_list_arg *arg)
+{
+       struct wmi_scan_chan_list_cmd *cmd;
+       struct sk_buff *skb;
+       struct wmi_channel_arg *ch;
+       struct wmi_channel *ci;
+       int len;
+       int i;
+
+       len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
+
+       skb = ath10k_wmi_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+       cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+       for (i = 0; i < arg->n_channels; i++) {
+               u32 flags = 0;
+
+               ch = &arg->channels[i];
+               ci = &cmd->chan_info[i];
+
+               if (ch->passive)
+                       flags |= WMI_CHAN_FLAG_PASSIVE;
+               if (ch->allow_ibss)
+                       flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+               if (ch->allow_ht)
+                       flags |= WMI_CHAN_FLAG_ALLOW_HT;
+               if (ch->allow_vht)
+                       flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+               if (ch->ht40plus)
+                       flags |= WMI_CHAN_FLAG_HT40_PLUS;
+
+               ci->mhz               = __cpu_to_le32(ch->freq);
+               ci->band_center_freq1 = __cpu_to_le32(ch->freq);
+               ci->band_center_freq2 = 0;
+               ci->min_power         = ch->min_power;
+               ci->max_power         = ch->max_power;
+               ci->reg_power         = ch->max_reg_power;
+               ci->antenna_max       = ch->max_antenna_gain;
+
+               /* mode & flags share storage */
+               ci->mode              = ch->mode;
+               ci->flags            |= __cpu_to_le32(flags);
+       }
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
+}
+
+int ath10k_wmi_peer_assoc(struct ath10k *ar,
+                         const struct wmi_peer_assoc_complete_arg *arg)
+{
+       struct wmi_peer_assoc_complete_cmd *cmd;
+       struct sk_buff *skb;
+
+       if (arg->peer_mpdu_density > 16)
+               return -EINVAL;
+       if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+               return -EINVAL;
+       if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
+       cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
+       cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+       cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
+       cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
+       cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
+       cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+       cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
+       cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
+       cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
+       cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
+       cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
+       cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
+       cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);
+
+       memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);
+
+       cmd->peer_legacy_rates.num_rates =
+               __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+       memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
+              arg->peer_legacy_rates.num_rates);
+
+       cmd->peer_ht_rates.num_rates =
+               __cpu_to_le32(arg->peer_ht_rates.num_rates);
+       memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
+              arg->peer_ht_rates.num_rates);
+
+       cmd->peer_vht_rates.rx_max_rate =
+               __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+       cmd->peer_vht_rates.rx_mcs_set =
+               __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+       cmd->peer_vht_rates.tx_max_rate =
+               __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+       cmd->peer_vht_rates.tx_mcs_set =
+               __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
+}
+
+int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
+{
+       struct wmi_bcn_tx_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_bcn_tx_cmd *)skb->data;
+       cmd->hdr.vdev_id  = __cpu_to_le32(arg->vdev_id);
+       cmd->hdr.tx_rate  = __cpu_to_le32(arg->tx_rate);
+       cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
+       cmd->hdr.bcn_len  = __cpu_to_le32(arg->bcn_len);
+       memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
+}
+
+static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
+                                         const struct wmi_wmm_params_arg *arg)
+{
+       params->cwmin  = __cpu_to_le32(arg->cwmin);
+       params->cwmax  = __cpu_to_le32(arg->cwmax);
+       params->aifs   = __cpu_to_le32(arg->aifs);
+       params->txop   = __cpu_to_le32(arg->txop);
+       params->acm    = __cpu_to_le32(arg->acm);
+       params->no_ack = __cpu_to_le32(arg->no_ack);
+}
+
+int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+                       const struct wmi_pdev_set_wmm_params_arg *arg)
+{
+       struct wmi_pdev_set_wmm_params *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
+       ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
+       ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
+       ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
+       ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
+}
+
+int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+{
+       struct wmi_request_stats_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_request_stats_cmd *)skb->data;
+       cmd->stats_id = __cpu_to_le32(stats_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
new file mode 100644 (file)
index 0000000..9555f5a
--- /dev/null
@@ -0,0 +1,3052 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_H_
+#define _WMI_H_
+
+#include <linux/types.h>
+#include <net/mac80211.h>
+
+/*
+ * This file specifies the WMI interface for the Unified Software
+ * Architecture.
+ *
+ * It includes definitions of all the commands and events. Commands are
+ * messages from the host to the target. Events and Replies are messages
+ * from the target to the host.
+ *
+ * Ownership of correctness in regards to WMI commands belongs to the host
+ * driver and the target is not required to validate parameters for value,
+ * proper range, or any other checking.
+ *
+ * Guidelines for extending this interface are below.
+ *
+ * 1. Add new WMI commands ONLY within the specified range - 0x9000 - 0x9fff
+ *
+ * 2. Use ONLY u32 type for defining member variables within WMI
+ *    command/event structures. Do not use u8, u16, bool or
+ *    enum types within these structures.
+ *
+ * 3. DO NOT define bit fields within structures. Implement bit fields
+ *    using masks if necessary. Do not use the programming language's bit
+ *    field definition.
+ *
+ * 4. Define macros for encode/decode of u8, u16 fields within
+ *    the u32 variables. Use these macros for set/get of these fields.
+ *    Try to use this to optimize the structure without bloating it with
+ *    u32 variables for every lower sized field.
+ *
+ * 5. Do not use PACK/UNPACK attributes for the structures as each member
+ *    variable is already 4-byte aligned by virtue of being a u32
+ *    type.
+ *
+ * 6. Comment each parameter of a WMI command/event structure by
+ *    opening the C comment with two stars instead of one, to enable
+ *    HTML document generation using Doxygen.
+ *
+ */
+
+/* Control Path */
+struct wmi_cmd_hdr {
+       __le32 cmd_id;
+} __packed;
+
+#define WMI_CMD_HDR_CMD_ID_MASK   0x00FFFFFF
+#define WMI_CMD_HDR_CMD_ID_LSB    0
+#define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000
+#define WMI_CMD_HDR_PLT_PRIV_LSB  24
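+
+/*
+ * Illustrative sketch (not part of the interface): extracting the
+ * command ID from a received header, assuming 'hdr' points at a
+ * struct wmi_cmd_hdr in a received buffer:
+ *
+ *   u32 cmd_id;
+ *
+ *   cmd_id  = __le32_to_cpu(hdr->cmd_id) & WMI_CMD_HDR_CMD_ID_MASK;
+ *   cmd_id >>= WMI_CMD_HDR_CMD_ID_LSB;
+ */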
+
+#define HTC_PROTOCOL_VERSION    0x0002
+#define WMI_PROTOCOL_VERSION    0x0002
+
+enum wmi_service_id {
+       WMI_SERVICE_BEACON_OFFLOAD = 0,   /* beacon offload */
+       WMI_SERVICE_SCAN_OFFLOAD,         /* scan offload */
+       WMI_SERVICE_ROAM_OFFLOAD,         /* roam offload */
+       WMI_SERVICE_BCN_MISS_OFFLOAD,     /* beacon miss offload */
+       WMI_SERVICE_STA_PWRSAVE,          /* fake sleep + basic power save */
+       WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */
+       WMI_SERVICE_AP_UAPSD,             /* uapsd on AP */
+       WMI_SERVICE_AP_DFS,               /* DFS on AP */
+       WMI_SERVICE_11AC,                 /* supports 11ac */
+       WMI_SERVICE_BLOCKACK,   /* supports triggering ADDBA/DELBA from host */
+       WMI_SERVICE_PHYERR,               /* PHY error */
+       WMI_SERVICE_BCN_FILTER,           /* Beacon filter support */
+       WMI_SERVICE_RTT,                  /* RTT (round trip time) support */
+       WMI_SERVICE_RATECTRL,             /* Rate-control */
+       WMI_SERVICE_WOW,                  /* WOW Support */
+       WMI_SERVICE_RATECTRL_CACHE,       /* Rate-control caching */
+       WMI_SERVICE_IRAM_TIDS,            /* TIDs in IRAM */
+       WMI_SERVICE_ARPNS_OFFLOAD,        /* ARP NS Offload support */
+       WMI_SERVICE_NLO,                  /* Network list offload service */
+       WMI_SERVICE_GTK_OFFLOAD,          /* GTK offload */
+       WMI_SERVICE_SCAN_SCH,             /* Scan Scheduler Service */
+       WMI_SERVICE_CSA_OFFLOAD,          /* CSA offload service */
+       WMI_SERVICE_CHATTER,              /* Chatter service */
+       WMI_SERVICE_COEX_FREQAVOID,       /* FW report freq range to avoid */
+       WMI_SERVICE_PACKET_POWER_SAVE,    /* packet power save service */
+       WMI_SERVICE_FORCE_FW_HANG,        /* To test fw recovery mechanism */
+       WMI_SERVICE_GPIO,                 /* GPIO service */
+       WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */
+       WMI_STA_UAPSD_BASIC_AUTO_TRIG,    /* UAPSD AC Trigger Generation  */
+       WMI_STA_UAPSD_VAR_AUTO_TRIG,      /* UAPSD AC trigger gen, variable */
+       WMI_SERVICE_STA_KEEP_ALIVE,       /* STA keep alive mechanism support */
+       WMI_SERVICE_TX_ENCAP,             /* Packet type for TX encapsulation */
+
+       WMI_SERVICE_LAST,
+       WMI_MAX_SERVICE = 64              /* max service */
+};
+
+static inline char *wmi_service_name(int service_id)
+{
+       switch (service_id) {
+       case WMI_SERVICE_BEACON_OFFLOAD:
+               return "BEACON_OFFLOAD";
+       case WMI_SERVICE_SCAN_OFFLOAD:
+               return "SCAN_OFFLOAD";
+       case WMI_SERVICE_ROAM_OFFLOAD:
+               return "ROAM_OFFLOAD";
+       case WMI_SERVICE_BCN_MISS_OFFLOAD:
+               return "BCN_MISS_OFFLOAD";
+       case WMI_SERVICE_STA_PWRSAVE:
+               return "STA_PWRSAVE";
+       case WMI_SERVICE_STA_ADVANCED_PWRSAVE:
+               return "STA_ADVANCED_PWRSAVE";
+       case WMI_SERVICE_AP_UAPSD:
+               return "AP_UAPSD";
+       case WMI_SERVICE_AP_DFS:
+               return "AP_DFS";
+       case WMI_SERVICE_11AC:
+               return "11AC";
+       case WMI_SERVICE_BLOCKACK:
+               return "BLOCKACK";
+       case WMI_SERVICE_PHYERR:
+               return "PHYERR";
+       case WMI_SERVICE_BCN_FILTER:
+               return "BCN_FILTER";
+       case WMI_SERVICE_RTT:
+               return "RTT";
+       case WMI_SERVICE_RATECTRL:
+               return "RATECTRL";
+       case WMI_SERVICE_WOW:
+               return "WOW";
+       case WMI_SERVICE_RATECTRL_CACHE:
+               return "RATECTRL CACHE";
+       case WMI_SERVICE_IRAM_TIDS:
+               return "IRAM TIDS";
+       case WMI_SERVICE_ARPNS_OFFLOAD:
+               return "ARPNS_OFFLOAD";
+       case WMI_SERVICE_NLO:
+               return "NLO";
+       case WMI_SERVICE_GTK_OFFLOAD:
+               return "GTK_OFFLOAD";
+       case WMI_SERVICE_SCAN_SCH:
+               return "SCAN_SCH";
+       case WMI_SERVICE_CSA_OFFLOAD:
+               return "CSA_OFFLOAD";
+       case WMI_SERVICE_CHATTER:
+               return "CHATTER";
+       case WMI_SERVICE_COEX_FREQAVOID:
+               return "COEX_FREQAVOID";
+       case WMI_SERVICE_PACKET_POWER_SAVE:
+               return "PACKET_POWER_SAVE";
+       case WMI_SERVICE_FORCE_FW_HANG:
+               return "FORCE FW HANG";
+       case WMI_SERVICE_GPIO:
+               return "GPIO";
+       case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM:
+               return "MODULATED DTIM";
+       case WMI_STA_UAPSD_BASIC_AUTO_TRIG:
+               return "BASIC UAPSD";
+       case WMI_STA_UAPSD_VAR_AUTO_TRIG:
+               return "VAR UAPSD";
+       case WMI_SERVICE_STA_KEEP_ALIVE:
+               return "STA KEEP ALIVE";
+       case WMI_SERVICE_TX_ENCAP:
+               return "TX ENCAP";
+       default:
+               return "UNKNOWN SERVICE";
+       }
+}
+
+
+#define WMI_SERVICE_BM_SIZE \
+       ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32))
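+
+/*
+ * As written, with WMI_MAX_SERVICE = 64 and sizeof(u32) = 4 this
+ * evaluates to (64 + 4 - 1) / 4 = 16 words; note the divisor is
+ * sizeof(u32), not the number of bits in a u32, which matches the
+ * per-word packing used by WMI_SERVICE_IS_ENABLED() below.
+ */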
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+       union {
+               u8 addr[6];
+               struct {
+                       u32 word0;
+                       u32 word1;
+               } __packed;
+       } __packed;
+} __packed;
+
+/* macro to convert MAC address from WMI word format to char array */
+#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
+       (c_macaddr)[0] =  ((pwmi_mac_addr)->word0) & 0xff; \
+       (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
+       (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
+       (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
+       (c_macaddr)[4] =  ((pwmi_mac_addr)->word1) & 0xff; \
+       (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
+       } while (0)
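+
+/*
+ * Usage sketch (illustration only; 'wma' and 'mac' are assumed locals,
+ * not part of this interface):
+ *
+ *   struct wmi_mac_addr wma;
+ *   u8 mac[ETH_ALEN];
+ *
+ *   WMI_MAC_ADDR_TO_CHAR_ARRAY(&wma, mac);
+ *
+ * after which, given a filled-in wma, mac[0]..mac[5] hold the six
+ * address octets, low byte of word0 first.
+ */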
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+       /* 0 to 2 are reserved */
+       WMI_GRP_START = 0x3,
+       WMI_GRP_SCAN = WMI_GRP_START,
+       WMI_GRP_PDEV,
+       WMI_GRP_VDEV,
+       WMI_GRP_PEER,
+       WMI_GRP_MGMT,
+       WMI_GRP_BA_NEG,
+       WMI_GRP_STA_PS,
+       WMI_GRP_DFS,
+       WMI_GRP_ROAM,
+       WMI_GRP_OFL_SCAN,
+       WMI_GRP_P2P,
+       WMI_GRP_AP_PS,
+       WMI_GRP_RATE_CTRL,
+       WMI_GRP_PROFILE,
+       WMI_GRP_SUSPEND,
+       WMI_GRP_BCN_FILTER,
+       WMI_GRP_WOW,
+       WMI_GRP_RTT,
+       WMI_GRP_SPECTRAL,
+       WMI_GRP_STATS,
+       WMI_GRP_ARP_NS_OFL,
+       WMI_GRP_NLO_OFL,
+       WMI_GRP_GTK_OFL,
+       WMI_GRP_CSA_OFL,
+       WMI_GRP_CHATTER,
+       WMI_GRP_TID_ADDBA,
+       WMI_GRP_MISC,
+       WMI_GRP_GPIO,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
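+
+/*
+ * Worked example: WMI_GRP_SCAN is 0x3, so
+ * WMI_CMD_GRP(WMI_GRP_SCAN) = (0x3 << 12) | 0x1 = 0x3001; the scan
+ * commands below therefore number sequentially from 0x3001
+ * (WMI_START_SCAN_CMDID) onwards.
+ */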
+
+/* Command IDs and command events. */
+enum wmi_cmd_id {
+       WMI_INIT_CMDID = 0x1,
+
+       /* Scan specific commands */
+       WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN),
+       WMI_STOP_SCAN_CMDID,
+       WMI_SCAN_CHAN_LIST_CMDID,
+       WMI_SCAN_SCH_PRIO_TBL_CMDID,
+
+       /* PDEV (physical device) specific commands */
+       WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV),
+       WMI_PDEV_SET_CHANNEL_CMDID,
+       WMI_PDEV_SET_PARAM_CMDID,
+       WMI_PDEV_PKTLOG_ENABLE_CMDID,
+       WMI_PDEV_PKTLOG_DISABLE_CMDID,
+       WMI_PDEV_SET_WMM_PARAMS_CMDID,
+       WMI_PDEV_SET_HT_CAP_IE_CMDID,
+       WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+       WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+       WMI_PDEV_SET_QUIET_MODE_CMDID,
+       WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       WMI_PDEV_GET_TPC_CONFIG_CMDID,
+       WMI_PDEV_SET_BASE_MACADDR_CMDID,
+
+       /* VDEV (virtual device) specific commands */
+       WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV),
+       WMI_VDEV_DELETE_CMDID,
+       WMI_VDEV_START_REQUEST_CMDID,
+       WMI_VDEV_RESTART_REQUEST_CMDID,
+       WMI_VDEV_UP_CMDID,
+       WMI_VDEV_STOP_CMDID,
+       WMI_VDEV_DOWN_CMDID,
+       WMI_VDEV_SET_PARAM_CMDID,
+       WMI_VDEV_INSTALL_KEY_CMDID,
+
+       /* peer specific commands */
+       WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER),
+       WMI_PEER_DELETE_CMDID,
+       WMI_PEER_FLUSH_TIDS_CMDID,
+       WMI_PEER_SET_PARAM_CMDID,
+       WMI_PEER_ASSOC_CMDID,
+       WMI_PEER_ADD_WDS_ENTRY_CMDID,
+       WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+       WMI_PEER_MCAST_GROUP_CMDID,
+
+       /* beacon/management specific commands */
+       WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT),
+       WMI_PDEV_SEND_BCN_CMDID,
+       WMI_BCN_TMPL_CMDID,
+       WMI_BCN_FILTER_RX_CMDID,
+       WMI_PRB_REQ_FILTER_RX_CMDID,
+       WMI_MGMT_TX_CMDID,
+       WMI_PRB_TMPL_CMDID,
+
+       /* commands to control BA negotiation directly from host. */
+       WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG),
+       WMI_ADDBA_SEND_CMDID,
+       WMI_ADDBA_STATUS_CMDID,
+       WMI_DELBA_SEND_CMDID,
+       WMI_ADDBA_SET_RESP_CMDID,
+       WMI_SEND_SINGLEAMSDU_CMDID,
+
+       /* Station power save specific config */
+       WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS),
+       WMI_STA_POWERSAVE_PARAM_CMDID,
+       WMI_STA_MIMO_PS_MODE_CMDID,
+
+       /* DFS specific commands */
+       WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS),
+       WMI_PDEV_DFS_DISABLE_CMDID,
+
+       /* Roaming specific  commands */
+       WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM),
+       WMI_ROAM_SCAN_RSSI_THRESHOLD,
+       WMI_ROAM_SCAN_PERIOD,
+       WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       WMI_ROAM_AP_PROFILE,
+
+       /* offload scan specific commands */
+       WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN),
+       WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+       WMI_OFL_SCAN_PERIOD,
+
+       /* P2P specific commands */
+       WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P),
+       WMI_P2P_DEV_SET_DISCOVERABILITY,
+       WMI_P2P_GO_SET_BEACON_IE,
+       WMI_P2P_GO_SET_PROBE_RESP_IE,
+       WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+
+       /* AP power save specific config */
+       WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS),
+       WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+       /* Rate-control specific commands */
+       WMI_PEER_RATE_RETRY_SCHED_CMDID =
+       WMI_CMD_GRP(WMI_GRP_RATE_CTRL),
+
+       /* WLAN Profiling commands. */
+       WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE),
+       WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+       /* Suspend resume command Ids */
+       WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND),
+       WMI_PDEV_RESUME_CMDID,
+
+       /* Beacon filter commands */
+       WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER),
+       WMI_RMV_BCN_FILTER_CMDID,
+
+       /* WOW Specific WMI commands*/
+       WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW),
+       WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+       WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       WMI_WOW_ENABLE_CMDID,
+       WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+       /* RTT measurement related cmd */
+       WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT),
+       WMI_RTT_TSF_CMDID,
+
+       /* spectral scan commands */
+       WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL),
+       WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+
+       /* F/W stats */
+       WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS),
+
+       /* ARP offload request */
+       WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL),
+
+       /* Network list offload config */
+       WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL),
+
+       /* GTK offload Specific WMI commands*/
+       WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL),
+
+       /* CSA offload Specific WMI commands*/
+       WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL),
+       WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+
+       /* Chatter commands*/
+       WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER),
+
+       /* addba specific commands */
+       WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA),
+       WMI_PEER_TID_DELBA_CMDID,
+
+       /* set station mimo powersave method */
+       WMI_STA_DTIM_PS_METHOD_CMDID,
+       /* Configure the Station UAPSD AC Auto Trigger Parameters */
+       WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+
+       /* STA keep alive parameter configuration;
+        * requires WMI_SERVICE_STA_KEEP_ALIVE
+        */
+       WMI_STA_KEEPALIVE_CMD,
+
+       /* misc command group */
+       WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC),
+       WMI_PDEV_UTF_CMDID,
+       WMI_DBGLOG_CFG_CMDID,
+       WMI_PDEV_QVIT_CMDID,
+       WMI_PDEV_FTM_INTG_CMDID,
+       WMI_VDEV_SET_KEEPALIVE_CMDID,
+       WMI_VDEV_GET_KEEPALIVE_CMDID,
+
+       /* GPIO Configuration */
+       WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
+       WMI_GPIO_OUTPUT_CMDID,
+};
+
+enum wmi_event_id {
+       WMI_SERVICE_READY_EVENTID = 0x1,
+       WMI_READY_EVENTID,
+
+       /* Scan specific events */
+       WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+
+       /* PDEV specific events */
+       WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV),
+       WMI_CHAN_INFO_EVENTID,
+       WMI_PHYERR_EVENTID,
+
+       /* VDEV specific events */
+       WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
+       WMI_VDEV_STOPPED_EVENTID,
+       WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+
+       /* peer specific events */
+       WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),
+
+       /* beacon/mgmt specific events */
+       WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
+       WMI_HOST_SWBA_EVENTID,
+       WMI_TBTTOFFSET_UPDATE_EVENTID,
+
+       /* ADDBA Related WMI Events*/
+       WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG),
+       WMI_TX_ADDBA_COMPLETE_EVENTID,
+
+       /* Roam event to trigger roaming on host */
+       WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM),
+       WMI_PROFILE_MATCH,
+
+       /* WoW */
+       WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW),
+
+       /* RTT */
+       WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT),
+       WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+       WMI_RTT_ERROR_REPORT_EVENTID,
+
+       /* GTK offload */
+       WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL),
+       WMI_GTK_REKEY_FAIL_EVENTID,
+
+       /* CSA IE received event */
+       WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL),
+
+       /* Misc events */
+       WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
+       WMI_PDEV_UTF_EVENTID,
+       WMI_DEBUG_MESG_EVENTID,
+       WMI_UPDATE_STATS_EVENTID,
+       WMI_DEBUG_PRINT_EVENTID,
+       WMI_DCS_INTERFERENCE_EVENTID,
+       WMI_PDEV_QVIT_EVENTID,
+       WMI_WLAN_PROFILE_DATA_EVENTID,
+       WMI_PDEV_FTM_INTG_EVENTID,
+       WMI_WLAN_FREQ_AVOID_EVENTID,
+       WMI_VDEV_GET_KEEPALIVE_EVENTID,
+
+       /* GPIO Event */
+       WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
+};
+
+enum wmi_phy_mode {
+       MODE_11A        = 0,   /* 11a Mode */
+       MODE_11G        = 1,   /* 11b/g Mode */
+       MODE_11B        = 2,   /* 11b Mode */
+       MODE_11GONLY    = 3,   /* 11g only Mode */
+       MODE_11NA_HT20   = 4,  /* 11a HT20 mode */
+       MODE_11NG_HT20   = 5,  /* 11g HT20 mode */
+       MODE_11NA_HT40   = 6,  /* 11a HT40 mode */
+       MODE_11NG_HT40   = 7,  /* 11g HT40 mode */
+       MODE_11AC_VHT20 = 8,
+       MODE_11AC_VHT40 = 9,
+       MODE_11AC_VHT80 = 10,
+       /*    MODE_11AC_VHT160 = 11, */
+       MODE_11AC_VHT20_2G = 11,
+       MODE_11AC_VHT40_2G = 12,
+       MODE_11AC_VHT80_2G = 13,
+       MODE_UNKNOWN    = 14,
+       MODE_MAX        = 14
+};
+
+#define WMI_CHAN_LIST_TAG      0x1
+#define WMI_SSID_LIST_TAG      0x2
+#define WMI_BSSID_LIST_TAG     0x3
+#define WMI_IE_TAG             0x4
+
+struct wmi_channel {
+       __le32 mhz;
+       __le32 band_center_freq1;
+       __le32 band_center_freq2; /* valid for 11ac, 80plus80 */
+       union {
+               __le32 flags; /* WMI_CHAN_FLAG_ */
+               struct {
+                       u8 mode; /* only 6 LSBs */
+               } __packed;
+       } __packed;
+       union {
+               __le32 reginfo0;
+               struct {
+                       u8 min_power;
+                       u8 max_power;
+                       u8 reg_power;
+                       u8 reg_classid;
+               } __packed;
+       } __packed;
+       union {
+               __le32 reginfo1;
+               struct {
+                       u8 antenna_max;
+               } __packed;
+       } __packed;
+} __packed;
+
+struct wmi_channel_arg {
+       u32 freq;
+       u32 band_center_freq1;
+       bool passive;
+       bool allow_ibss;
+       bool allow_ht;
+       bool allow_vht;
+       bool ht40plus;
+       /* note: power unit is 1/4th of dBm */
+       u32 min_power;
+       u32 max_power;
+       u32 max_reg_power;
+       u32 max_antenna_gain;
+       u32 reg_class_id;
+       enum wmi_phy_mode mode;
+};
+
+enum wmi_channel_change_cause {
+       WMI_CHANNEL_CHANGE_CAUSE_NONE = 0,
+       WMI_CHANNEL_CHANGE_CAUSE_CSA,
+};
+
+#define WMI_CHAN_FLAG_HT40_PLUS      (1 << 6)
+#define WMI_CHAN_FLAG_PASSIVE        (1 << 7)
+#define WMI_CHAN_FLAG_ADHOC_ALLOWED  (1 << 8)
+#define WMI_CHAN_FLAG_AP_DISABLED    (1 << 9)
+#define WMI_CHAN_FLAG_DFS            (1 << 10)
+#define WMI_CHAN_FLAG_ALLOW_HT       (1 << 11)
+#define WMI_CHAN_FLAG_ALLOW_VHT      (1 << 12)
+
+/* Indicate reason for channel switch */
+#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
+
+#define WMI_MAX_SPATIAL_STREAM   3
+
+/* HT Capabilities*/
+#define WMI_HT_CAP_ENABLED                0x0001   /* HT Enabled/ disabled */
+#define WMI_HT_CAP_HT20_SGI       0x0002   /* Short Guard Interval with HT20 */
+#define WMI_HT_CAP_DYNAMIC_SMPS           0x0004   /* Dynamic MIMO powersave */
+#define WMI_HT_CAP_TX_STBC                0x0008   /* B3 TX STBC */
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT     3
+#define WMI_HT_CAP_RX_STBC                0x0030   /* B4-B5 RX STBC */
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT     4
+#define WMI_HT_CAP_LDPC                   0x0040   /* LDPC supported */
+#define WMI_HT_CAP_L_SIG_TXOP_PROT        0x0080   /* L-SIG TXOP Protection */
+#define WMI_HT_CAP_MPDU_DENSITY           0x0700   /* MPDU Density */
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI               0x0800
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED       | \
+                               WMI_HT_CAP_HT20_SGI      | \
+                               WMI_HT_CAP_HT40_SGI      | \
+                               WMI_HT_CAP_TX_STBC       | \
+                               WMI_HT_CAP_RX_STBC       | \
+                               WMI_HT_CAP_LDPC)
+
+
+/*
+ * WMI_VHT_CAP_* - these map to the IEEE 802.11ac VHT capability
+ * information field. The fields not defined here are either not
+ * supported or reserved. Do not change these masks; if you have to add
+ * a new one, follow the bit positions specified by the 802.11ac draft.
+ */
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK            0x00000003
+#define WMI_VHT_CAP_RX_LDPC                      0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ                    0x00000020
+#define WMI_VHT_CAP_TX_STBC                      0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK                 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT           8
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP            0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT      23
+#define WMI_VHT_CAP_RX_FIXED_ANT                 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT                 0x20000000
+
+/* The following also apply to the maximum HT A-MSDU length */
+#define WMI_VHT_CAP_MAX_MPDU_LEN_3839            0x00000000
+#define WMI_VHT_CAP_MAX_MPDU_LEN_7935            0x00000001
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454           0x00000002
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454  | \
+                                WMI_VHT_CAP_RX_LDPC             | \
+                                WMI_VHT_CAP_SGI_80MHZ           | \
+                                WMI_VHT_CAP_TX_STBC             | \
+                                WMI_VHT_CAP_RX_STBC_MASK        | \
+                                WMI_VHT_CAP_MAX_AMPDU_LEN_EXP   | \
+                                WMI_VHT_CAP_RX_FIXED_ANT        | \
+                                WMI_VHT_CAP_TX_FIXED_ANT)
+
+/*
+ * Interested readers should refer to the Rx/Tx MCS map definition in
+ * 802.11ac.
+ */
+#define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss)      ((3 & (r)) << (((ss) - 1) << 1))
+#define WMI_VHT_MAX_SUPP_RATE_MASK           0x1fff0000
+#define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT     16
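+
+/*
+ * Illustrative sketch: per the 802.11ac MCS map encoding (0: MCS 0-7,
+ * 1: MCS 0-8, 2: MCS 0-9, 3: stream not supported), advertising
+ * MCS 0-9 on two spatial streams could be composed as:
+ *
+ *   u32 mcs_map = WMI_VHT_MAX_MCS_4_SS_MASK(2, 1) |
+ *                 WMI_VHT_MAX_MCS_4_SS_MASK(2, 2);
+ *
+ * with the remaining stream fields left for the caller to mark as
+ * unsupported.
+ */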
+
+enum {
+       REGDMN_MODE_11A              = 0x00001, /* 11a channels */
+       REGDMN_MODE_TURBO            = 0x00002, /* 11a turbo-only channels */
+       REGDMN_MODE_11B              = 0x00004, /* 11b channels */
+       REGDMN_MODE_PUREG            = 0x00008, /* 11g channels (OFDM only) */
+       REGDMN_MODE_11G              = 0x00008, /* XXX historical */
+       REGDMN_MODE_108G             = 0x00020, /* 11a+Turbo channels */
+       REGDMN_MODE_108A             = 0x00040, /* 11g+Turbo channels */
+       REGDMN_MODE_XR               = 0x00100, /* XR channels */
+       REGDMN_MODE_11A_HALF_RATE    = 0x00200, /* 11A half rate channels */
+       REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */
+       REGDMN_MODE_11NG_HT20        = 0x00800, /* 11N-G HT20 channels */
+       REGDMN_MODE_11NA_HT20        = 0x01000, /* 11N-A HT20 channels */
+       REGDMN_MODE_11NG_HT40PLUS    = 0x02000, /* 11N-G HT40 + channels */
+       REGDMN_MODE_11NG_HT40MINUS   = 0x04000, /* 11N-G HT40 - channels */
+       REGDMN_MODE_11NA_HT40PLUS    = 0x08000, /* 11N-A HT40 + channels */
+       REGDMN_MODE_11NA_HT40MINUS   = 0x10000, /* 11N-A HT40 - channels */
+       REGDMN_MODE_11AC_VHT20       = 0x20000, /* 5Ghz, VHT20 */
+       REGDMN_MODE_11AC_VHT40PLUS   = 0x40000, /* 5Ghz, VHT40 + channels */
+       REGDMN_MODE_11AC_VHT40MINUS  = 0x80000, /* 5Ghz  VHT40 - channels */
+       REGDMN_MODE_11AC_VHT80       = 0x100000, /* 5Ghz, VHT80 channels */
+       REGDMN_MODE_ALL              = 0xffffffff
+};
+
+#define REGDMN_CAP1_CHAN_HALF_RATE        0x00000001
+#define REGDMN_CAP1_CHAN_QUARTER_RATE     0x00000002
+#define REGDMN_CAP1_CHAN_HAL49GHZ         0x00000004
+
+/* regulatory capabilities */
+#define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND   0x0040
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN    0x0080
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2         0x0100
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND    0x0200
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD     0x0400
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A    0x0800
+
+struct hal_reg_capabilities {
+       /* regdomain value specified in EEPROM */
+       __le32 eeprom_rd;
+       /* extended regdomain value from EEPROM */
+       __le32 eeprom_rd_ext;
+       /* CAP1 capabilities bit map. */
+       __le32 regcap1;
+       /* REGDMN EEPROM CAP. */
+       __le32 regcap2;
+       /* REGDMN MODE */
+       __le32 wireless_modes;
+       __le32 low_2ghz_chan;
+       __le32 high_2ghz_chan;
+       __le32 low_5ghz_chan;
+       __le32 high_5ghz_chan;
+} __packed;
+
+enum wlan_mode_capability {
+       WHAL_WLAN_11A_CAPABILITY   = 0x1,
+       WHAL_WLAN_11G_CAPABILITY   = 0x2,
+       WHAL_WLAN_11AG_CAPABILITY  = 0x3,
+};
+
+/* structure used by FW for requesting host memory */
+struct wlan_host_mem_req {
+       /* ID of the request */
+       __le32 req_id;
+       /* size of each unit */
+       __le32 unit_size;
+       /* flags to indicate that the number of units is dependent
+        * on the number of resources (num vdevs, num peers, etc.)
+        */
+       __le32 num_unit_info;
+       /*
+        * Actual number of units to allocate. If the flags in num_unit_info
+        * indicate that the number of units is tied to the number of a
+        * particular resource, then the num_units field is set to 0 and the
+        * host will derive the number of units from the number of resources
+        * it is requesting.
+        */
+       __le32 num_units;
+} __packed;
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
+       ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+       (1 << ((svc_id)%(sizeof(u32))))) != 0)
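+
+/*
+ * Worked example of the packing above: since sizeof(u32) is 4, each
+ * bitmap word carries four service bits. For svc_id = 10 this reads
+ * word 10 / 4 = 2 and tests bit 10 % 4 = 2, i.e.:
+ *
+ *   if (WMI_SERVICE_IS_ENABLED(bmap, WMI_SERVICE_PHYERR))
+ *           use_phyerr_events();
+ *
+ * where WMI_SERVICE_PHYERR == 10, 'bmap' is the wmi_service_bitmap
+ * from the service ready event, and use_phyerr_events() is a
+ * hypothetical caller.
+ */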
+
+/*
+ * The following struct holds the optional payload for
+ * wmi_service_ready_event; e.g., for 11ac the target passes some of
+ * the device capabilities to the host.
+ */
+struct wmi_service_ready_event {
+       __le32 sw_version;
+       __le32 sw_version_1;
+       __le32 abi_version;
+       /* WMI_PHY_CAPABILITY */
+       __le32 phy_capability;
+       /* Maximum number of frag table entries that SW will populate, minus 1 */
+       __le32 max_frag_entry;
+       __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+       __le32 num_rf_chains;
+       /*
+        * The following field is only valid for service type
+        * WMI_SERVICE_11AC
+        */
+       __le32 ht_cap_info; /* WMI HT Capability */
+       __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+       __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+       __le32 hw_min_tx_power;
+       __le32 hw_max_tx_power;
+       struct hal_reg_capabilities hal_reg_capabilities;
+       __le32 sys_cap_info;
+       __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+       /*
+        * Max beacon and Probe Response IE offload size
+        * (includes optional P2P IEs)
+        */
+       __le32 max_bcn_ie_size;
+       /*
+        * Request to the host to allocate a chunk of memory and pass it
+        * down to FW via WMI_INIT. FW uses this as FW extension memory for
+        * saving its data structures. Only valid for low latency interfaces
+        * like PCIe where FW can access this memory directly (or via DMA).
+        */
+       __le32 num_mem_reqs;
+       struct wlan_host_mem_req mem_reqs[1];
+} __packed;
+
+/*
+ * status consists of the init status in the lower 16 bits and the ID
+ * of the module that returned the status in the upper 16 bits
+ */
+#define WLAN_INIT_STATUS_SUCCESS   0x0
+#define WLAN_GET_INIT_STATUS_REASON(status)    ((status) & 0xffff)
+#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
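+
+/*
+ * Worked example: a status word of 0x00050001 decodes as
+ * WLAN_GET_INIT_STATUS_MODULE_ID() = 0x0005 and
+ * WLAN_GET_INIT_STATUS_REASON() = 0x0001; a fully successful init
+ * reports WLAN_INIT_STATUS_SUCCESS (0x0) in the reason field.
+ */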
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+
+struct wmi_ready_event {
+       __le32 sw_version;
+       __le32 abi_version;
+       struct wmi_mac_addr mac_addr;
+       __le32 status;
+} __packed;
+
+struct wmi_resource_config {
+       /* number of virtual devices (VAPs) to support */
+       __le32 num_vdevs;
+
+       /* number of peer nodes to support */
+       __le32 num_peers;
+
+       /*
+        * In offload mode target supports features like WOW, chatter and
+        * other protocol offloads. In order to support them some
+        * functionalities like reorder buffering, PN checking need to be
+        * done in target. This determines the maximum number of peers
+        * supported by the target in offload mode.
+        * by target in offload mode
+        */
+       __le32 num_offload_peers;
+
+       /* For target-based RX reordering */
+       __le32 num_offload_reorder_bufs;
+
+       /* number of keys per peer */
+       __le32 num_peer_keys;
+
+       /* total number of TX/RX data TIDs */
+       __le32 num_tids;
+
+       /*
+        * max skid for resolving hash collisions
+        *
+        *   The address search table is sparse, so that if two MAC addresses
+        *   result in the same hash value, the second of these conflicting
+        *   entries can slide to the next index in the address search table,
+        *   and use it, if it is unoccupied.  This ast_skid_limit parameter
+        *   specifies the upper bound on how many subsequent indices to search
+        *   over to find an unoccupied space.
+        */
+       __le32 ast_skid_limit;
+
+       /*
+        * the nominal chain mask for transmit
+        *
+        *   The chain mask may be modified dynamically, e.g. to operate AP
+        *   tx with a reduced number of chains if no clients are associated.
+        *   This configuration parameter specifies the nominal chain-mask that
+        *   should be used when not operating with a reduced set of tx chains.
+        */
+       __le32 tx_chain_mask;
+
+       /*
+        * the nominal chain mask for receive
+        *
+        *   The chain mask may be modified dynamically, e.g. for a client
+        *   to use a reduced number of chains for receive if the traffic to
+        *   the client is low enough that it doesn't require downlink MIMO
+        *   or antenna diversity.
+        *   This configuration parameter specifies the nominal chain-mask that
+        *   should be used when not operating with a reduced set of rx chains.
+        */
+       __le32 rx_chain_mask;
+
+       /*
+        * what rx reorder timeout (ms) to use per AC
+        *
+        *   Each WMM access class (voice, video, best-effort, background) will
+        *   have its own timeout value to dictate how long to wait for missing
+        *   rx MPDUs to arrive before flushing subsequent MPDUs that have
+        *   already been received.
+        *   This parameter specifies the timeout in milliseconds for each
+        *   class.
+        */
+       __le32 rx_timeout_pri_vi;
+       __le32 rx_timeout_pri_vo;
+       __le32 rx_timeout_pri_be;
+       __le32 rx_timeout_pri_bk;
+
+       /*
+        * what mode the rx should decap packets to
+        *
+        *   MAC can decap to RAW (no decap), native wifi or Ethernet types.
+        *   This setting also determines the default TX behavior, however TX
+        *   behavior can be modified on a per-VAP basis during VAP init.
+        */
+       __le32 rx_decap_mode;
+
+       /* maximum number of scan requests that can be queued */
+       __le32 scan_max_pending_reqs;
+
+       /* maximum VDEV that could use BMISS offload */
+       __le32 bmiss_offload_max_vdev;
+
+       /* maximum VDEV that could use offload roaming */
+       __le32 roam_offload_max_vdev;
+
+       /* maximum AP profiles that would push to offload roaming */
+       __le32 roam_offload_max_ap_profiles;
+
+       /*
+        * how many groups to use for mcast->ucast conversion
+        *
+        *   The target's WAL maintains a table to hold information regarding
+        *   which peers belong to a given multicast group, so that if
+        *   multicast->unicast conversion is enabled, the target can convert
+        *   multicast tx frames to a series of unicast tx frames, to each
+        *   peer within the multicast group.
+        *   This num_mcast_groups configuration parameter tells the target
+        *   how many multicast groups to provide storage for within its
+        *   multicast group membership table.
+        */
+       __le32 num_mcast_groups;
+
+       /*
+        * size to alloc for the mcast membership table
+        *
+        *   This num_mcast_table_elems configuration parameter tells the
+        *   target how many peer elements it needs to provide storage for in
+        *   its multicast group membership table.
+        *   These multicast group membership table elements are shared by the
+        *   multicast groups stored within the table.
+        */
+       __le32 num_mcast_table_elems;
+
+       /*
+        * whether/how to do multicast->unicast conversion
+        *
+        *   This configuration parameter specifies whether the target should
+        *   perform multicast --> unicast conversion on transmit, and if so,
+        *   what to do if it finds no entries in its multicast group
+        *   membership table for the multicast IP address in the tx frame.
+        *   Configuration value:
+        *   0 -> Do not perform multicast to unicast conversion.
+        *   1 -> Convert multicast frames to unicast, if the IP multicast
+        *        address from the tx frame is found in the multicast group
+        *        membership table.  If the IP multicast address is not found,
+        *        drop the frame.
+        *   2 -> Convert multicast frames to unicast, if the IP multicast
+        *        address from the tx frame is found in the multicast group
+        *        membership table.  If the IP multicast address is not found,
+        *        transmit the frame as multicast.
+        */
+       __le32 mcast2ucast_mode;
+
+       /*
+        * how much memory to allocate for a tx PPDU dbg log
+        *
+        *   This parameter controls how much memory the target will allocate
+        *   to store a log of tx PPDU meta-information (how large the PPDU
+        *   was, when it was sent, whether it was successful, etc.)
+        */
+       __le32 tx_dbg_log_size;
+
+       /* how many AST entries to be allocated for WDS */
+       __le32 num_wds_entries;
+
+       /*
+        * MAC DMA burst size, e.g., for a PCI target the limit can be
+        * 0: default, 1: 256 bytes
+        */
+       __le32 dma_burst_size;
+
+       /*
+        * Fixed delimiters to be inserted after every MPDU to
+        * account for interface latency to avoid underrun.
+        */
+       __le32 mac_aggr_delim;
+
+       /*
+        *   determine whether the target is responsible for detecting
+        *   duplicate non-aggregate MPDUs and timing out stale fragments.
+        *
+        *   A-MPDU reordering is always performed on the target.
+        *
+        *   0: target responsible for frag timeout and dup checking
+        *   1: host responsible for frag timeout and dup checking
+        */
+       __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+       /*
+        * Configuration for VoW:
+        * number of video nodes to be supported and max number of
+        * descriptors for each video link (node).
+        */
+       __le32 vow_config;
+
+       /* maximum VDEV that could use GTK offload */
+       __le32 gtk_offload_max_vdev;
+
+       /* Number of msdu descriptors target should use */
+       __le32 num_msdu_desc;
+
+       /*
+        * Max. number of Tx fragments per MSDU
+        *  This parameter controls the max number of Tx fragments per MSDU.
+        *  This is sent by the target as part of the WMI_SERVICE_READY event
+        *  and is overridden by the OS shim as required.
+        */
+       __le32 max_frag_entries;
+} __packed;
+
+/* structure describing a host memory chunk. */
+struct host_memory_chunk {
+       /* id of the request that is passed up in service ready */
+       __le32 req_id;
+       /* the physical address of the memory chunk */
+       __le32 ptr;
+       /* size of the chunk */
+       __le32 size;
+} __packed;
+
+struct wmi_init_cmd {
+       struct wmi_resource_config resource_config;
+       __le32 num_host_mem_chunks;
+
+       /*
+        * variable number of host memory chunks.
+        * This should be the last element in the structure
+        */
+       struct host_memory_chunk host_mem_chunks[1];
+} __packed;
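+
+/*
+ * Sizing sketch (illustration only): because host_mem_chunks[] already
+ * declares one element, a command carrying 'n' chunks (n >= 1) would
+ * be allocated as:
+ *
+ *   len = sizeof(struct wmi_init_cmd) +
+ *         (n - 1) * sizeof(struct host_memory_chunk);
+ */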
+
+/* TLV for channel list */
+struct wmi_chan_list {
+       __le32 tag; /* WMI_CHAN_LIST_TAG */
+       __le32 num_chan;
+       __le32 channel_list[0];
+} __packed;
+
+struct wmi_bssid_list {
+       __le32 tag; /* WMI_BSSID_LIST_TAG */
+       __le32 num_bssid;
+       struct wmi_mac_addr bssid_list[0];
+} __packed;
+
+struct wmi_ie_data {
+       __le32 tag; /* WMI_IE_TAG */
+       __le32 ie_len;
+       u8 ie_data[0];
+} __packed;
+
+struct wmi_ssid {
+       __le32 ssid_len;
+       u8 ssid[32];
+} __packed;
+
+struct wmi_ssid_list {
+       __le32 tag; /* WMI_SSID_LIST_TAG */
+       __le32 num_ssids;
+       struct wmi_ssid ssids[0];
+} __packed;
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID    16
+#define WLAN_SCAN_PARAMS_MAX_BSSID   4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN  256
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+       WMI_SCAN_PRIORITY_VERY_LOW = 0,
+       WMI_SCAN_PRIORITY_LOW,
+       WMI_SCAN_PRIORITY_MEDIUM,
+       WMI_SCAN_PRIORITY_HIGH,
+       WMI_SCAN_PRIORITY_VERY_HIGH,
+       WMI_SCAN_PRIORITY_COUNT   /* number of priorities supported */
+};
+
+struct wmi_start_scan_cmd {
+       /* Scan ID */
+       __le32 scan_id;
+       /* Scan requestor ID */
+       __le32 scan_req_id;
+       /* VDEV id(interface) that is requesting scan */
+       __le32 vdev_id;
+       /* Scan Priority, input to scan scheduler */
+       __le32 scan_priority;
+       /* Scan events subscription */
+       __le32 notify_scan_events;
+       /* dwell time in msec on active channels */
+       __le32 dwell_time_active;
+       /* dwell time in msec on passive channels */
+       __le32 dwell_time_passive;
+       /*
+        * min time in msec on the BSS channel, only valid if at least one
+        * VDEV is active
+        */
+       __le32 min_rest_time;
+       /*
+        * max rest time in msec on the BSS channel, only valid if at least
+        * one VDEV is active.
+        *
+        * The scanner will rest on the BSS channel for at least
+        * min_rest_time. After min_rest_time the scanner will start
+        * checking for tx/rx activity on all VDEVs. If there is no activity
+        * the scanner will switch to off-channel. If there is activity the
+        * scanner will keep the radio on the BSS channel until
+        * max_rest_time expires. At max_rest_time the scanner will switch
+        * to off-channel irrespective of activity. Activity is determined
+        * by the idle_time parameter.
+        */
+       __le32 max_rest_time;
+       /*
+        * time before sending next set of probe requests.
+        * The scanner keeps repeating probe request transmission with the
+        * period specified by repeat_probe_time.
+        * The number of probe requests sent depends on the ssid_list and
+        * bssid_list.
+        */
+       __le32 repeat_probe_time;
+       /* time in msec between 2 consecutive probe requests within a set */
+       __le32 probe_spacing_time;
+       /*
+        * data inactivity time in msec on the BSS channel that will be used
+        * by the scanner for measuring inactivity.
+        */
+       __le32 idle_time;
+       /* maximum time in msec allowed for the scan */
+       __le32 max_scan_time;
+       /*
+        * delay in msec before sending first probe request after switching
+        * to a channel
+        */
+       __le32 probe_delay;
+       /* Scan control flags */
+       __le32 scan_ctrl_flags;
+
+       /* Burst duration time in msecs */
+       __le32 burst_duration;
+       /*
+        * TLV (tag length value) parameters follow the scan_cmd structure.
+        * TLVs can contain a channel list, bssid list, ssid list and IE
+        * data. The TLV tags are defined above.
+        */
+} __packed;
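+
+/*
+ * Buffer layout sketch (illustration only; which TLVs are present is
+ * request dependent):
+ *
+ *   [wmi_start_scan_cmd]
+ *   [wmi_chan_list  : 2 words + n_channels words]
+ *   [wmi_ssid_list  : 2 words + n_ssids * sizeof(struct wmi_ssid)]
+ *   [wmi_bssid_list : 2 words + n_bssids * sizeof(struct wmi_mac_addr)]
+ *   [wmi_ie_data    : 2 words + ie_len bytes]
+ */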
+
+struct wmi_ssid_arg {
+       int len;
+       const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+       const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+       u32 scan_id;
+       u32 scan_req_id;
+       u32 vdev_id;
+       u32 scan_priority;
+       u32 notify_scan_events;
+       u32 dwell_time_active;
+       u32 dwell_time_passive;
+       u32 min_rest_time;
+       u32 max_rest_time;
+       u32 repeat_probe_time;
+       u32 probe_spacing_time;
+       u32 idle_time;
+       u32 max_scan_time;
+       u32 probe_delay;
+       u32 scan_ctrl_flags;
+
+       u32 ie_len;
+       u32 n_channels;
+       u32 n_ssids;
+       u32 n_bssids;
+
+       u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+       u32 channels[64];
+       struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+       struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+/* scan control flags */
+
+/* passively scan all channels including active channels */
+#define WMI_SCAN_FLAG_PASSIVE        0x1
+/* add wildcard ssid probe request even though ssid_list is specified. */
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+/* add cck rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+/* add ofdm rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+/* enable indication of channel load and noise floor to the host */
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+/* filter probe request frames */
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+/* When set, DFS channels will not be scanned */
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+/* Different FW scan engines may choose to bail out on errors.
+ * Allow the driver to have influence over that. */
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+
+/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
+#define WMI_SCAN_CLASS_MASK 0xFF000000
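+
+/*
+ * Composition sketch (illustration only): a typical active scan
+ * request might set
+ *
+ *   arg.scan_ctrl_flags = WMI_SCAN_ADD_BCAST_PROBE_REQ |
+ *                         WMI_SCAN_ADD_CCK_RATES |
+ *                         WMI_SCAN_ADD_OFDM_RATES |
+ *                         WMI_SCAN_CHAN_STAT_EVENT;
+ *
+ * where 'arg' is a struct wmi_start_scan_arg.
+ */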
+
+
+enum wmi_stop_scan_type {
+       WMI_SCAN_STOP_ONE       = 0x00000000, /* stop by scan_id */
+       WMI_SCAN_STOP_VDEV_ALL  = 0x01000000, /* stop by vdev_id */
+       WMI_SCAN_STOP_ALL       = 0x04000000, /* stop all scans */
+};
+
+struct wmi_stop_scan_cmd {
+       __le32 scan_req_id;
+       __le32 scan_id;
+       __le32 req_type;
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_stop_scan_arg {
+       u32 req_id;
+       enum wmi_stop_scan_type req_type;
+       union {
+               u32 scan_id;
+               u32 vdev_id;
+       } u;
+};
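+
+/*
+ * Usage sketch (illustration only; 'scan_id' is an assumed local):
+ *
+ *   struct wmi_stop_scan_arg arg = {
+ *           .req_id    = WMI_HOST_SCAN_REQ_ID_PREFIX | 1,
+ *           .req_type  = WMI_SCAN_STOP_ONE,
+ *           .u.scan_id = scan_id,
+ *   };
+ *
+ * stops the single scan identified by scan_id; WMI_SCAN_STOP_VDEV_ALL
+ * uses .u.vdev_id instead, and WMI_SCAN_STOP_ALL uses neither union
+ * member.
+ */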
+
+struct wmi_scan_chan_list_cmd {
+       __le32 num_scan_chans;
+       struct wmi_channel chan_info[0];
+} __packed;
+
+struct wmi_scan_chan_list_arg {
+       u32 n_channels;
+       struct wmi_channel_arg *channels;
+};
+
+enum wmi_bss_filter {
+       WMI_BSS_FILTER_NONE = 0,        /* no beacons forwarded */
+       WMI_BSS_FILTER_ALL,             /* all beacons forwarded */
+       WMI_BSS_FILTER_PROFILE,         /* only beacons matching profile */
+       WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */
+       WMI_BSS_FILTER_CURRENT_BSS,     /* only beacons matching current BSS */
+       WMI_BSS_FILTER_ALL_BUT_BSS,     /* all but beacons matching BSS */
+       WMI_BSS_FILTER_PROBED_SSID,     /* beacons matching probed ssid */
+       WMI_BSS_FILTER_LAST_BSS,        /* marker only */
+};
+
+enum wmi_scan_event_type {
+       WMI_SCAN_EVENT_STARTED         = 0x1,
+       WMI_SCAN_EVENT_COMPLETED       = 0x2,
+       WMI_SCAN_EVENT_BSS_CHANNEL     = 0x4,
+       WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
+       WMI_SCAN_EVENT_DEQUEUED        = 0x10,
+       WMI_SCAN_EVENT_PREEMPTED       = 0x20, /* possibly by high-prio scan */
+       WMI_SCAN_EVENT_START_FAILED    = 0x40,
+       WMI_SCAN_EVENT_RESTARTED       = 0x80,
+       WMI_SCAN_EVENT_MAX             = 0x8000
+};
+
+enum wmi_scan_completion_reason {
+       WMI_SCAN_REASON_COMPLETED,
+       WMI_SCAN_REASON_CANCELLED,
+       WMI_SCAN_REASON_PREEMPTED,
+       WMI_SCAN_REASON_TIMEDOUT,
+       WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_scan_event {
+       __le32 event_type; /* %WMI_SCAN_EVENT_ */
+       __le32 reason; /* %WMI_SCAN_REASON_ */
+       __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+       __le32 scan_req_id;
+       __le32 scan_id;
+       __le32 vdev_id;
+} __packed;
+
+/*
+ * This defines how much headroom is kept in the
+ * receive frame between the descriptor and the
+ * payload, in order for the WMI PHY error and
+ * management handler to insert header contents.
+ *
+ * This is in bytes.
+ */
+#define WMI_MGMT_RX_HDR_HEADROOM    52
+
+/*
+ * This event will be used for sending scan results
+ * as well as rx mgmt frames to the host. The rx buffer
+ * will be sent as part of this WMI event. It would be a
+ * good idea to pass all the fields in the RX status
+ * descriptor up to the host.
+ */
+struct wmi_mgmt_rx_hdr {
+       __le32 channel;
+       __le32 snr;
+       __le32 rate;
+       __le32 phy_mode;
+       __le32 buf_len;
+       __le32 status; /* %WMI_RX_STATUS_ */
+} __packed;
+
+struct wmi_mgmt_rx_event {
+       struct wmi_mgmt_rx_hdr hdr;
+       u8 buf[0];
+} __packed;
+
+#define WMI_RX_STATUS_OK                       0x00
+#define WMI_RX_STATUS_ERR_CRC                  0x01
+#define WMI_RX_STATUS_ERR_DECRYPT              0x08
+#define WMI_RX_STATUS_ERR_MIC                  0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS       0x20
+
+struct wmi_single_phyerr_rx_hdr {
+       /* TSF timestamp */
+       __le32 tsf_timestamp;
+
+       /*
+        * Current freq1, freq2
+        *
+        * [7:0]:    freq1[lo]
+        * [15:8] :   freq1[hi]
+        * [23:16]:   freq2[lo]
+        * [31:24]:   freq2[hi]
+        */
+       __le16 freq1;
+       __le16 freq2;
+
+       /*
+        * Combined RSSI over all chains and channel width for this PHY error
+        *
+        * [7:0]: RSSI combined
+        * [15:8]: Channel width (MHz)
+        * [23:16]: PHY error code
+        * [31:24]: reserved (future use)
+        */
+       u8 rssi_combined;
+       u8 chan_width_mhz;
+       u8 phy_err_code;
+       u8 rsvd0;
+
+       /*
+        * RSSI on chain 0 through 3
+        *
+        * This is formatted the same as the PPDU_START RX descriptor
+        * field:
+        *
+        * [7:0]:   pri20
+        * [15:8]:  sec20
+        * [23:16]: sec40
+        * [31:24]: sec80
+        */
+
+       __le32 rssi_chain0;
+       __le32 rssi_chain1;
+       __le32 rssi_chain2;
+       __le32 rssi_chain3;
+
+       /*
+        * Last calibrated NF value for chain 0 through 3
+        *
+        * nf_list_1:
+        *
+        * + [15:0] - chain 0
+        * + [31:16] - chain 1
+        *
+        * nf_list_2:
+        *
+        * + [15:0] - chain 2
+        * + [31:16] - chain 3
+        */
+       __le32 nf_list_1;
+       __le32 nf_list_2;
+
+
+       /* Length of the frame */
+       __le32 buf_len;
+} __packed;
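+
+/*
+ * Decoding sketch (illustration only), following the byte layout
+ * documented above for the per-chain RSSI words:
+ *
+ *   u32 chain0 = __le32_to_cpu(hdr->rssi_chain0);
+ *   u8 rssi_pri20 = chain0 & 0xff;
+ *   u8 rssi_sec20 = (chain0 >> 8) & 0xff;
+ */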
+
+struct wmi_single_phyerr_rx_event {
+       /* Phy error event header */
+       struct wmi_single_phyerr_rx_hdr hdr;
+       /* frame buffer */
+       u8 bufp[0];
+} __packed;
+
+struct wmi_comb_phyerr_rx_hdr {
+       /* Phy error event count */
+       __le32 num_phyerr_events;
+       __le32 tsf_l32;
+       __le32 tsf_u32;
+} __packed;
+
+struct wmi_comb_phyerr_rx_event {
+       /* Phy error event header */
+       struct wmi_comb_phyerr_rx_hdr hdr;
+       /*
+        * frame buffer - contains multiple payloads in the order:
+        *                    header - payload, header - payload...
+        *  (The header is of type: wmi_single_phyerr_rx_hdr)
+        */
+       u8 bufp[0];
+} __packed;
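+
+/*
+ * Parsing sketch (illustration only; bounds checks omitted): walk the
+ * back-to-back header/payload pairs in bufp:
+ *
+ *   u8 *p = ev->bufp;
+ *   int i;
+ *
+ *   for (i = 0; i < __le32_to_cpu(ev->hdr.num_phyerr_events); i++) {
+ *           struct wmi_single_phyerr_rx_hdr *h = (void *)p;
+ *
+ *           process_phyerr(h);
+ *           p += sizeof(*h) + __le32_to_cpu(h->buf_len);
+ *   }
+ *
+ * where process_phyerr() is a hypothetical per-error handler.
+ */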
+
+struct wmi_mgmt_tx_hdr {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 tx_rate;
+       __le32 tx_power;
+       __le32 buf_len;
+} __packed;
+
+struct wmi_mgmt_tx_cmd {
+       struct wmi_mgmt_tx_hdr hdr;
+       u8 buf[0];
+} __packed;
+
+struct wmi_echo_event {
+       __le32 value;
+} __packed;
+
+struct wmi_echo_cmd {
+       __le32 value;
+} __packed;
+
+
+struct wmi_pdev_set_regdomain_cmd {
+       __le32 reg_domain;
+       __le32 reg_domain_2G;
+       __le32 reg_domain_5G;
+       __le32 conformance_test_limit_2G;
+       __le32 conformance_test_limit_5G;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_pdev_set_quiet_cmd {
+       /* period in TUs */
+       __le32 period;
+
+       /* duration in TUs */
+       __le32 duration;
+
+       /* offset in TUs */
+       __le32 next_start;
+
+       /* enable/disable */
+       __le32 enabled;
+} __packed;
+
+
+/*
+ * 802.11g protection mode.
+ */
+enum ath10k_protmode {
+       ATH10K_PROT_NONE     = 0,    /* no protection */
+       ATH10K_PROT_CTSONLY  = 1,    /* CTS to self */
+       ATH10K_PROT_RTSCTS   = 2,    /* RTS-CTS */
+};
+
+enum wmi_beacon_gen_mode {
+       WMI_BEACON_STAGGERED_MODE = 0,
+       WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_csa_event_ies_present_flag {
+       WMI_CSA_IE_PRESENT = 0x00000001,
+       WMI_XCSA_IE_PRESENT = 0x00000002,
+       WMI_WBW_IE_PRESENT = 0x00000004,
+       WMI_CSWARP_IE_PRESENT = 0x00000008,
+};
+
+/* wmi CSA receive event from beacon frame */
+struct wmi_csa_event {
+       __le32 i_fc_dur;
+       /* Bit 0-15: FC */
+       /* Bit 16-31: DUR */
+       struct wmi_mac_addr i_addr1;
+       struct wmi_mac_addr i_addr2;
+       __le32 csa_ie[2];
+       __le32 xcsa_ie[2];
+       __le32 wb_ie[2];
+       __le32 cswarp_ie;
+       __le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */
+} __packed;
+
+/* the definition of different PDEV parameters */
+#define PDEV_DEFAULT_STATS_UPDATE_PERIOD    500
+#define VDEV_DEFAULT_STATS_UPDATE_PERIOD    500
+#define PEER_DEFAULT_STATS_UPDATE_PERIOD    500
+
+enum wmi_pdev_param {
+       /* TX chain mask */
+       WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+       /* RX chain mask */
+       WMI_PDEV_PARAM_RX_CHAIN_MASK,
+       /* TX power limit for 2G Radio */
+       WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+       /* TX power limit for 5G Radio */
+       WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+       /* TX power scale */
+       WMI_PDEV_PARAM_TXPOWER_SCALE,
+       /* Beacon generation mode. 0: host, 1: target */
+       WMI_PDEV_PARAM_BEACON_GEN_MODE,
+       /* Beacon transmission mode. 0: staggered, 1: burst */
+       WMI_PDEV_PARAM_BEACON_TX_MODE,
+       /*
+        * Resource manager off-channel mode.
+        * 0: turn off off-channel mode, 1: turn on off-channel mode
+        */
+       WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       /*
+        * Protection mode:
+        * 0: no protection, 1: use CTS-to-self, 2: use RTS/CTS
+        */
+       WMI_PDEV_PARAM_PROTECTION_MODE,
+       /* Dynamic bandwidth 0: disable 1: enable */
+       WMI_PDEV_PARAM_DYNAMIC_BW,
+       /* Non-aggregate / 11g sw retry threshold. 0: disable */
+       WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       /* Aggregate sw retry threshold. 0: disable */
+       WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+       /* Station kickout threshold (consecutive failure count). 0: disable */
+       WMI_PDEV_PARAM_STA_KICKOUT_TH,
+       /* Aggregate size scaling configuration per AC */
+       WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       /* LTR enable */
+       WMI_PDEV_PARAM_LTR_ENABLE,
+       /* LTR latency for BE, in us */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       /* LTR latency for BK, in us */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       /* LTR latency for VI, in us */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       /* LTR latency for VO, in us  */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       /* LTR AC latency timeout, in ms */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       /* LTR platform latency override, in us */
+       WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       /* LTR-RX override, in us */
+       WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+       /* Tx activity timeout for LTR, in us */
+       WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       /* L1SS state machine enable */
+       WMI_PDEV_PARAM_L1SS_ENABLE,
+       /* Deep sleep state machine enable */
+       WMI_PDEV_PARAM_DSLEEP_ENABLE,
+       /* RX buffering flush enable */
+       WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+       /* RX buffering watermark */
+       WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+       /* RX buffering timeout enable */
+       WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+       /* RX buffering timeout value */
+       WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+       /* pdev level stats update period in ms */
+       WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       /* vdev level stats update period in ms */
+       WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       /* peer level stats update period in ms */
+       WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       /* beacon filter status update period */
+       WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+       WMI_PDEV_PARAM_PMF_QOS,
+       /* Access category on which ARP frames are sent */
+       WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+       /* DCS configuration */
+       WMI_PDEV_PARAM_DCS,
+       /* Enable/Disable ANI on target */
+       WMI_PDEV_PARAM_ANI_ENABLE,
+       /* configure the ANI polling period */
+       WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+       /* configure the ANI listening period */
+       WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       /* configure OFDM immunity level */
+       WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+       /* configure CCK immunity level */
+       WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+       /* Enable/Disable CDD for 1x1 STAs in rate control module */
+       WMI_PDEV_PARAM_DYNTXCHAIN,
+       /* Enable/Disable proxy STA */
+       WMI_PDEV_PARAM_PROXY_STA,
+       /* Enable/Disable low power state when all VDEVs are inactive/idle. */
+       WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+       /* Enable/Disable power gating sleep */
+       WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+};
+
+struct wmi_pdev_set_param_cmd {
+       __le32 param_id;
+       __le32 param_value;
+} __packed;
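+
+/*
+ * Editor's illustrative sketch (hypothetical helper, not from the driver):
+ * pdev parameters travel as two little-endian words, so filling the command
+ * is just a pair of byte-order conversions.
+ */
+static inline void wmi_pdev_set_param_fill(struct wmi_pdev_set_param_cmd *cmd,
+                                          enum wmi_pdev_param param, u32 value)
+{
+       cmd->param_id = __cpu_to_le32(param);
+       cmd->param_value = __cpu_to_le32(value);
+}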
+
+struct wmi_pdev_get_tpc_config_cmd {
+       /* parameter   */
+       __le32 param;
+} __packed;
+
+#define WMI_TPC_RATE_MAX               160
+#define WMI_TPC_TX_N_CHAIN             4
+
+enum wmi_tpc_config_event_flag {
+       WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD     = 0x1,
+       WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC    = 0x2,
+       WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF    = 0x4,
+};
+
+struct wmi_pdev_tpc_config_event {
+       __le32 reg_domain;
+       __le32 chan_freq;
+       __le32 phy_mode;
+       __le32 twice_antenna_reduction;
+       __le32 twice_max_rd_power;
+       s32 twice_antenna_gain;
+       __le32 power_limit;
+       __le32 rate_max;
+       __le32 num_tx_chain;
+       __le32 ctl;
+       __le32 flags;
+       s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+       s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+       s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+       s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+       u8 rates_array[WMI_TPC_RATE_MAX];
+} __packed;
+
+/* Transmit power scale factor. */
+enum wmi_tp_scale {
+       WMI_TP_SCALE_MAX    = 0,        /* no scaling (default) */
+       WMI_TP_SCALE_50     = 1,        /* 50% of max (-3 dBm) */
+       WMI_TP_SCALE_25     = 2,        /* 25% of max (-6 dBm) */
+       WMI_TP_SCALE_12     = 3,        /* 12% of max (-9 dBm) */
+       WMI_TP_SCALE_MIN    = 4,        /* min, but still on   */
+       WMI_TP_SCALE_SIZE   = 5,        /* number of enum values */
+};
+
+struct wmi_set_channel_cmd {
+       /* channel (only frequency and mode info are used) */
+       struct wmi_channel chan;
+} __packed;
+
+struct wmi_pdev_chanlist_update_event {
+       /* number of channels */
+       __le32 num_chan;
+       /* array of channels */
+       struct wmi_channel channel_list[1];
+} __packed;
+
+#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
+
+struct wmi_debug_mesg_event {
+       /* message buffer, NULL terminated */
+       char bufp[WMI_MAX_DEBUG_MESG];
+} __packed;
+
+enum {
+       /* P2P device */
+       VDEV_SUBTYPE_P2PDEV = 0,
+       /* P2P client */
+       VDEV_SUBTYPE_P2PCLI,
+       /* P2P GO */
+       VDEV_SUBTYPE_P2PGO,
+       /* BT3.0 HS */
+       VDEV_SUBTYPE_BT,
+};
+
+struct wmi_pdev_set_channel_cmd {
+       /* ignore power, only use flags, mode and freq */
+       struct wmi_channel chan;
+} __packed;
+
+/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
+#define WMI_DSCP_MAP_MAX    (64)
+struct wmi_pdev_set_dscp_tid_map_cmd {
+       /* map indicating DSCP to TID conversion */
+       __le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX];
+} __packed;
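+
+/*
+ * Editor's illustrative sketch (hypothetical helper): one common default is
+ * to derive the TID from the three most significant bits of the 6-bit DSCP
+ * codepoint; actual deployments may program any mapping they need.
+ */
+static inline void
+wmi_dscp_tid_map_fill_default(struct wmi_pdev_set_dscp_tid_map_cmd *cmd)
+{
+       int dscp;
+
+       for (dscp = 0; dscp < WMI_DSCP_MAP_MAX; dscp++)
+               cmd->dscp_to_tid_map[dscp] = __cpu_to_le32(dscp >> 3);
+}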
+
+enum mcast_bcast_rate_id {
+       WMI_SET_MCAST_RATE,
+       WMI_SET_BCAST_RATE
+};
+
+struct mcast_bcast_rate {
+       enum mcast_bcast_rate_id rate_id;
+       __le32 rate;
+} __packed;
+
+struct wmi_wmm_params {
+       __le32 cwmin;
+       __le32 cwmax;
+       __le32 aifs;
+       __le32 txop;
+       __le32 acm;
+       __le32 no_ack;
+} __packed;
+
+struct wmi_pdev_set_wmm_params {
+       struct wmi_wmm_params ac_be;
+       struct wmi_wmm_params ac_bk;
+       struct wmi_wmm_params ac_vi;
+       struct wmi_wmm_params ac_vo;
+} __packed;
+
+struct wmi_wmm_params_arg {
+       u32 cwmin;
+       u32 cwmax;
+       u32 aifs;
+       u32 txop;
+       u32 acm;
+       u32 no_ack;
+};
+
+struct wmi_pdev_set_wmm_params_arg {
+       struct wmi_wmm_params_arg ac_be;
+       struct wmi_wmm_params_arg ac_bk;
+       struct wmi_wmm_params_arg ac_vi;
+       struct wmi_wmm_params_arg ac_vo;
+};
+
+struct wal_dbg_tx_stats {
+       /* Num HTT cookies queued to dispatch list */
+       __le32 comp_queued;
+
+       /* Num HTT cookies dispatched */
+       __le32 comp_delivered;
+
+       /* Num MSDU queued to WAL */
+       __le32 msdu_enqued;
+
+       /* Num MPDU queue to WAL */
+       __le32 mpdu_enqued;
+
+       /* Num MSDUs dropped by WMM limit */
+       __le32 wmm_drop;
+
+       /* Num Local frames queued */
+       __le32 local_enqued;
+
+       /* Num Local frames done */
+       __le32 local_freed;
+
+       /* Num queued to HW */
+       __le32 hw_queued;
+
+       /* Num PPDU reaped from HW */
+       __le32 hw_reaped;
+
+       /* Num underruns */
+       __le32 underrun;
+
+       /* Num PPDUs cleaned up in TX abort */
+       __le32 tx_abort;
+
+       /* Num MPDUs requed by SW */
+       __le32 mpdus_requed;
+
+       /* excessive retries */
+       __le32 tx_ko;
+
+       /* data hw rate code */
+       __le32 data_rc;
+
+       /* Scheduler self triggers */
+       __le32 self_triggers;
+
+       /* frames dropped due to excessive sw retries */
+       __le32 sw_retry_failure;
+
+       /* illegal rate phy errors  */
+       __le32 illgl_rate_phy_err;
+
+       /* wal pdev continuous xretry */
+       __le32 pdev_cont_xretry;
+
+       /* wal pdev tx timeout */
+       __le32 pdev_tx_timeout;
+
+       /* wal pdev resets  */
+       __le32 pdev_resets;
+
+       __le32 phy_underrun;
+
+       /* MPDU is more than txop limit */
+       __le32 txop_ovf;
+} __packed;
+
+struct wal_dbg_rx_stats {
+       /* Counts any change in ring routing mid-ppdu */
+       __le32 mid_ppdu_route_change;
+
+       /* Total number of statuses processed */
+       __le32 status_rcvd;
+
+       /* Extra frags on rings 0-3 */
+       __le32 r0_frags;
+       __le32 r1_frags;
+       __le32 r2_frags;
+       __le32 r3_frags;
+
+       /* MSDUs / MPDUs delivered to HTT */
+       __le32 htt_msdus;
+       __le32 htt_mpdus;
+
+       /* MSDUs / MPDUs delivered to local stack */
+       __le32 loc_msdus;
+       __le32 loc_mpdus;
+
+       /* AMSDUs that have more MSDUs than the status ring size */
+       __le32 oversize_amsdu;
+
+       /* Number of PHY errors */
+       __le32 phy_errs;
+
+       /* Number of PHY errors drops */
+       __le32 phy_err_drop;
+
+       /* Number of mpdu errors - FCS, MIC, ENC etc. */
+       __le32 mpdu_errs;
+} __packed;
+
+struct wal_dbg_peer_stats {
+       /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+       __le32 dummy;
+} __packed;
+
+struct wal_dbg_stats {
+       struct wal_dbg_tx_stats tx;
+       struct wal_dbg_rx_stats rx;
+       struct wal_dbg_peer_stats peer;
+} __packed;
+
+enum wmi_stats_id {
+       WMI_REQUEST_PEER_STAT   = 0x01,
+       WMI_REQUEST_AP_STAT     = 0x02
+};
+
+struct wmi_request_stats_cmd {
+       __le32 stats_id;
+
+       /*
+        * Space to add parameters like
+        * peer mac addr
+        */
+} __packed;
+
+/* Suspend option */
+enum {
+       /* suspend */
+       WMI_PDEV_SUSPEND,
+
+       /* suspend and disable all interrupts */
+       WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_suspend_cmd {
+       /* suspend option sent to target */
+       __le32 suspend_opt;
+} __packed;
+
+struct wmi_stats_event {
+       __le32 stats_id; /* %WMI_REQUEST_ */
+       /*
+        * number of pdev stats event structures
+        * (wmi_pdev_stats) 0 or 1
+        */
+       __le32 num_pdev_stats;
+       /*
+        * number of vdev stats event structures
+        * (wmi_vdev_stats) 0 or max vdevs
+        */
+       __le32 num_vdev_stats;
+       /*
+        * number of peer stats event structures
+        * (wmi_peer_stats) 0 or max peers
+        */
+       __le32 num_peer_stats;
+       __le32 num_bcnflt_stats;
+       /*
+        * followed by
+        *   num_pdev_stats * sizeof(struct wmi_pdev_stats)
+        *   num_vdev_stats * sizeof(struct wmi_vdev_stats)
+        *   num_peer_stats * sizeof(struct wmi_peer_stats)
+        *
+        *  By having a zero sized array, the pointer to data area
+        *  becomes available without increasing the struct size
+        */
+       u8 data[0];
+} __packed;
+
+/*
+ * PDEV statistics
+ * TODO: add all PDEV stats here
+ */
+struct wmi_pdev_stats {
+       __le32 chan_nf;        /* Channel noise floor */
+       __le32 tx_frame_count; /* TX frame count */
+       __le32 rx_frame_count; /* RX frame count */
+       __le32 rx_clear_count; /* rx clear count */
+       __le32 cycle_count;    /* cycle count */
+       __le32 phy_err_count;  /* Phy error count */
+       __le32 chan_tx_pwr;    /* channel tx power */
+       struct wal_dbg_stats wal; /* WAL dbg stats */
+} __packed;
+
+/*
+ * VDEV statistics
+ * TODO: add all VDEV stats here
+ */
+struct wmi_vdev_stats {
+       __le32 vdev_id;
+} __packed;
+
+/*
+ * peer statistics.
+ * TODO: add more stats
+ */
+struct wmi_peer_stats {
+       struct wmi_mac_addr peer_macaddr;
+       __le32 peer_rssi;
+       __le32 peer_tx_rate;
+} __packed;
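+
+/*
+ * Editor's illustrative sketch (hypothetical helper): walking the
+ * variable-length data area of wmi_stats_event. The peer records start
+ * after num_pdev_stats pdev records and num_vdev_stats vdev records, as
+ * described in the wmi_stats_event comment above.
+ */
+static inline const u8 *wmi_stats_event_peer_data(const struct wmi_stats_event *ev)
+{
+       return ev->data +
+              __le32_to_cpu(ev->num_pdev_stats) * sizeof(struct wmi_pdev_stats) +
+              __le32_to_cpu(ev->num_vdev_stats) * sizeof(struct wmi_vdev_stats);
+}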
+
+struct wmi_vdev_create_cmd {
+       __le32 vdev_id;
+       __le32 vdev_type;
+       __le32 vdev_subtype;
+       struct wmi_mac_addr vdev_macaddr;
+} __packed;
+
+enum wmi_vdev_type {
+       WMI_VDEV_TYPE_AP      = 1,
+       WMI_VDEV_TYPE_STA     = 2,
+       WMI_VDEV_TYPE_IBSS    = 3,
+       WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+       WMI_VDEV_SUBTYPE_NONE       = 0,
+       WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
+       WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
+       WMI_VDEV_SUBTYPE_P2P_GO     = 3,
+};
+
+/* values for vdev_subtype */
+
+/* values for vdev_start_request flags */
+/*
+ * Indicates that the AP VDEV uses a hidden SSID. Only valid for
+ * AP/GO.
+ */
+#define WMI_VDEV_START_HIDDEN_SSID  (1<<0)
+/*
+ * Indicates if robust management frame/management frame
+ * protection is enabled. For GO/AP vdevs, it indicates that
+ * it may support station/client associations with RMF enabled.
+ * For STA/client vdevs, it indicates that the STA will
+ * associate with an AP with RMF enabled.
+ */
+#define WMI_VDEV_START_PMF_ENABLED  (1<<1)
+
+struct wmi_p2p_noa_descriptor {
+       __le32 type_count; /* 255: continuous schedule, 0: reserved */
+       __le32 duration;  /* Absent period duration in microseconds */
+       __le32 interval;   /* Absent period interval in microseconds */
+       __le32 start_time; /* 32 bit TSF time when it starts */
+} __packed;
+
+struct wmi_vdev_start_request_cmd {
+       /* WMI channel */
+       struct wmi_channel chan;
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* requestor id identifying the caller module */
+       __le32 requestor_id;
+       /* beacon interval from received beacon */
+       __le32 beacon_interval;
+       /* DTIM Period from the received beacon */
+       __le32 dtim_period;
+       /* Flags */
+       __le32 flags;
+       /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
+       struct wmi_ssid ssid;
+       /* beacon/probe response xmit rate. Applicable for SoftAP. */
+       __le32 bcn_tx_rate;
+       /* beacon/probe response xmit power. Applicable for SoftAP. */
+       __le32 bcn_tx_power;
+       /* number of p2p NOA descriptor(s) from scan entry */
+       __le32 num_noa_descriptors;
+       /*
+        * Disable H/W ack. This is used by WMI_VDEV_RESTART_REQUEST_CMDID.
+        * During CAC, our HW shouldn't ack directed frames
+        */
+       __le32 disable_hw_ack;
+       /* actual p2p NOA descriptor from scan entry */
+       struct wmi_p2p_noa_descriptor noa_descriptors[2];
+} __packed;
+
+struct wmi_vdev_restart_request_cmd {
+       struct wmi_vdev_start_request_cmd vdev_start_request_cmd;
+} __packed;
+
+struct wmi_vdev_start_request_arg {
+       u32 vdev_id;
+       struct wmi_channel_arg channel;
+       u32 bcn_intval;
+       u32 dtim_period;
+       u8 *ssid;
+       u32 ssid_len;
+       u32 bcn_tx_rate;
+       u32 bcn_tx_power;
+       bool disable_hw_ack;
+       bool hidden_ssid;
+       bool pmf_enabled;
+};
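+
+/*
+ * Editor's illustrative sketch (hypothetical helper): how the boolean
+ * members of wmi_vdev_start_request_arg might translate into the flags
+ * word of wmi_vdev_start_request_cmd using the bits defined above.
+ */
+static inline __le32
+wmi_vdev_start_flags(const struct wmi_vdev_start_request_arg *arg)
+{
+       u32 flags = 0;
+
+       if (arg->hidden_ssid)
+               flags |= WMI_VDEV_START_HIDDEN_SSID;
+       if (arg->pmf_enabled)
+               flags |= WMI_VDEV_START_PMF_ENABLED;
+
+       return __cpu_to_le32(flags);
+}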
+
+struct wmi_vdev_delete_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+       __le32 vdev_id;
+       __le32 vdev_assoc_id;
+       struct wmi_mac_addr vdev_bssid;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_standby_response_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_response_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+       __le32 vdev_id;
+       __le32 param_id;
+       __le32 param_value;
+} __packed;
+
+#define WMI_MAX_KEY_INDEX   3
+#define WMI_MAX_KEY_LEN     32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP    0x01
+#define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */
+
+struct wmi_key_seq_counter {
+       __le32 key_seq_counter_l;
+       __le32 key_seq_counter_h;
+} __packed;
+
+#define WMI_CIPHER_NONE     0x0 /* clear key */
+#define WMI_CIPHER_WEP      0x1
+#define WMI_CIPHER_TKIP     0x2
+#define WMI_CIPHER_AES_OCB  0x3
+#define WMI_CIPHER_AES_CCM  0x4
+#define WMI_CIPHER_WAPI     0x5
+#define WMI_CIPHER_CKIP     0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+
+struct wmi_vdev_install_key_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 key_idx;
+       __le32 key_flags;
+       __le32 key_cipher; /* %WMI_CIPHER_ */
+       struct wmi_key_seq_counter key_rsc_counter;
+       struct wmi_key_seq_counter key_global_rsc_counter;
+       struct wmi_key_seq_counter key_tsc_counter;
+       u8 wpi_key_rsc_counter[16];
+       u8 wpi_key_tsc_counter[16];
+       __le32 key_len;
+       __le32 key_txmic_len;
+       __le32 key_rxmic_len;
+
+       /* contains key followed by tx mic followed by rx mic */
+       u8 key_data[0];
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+       u32 vdev_id;
+       const u8 *macaddr;
+       u32 key_idx;
+       u32 key_flags;
+       u32 key_cipher;
+       u32 key_len;
+       u32 key_txmic_len;
+       u32 key_rxmic_len;
+       const void *key_data;
+};
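+
+/*
+ * Editor's illustrative sketch (hypothetical helper): the install-key
+ * command carries its key material in the trailing key_data[] array, so
+ * the buffer to allocate is the fixed header plus key_len bytes (assuming
+ * key_len covers the key and any trailing MICs).
+ */
+static inline size_t
+wmi_vdev_install_key_cmd_len(const struct wmi_vdev_install_key_arg *arg)
+{
+       return sizeof(struct wmi_vdev_install_key_cmd) + arg->key_len;
+}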
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+       WMI_RATE_PREAMBLE_OFDM,
+       WMI_RATE_PREAMBLE_CCK,
+       WMI_RATE_PREAMBLE_HT,
+       WMI_RATE_PREAMBLE_VHT,
+};
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE    (0xff)
+
+/* the definition of different VDEV parameters */
+enum wmi_vdev_param {
+       /* RTS Threshold */
+       WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+       /* Fragmentation threshold */
+       WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+       /* beacon interval in TUs */
+       WMI_VDEV_PARAM_BEACON_INTERVAL,
+       /* Listen interval in TUs */
+       WMI_VDEV_PARAM_LISTEN_INTERVAL,
+       /* multicast rate in Mbps */
+       WMI_VDEV_PARAM_MULTICAST_RATE,
+       /* management frame rate in Mbps */
+       WMI_VDEV_PARAM_MGMT_TX_RATE,
+       /* slot time (long vs short) */
+       WMI_VDEV_PARAM_SLOT_TIME,
+       /* preamble (long vs short) */
+       WMI_VDEV_PARAM_PREAMBLE,
+       /* SWBA time (time before tbtt in msec) */
+       WMI_VDEV_PARAM_SWBA_TIME,
+       /* time period for updating VDEV stats */
+       WMI_VDEV_STATS_UPDATE_PERIOD,
+       /* age out time in msec for frames queued for station in power save */
+       WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+       /*
+        * Host SWBA interval (time in msec before tbtt for SWBA event
+        * generation).
+        */
+       WMI_VDEV_HOST_SWBA_INTERVAL,
+       /* DTIM period (specified in units of num beacon intervals) */
+       WMI_VDEV_PARAM_DTIM_PERIOD,
+       /*
+        * scheduler air time limit for this VDEV. used by off chan
+        * scheduler.
+        */
+       WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+       /* enable/disable WDS for this VDEV */
+       WMI_VDEV_PARAM_WDS,
+       /* ATIM Window */
+       WMI_VDEV_PARAM_ATIM_WINDOW,
+       /* BMISS max */
+       WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+       /* BMISS first time */
+       WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+       /* BMISS final time */
+       WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+       /* WMM enable/disable */
+       WMI_VDEV_PARAM_FEATURE_WMM,
+       /* Channel width */
+       WMI_VDEV_PARAM_CHWIDTH,
+       /* Channel Offset */
+       WMI_VDEV_PARAM_CHEXTOFFSET,
+       /* Disable HT Protection */
+       WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+       /* Quick STA Kickout */
+       WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+       /* Rate to be used with Management frames */
+       WMI_VDEV_PARAM_MGMT_RATE,
+       /* Protection Mode */
+       WMI_VDEV_PARAM_PROTECTION_MODE,
+       /* Fixed rate setting */
+       WMI_VDEV_PARAM_FIXED_RATE,
+       /* Short GI Enable/Disable */
+       WMI_VDEV_PARAM_SGI,
+       /* Enable LDPC */
+       WMI_VDEV_PARAM_LDPC,
+       /* Enable Tx STBC */
+       WMI_VDEV_PARAM_TX_STBC,
+       /* Enable Rx STBC */
+       WMI_VDEV_PARAM_RX_STBC,
+       /* Intra BSS forwarding  */
+       WMI_VDEV_PARAM_INTRA_BSS_FWD,
+       /* Setting Default xmit key for Vdev */
+       WMI_VDEV_PARAM_DEF_KEYID,
+       /* NSS width */
+       WMI_VDEV_PARAM_NSS,
+       /* Set the custom rate for the broadcast data frames */
+       WMI_VDEV_PARAM_BCAST_DATA_RATE,
+       /* Set the custom rate (rate-code) for multicast data frames */
+       WMI_VDEV_PARAM_MCAST_DATA_RATE,
+       /* Tx multicast packet indicate Enable/Disable */
+       WMI_VDEV_PARAM_MCAST_INDICATE,
+       /* Tx DHCP packet indicate Enable/Disable */
+       WMI_VDEV_PARAM_DHCP_INDICATE,
+       /* Enable host inspection of Tx unicast packet to unknown destination */
+       WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+       /* Minimum idle inactive time (in secs) before the AP considers a STA inactive */
+       WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+       /*
+        * An associated STA is considered inactive when there is no recent
+        * TX/RX activity and no downlink frames are buffered for it. Once a
+        * STA exceeds the maximum idle inactive time, the AP will send an
+        * 802.11 data-null as a keep alive to verify the STA is still
+        * associated. If the STA does not ACK the data-null, or if the data-null
+        * is buffered and the STA does not retrieve it, the STA will be
+        * considered unresponsive
+        * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+        */
+       WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+       /*
+        * An associated STA is considered unresponsive if there is no recent
+        * TX/RX activity and downlink frames are buffered for it. Once a STA
+        * exceeds the maximum unresponsive time, the AP will send a
+        * WMI_STA_KICKOUT event to the host so the STA can be deleted. */
+       WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+       /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+       WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+       /* Enable/Disable RTS-CTS */
+       WMI_VDEV_PARAM_ENABLE_RTSCTS,
+       /* Enable TX beamformee/beamformer */
+       WMI_VDEV_PARAM_TXBF,
+
+       /* Set packet power save */
+       WMI_VDEV_PARAM_PACKET_POWERSAVE,
+
+       /*
+        * Drops unencrypted packets if received in an encrypted connection,
+        * otherwise forwards to host.
+        */
+       WMI_VDEV_PARAM_DROP_UNENCRY,
+
+       /*
+        * Set the encapsulation type for frames.
+        */
+       WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+};
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG                0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT       0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG         0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT                0x2
+
+enum wmi_start_event_param {
+       WMI_VDEV_RESP_START_EVENT = 0,
+       WMI_VDEV_RESP_RESTART_EVENT,
+};
+
+struct wmi_vdev_start_response_event {
+       __le32 vdev_id;
+       __le32 req_id;
+       __le32 resp_type; /* %WMI_VDEV_RESP_ */
+       __le32 status;
+} __packed;
+
+struct wmi_vdev_standby_req_event {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_req_event {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+/*
+ * common structure used for simple events
+ * (stopped, resume_req, standby response)
+ */
+struct wmi_vdev_simple_event {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+/* VDEV start response status codes */
+/* VDEV successfully started */
+#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
+
+/* requested VDEV not found */
+#define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID 0x1
+
+/* unsupported VDEV combination */
+#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED  0x2
+
+/* Beacon processing related command and event structures */
+struct wmi_bcn_tx_hdr {
+       __le32 vdev_id;
+       __le32 tx_rate;
+       __le32 tx_power;
+       __le32 bcn_len;
+} __packed;
+
+struct wmi_bcn_tx_cmd {
+       struct wmi_bcn_tx_hdr hdr;
+       u8 bcn[0]; /* variable-length beacon frame payload */
+} __packed;
+
+struct wmi_bcn_tx_arg {
+       u32 vdev_id;
+       u32 tx_rate;
+       u32 tx_power;
+       u32 bcn_len;
+       const void *bcn;
+};
+
+/* Beacon filter */
+#define WMI_BCN_FILTER_ALL   0 /* Filter all beacons */
+#define WMI_BCN_FILTER_NONE  1 /* Pass all beacons */
+#define WMI_BCN_FILTER_RSSI  2 /* Pass beacons with RSSI >= RSSI threshold */
+#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
+#define WMI_BCN_FILTER_SSID  4 /* Pass Beacons with matching SSID */
+
+struct wmi_bcn_filter_rx_cmd {
+       /* Filter ID */
+       __le32 bcn_filter_id;
+       /* Filter type - wmi_bcn_filter */
+       __le32 bcn_filter;
+       /* Buffer len */
+       __le32 bcn_filter_len;
+       /* Filter info (threshold, BSSID, RSSI) */
+       u8 *bcn_filter_buf;
+} __packed;
+
+/* Capabilities and IEs to be passed to firmware */
+struct wmi_bcn_prb_info {
+       /* Capabilities */
+       __le32 caps;
+       /* ERP info */
+       __le32 erp;
+       /* Advanced capabilities */
+       /* HT capabilities */
+       /* HT Info */
+       /* ibss_dfs */
+       /* wpa Info */
+       /* rsn Info */
+       /* rrm info */
+       /* ath_ext */
+       /* app IE */
+} __packed;
+
+struct wmi_bcn_tmpl_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* TIM IE offset from the beginning of the template. */
+       __le32 tim_ie_offset;
+       /* beacon probe capabilities and IEs */
+       struct wmi_bcn_prb_info bcn_prb_info;
+       /* beacon buffer length */
+       __le32 buf_len;
+       /* variable length data */
+       u8 data[1];
+} __packed;
+
+struct wmi_prb_tmpl_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* beacon probe capabilities and IEs */
+       struct wmi_bcn_prb_info bcn_prb_info;
+       /* beacon buffer length */
+       __le32 buf_len;
+       /* Variable length data */
+       u8 data[1];
+} __packed;
+
+enum wmi_sta_ps_mode {
+       /* disable power save for the given STA VDEV */
+       WMI_STA_PS_MODE_DISABLED = 0,
+       /* enable power save for the given STA VDEV */
+       WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+struct wmi_sta_powersave_mode_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+
+       /*
+        * Power save mode
+        * (see enum wmi_sta_ps_mode)
+        */
+       __le32 sta_ps_mode;
+} __packed;
+
+enum wmi_csa_offload_en {
+       WMI_CSA_OFFLOAD_DISABLE = 0,
+       WMI_CSA_OFFLOAD_ENABLE = 1,
+};
+
+struct wmi_csa_offload_enable_cmd {
+       __le32 vdev_id;
+       __le32 csa_offload_enable;
+} __packed;
+
+struct wmi_csa_offload_chanswitch_cmd {
+       __le32 vdev_id;
+       struct wmi_channel chan;
+} __packed;
+
+/*
+ * This parameter controls the policy for retrieving frames from AP while the
+ * STA is in sleep state.
+ *
+ * Only takes effect if the sta_ps_mode is enabled
+ */
+enum wmi_sta_ps_param_rx_wake_policy {
+       /*
+        * Wake up whenever there is RX activity on the VDEV. In this mode
+        * the power save SM (state machine) will come out of sleep by either
+        * sending a null frame or a data frame (with PS==0) in response to the
+        * TIM bit set in the received beacon frame from the AP.
+        */
+       WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+
+       /*
+        * Here the power save state machine will not wake up in response to
+        * the TIM bit; instead it will send a PS-Poll or a UAPSD trigger based
+        * on the UAPSD configuration set up by the WMISET_PS_SET_UAPSD WMI
+        * command. When all access categories are delivery-enabled, the
+        * station will send a UAPSD trigger frame, otherwise it will send a
+        * PS-Poll.
+        */
+       WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/*
+ * Number of tx frames/beacon that cause the power save SM to wake up.
+ *
+ * Value 1 causes the SM to wake up for every TX. Value 0 has a special
+ * meaning: it causes the SM to never wake up. This is useful if you want
+ * to keep the system asleep all the time for some kind of test mode. The
+ * host can change this parameter at any time; it takes effect at the next
+ * tx frame.
+ */
+enum wmi_sta_ps_param_tx_wake_threshold {
+       WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+       WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+       /*
+        * Values greater than one indicate that many TX attempts per beacon
+        * interval before the STA will wake up
+        */
+};
+
+/*
+ * The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Polls as are necessary to retrieve the buffered BUs. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+       WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+       /*
+        * Values greater than 0 indicate the maximum number of PS-Poll frames
+        * FW will send before waking up.
+        */
+};
+
+/*
+ * This will include the delivery and trigger enabled state for every AC.
+ * This is the negotiated state with the AP. The host MLME needs to set this
+ * based on AP capability and the state set in the association request by the
+ * station MLME. The lower 8 bits of the value specify the UAPSD configuration.
+ */
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+       ((type ==  WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+
+enum wmi_sta_ps_param_uapsd {
+       WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+       WMI_STA_PS_UAPSD_AC0_TRIGGER_EN  = (1 << 1),
+       WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+       WMI_STA_PS_UAPSD_AC1_TRIGGER_EN  = (1 << 3),
+       WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+       WMI_STA_PS_UAPSD_AC2_TRIGGER_EN  = (1 << 5),
+       WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+       WMI_STA_PS_UAPSD_AC3_TRIGGER_EN  = (1 << 7),
+};
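+
+/*
+ * Editor's illustrative sketch (hypothetical helper): the bit-mask macro
+ * above lines up with the enum values, e.g.
+ * WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_TRIG) == (1 << 5)
+ * == WMI_STA_PS_UAPSD_AC2_TRIGGER_EN.
+ */
+static inline u32 wmi_uapsd_ac_bit(u32 ac, bool trigger)
+{
+       return WMI_UAPSD_AC_BIT_MASK(ac, trigger ? WMI_UAPSD_AC_TYPE_TRIG :
+                                                  WMI_UAPSD_AC_TYPE_DELI);
+}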
+
+enum wmi_sta_powersave_param {
+       /*
+        * Controls how frames are retrieved from the AP while the STA is sleeping
+        *
+        * (see enum wmi_sta_ps_param_rx_wake_policy)
+        */
+       WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+
+       /*
+        * The STA will go active after this many TX frames
+        *
+        * (see enum wmi_sta_ps_param_tx_wake_threshold)
+        */
+       WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+
+       /*
+        * Number of PS-Poll to send before STA wakes up
+        *
+        * (see enum wmi_sta_ps_param_pspoll_count)
+        *
+        */
+       WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+
+       /*
+        * TX/RX inactivity time in msec before going to sleep.
+        *
+        * The power save SM will monitor tx/rx activity on the VDEV; if there
+        * is no activity for the specified number of msec, the power save SM
+        * will go to sleep.
+        */
+       WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+
+       /*
+        * Set uapsd configuration.
+        *
+        * (see enum wmi_sta_ps_param_uapsd)
+        */
+       WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+struct wmi_sta_powersave_param_cmd {
+       __le32 vdev_id;
+       __le32 param_id; /* %WMI_STA_PS_PARAM_ */
+       __le32 param_value;
+} __packed;
+
+/* No MIMO power save */
+#define WMI_STA_MIMO_PS_MODE_DISABLE
+/* MIMO power save mode static */
+#define WMI_STA_MIMO_PS_MODE_STATIC
+/* MIMO power save mode dynamic */
+#define WMI_STA_MIMO_PS_MODE_DYNAMIC
+
+struct wmi_sta_mimo_ps_mode_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* mimo powersave mode as defined above */
+       __le32 mimo_pwrsave_mode;
+} __packed;
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+       WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+       WMI_AP_PS_UAPSD_AC0_TRIGGER_EN  = (1 << 1),
+       WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+       WMI_AP_PS_UAPSD_AC1_TRIGGER_EN  = (1 << 3),
+       WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+       WMI_AP_PS_UAPSD_AC2_TRIGGER_EN  = (1 << 5),
+       WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+       WMI_AP_PS_UAPSD_AC3_TRIGGER_EN  = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+       WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+       WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+       WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+       WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+       MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+/*
+ * AP power save parameter
+ * Set a power save specific parameter for a peer station
+ */
+enum wmi_ap_ps_peer_param {
+       /* Set uapsd configuration for a given peer.
+        *
+        * Include the delivery and trigger enabled state for every AC.
+        * The host MLME needs to set this based on AP capability and the
+        * station's request set in the association request received from the
+        * station.
+        *
+        * Lower 8 bits of the value specify the UAPSD configuration.
+        *
+        * (see enum wmi_ap_ps_param_uapsd)
+        * The default value is 0.
+        */
+       WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+       /*
+        * Set the service period for a UAPSD capable station
+        *
+        * The service period from the WME IE in the (re)assoc request frame.
+        *
+        * (see enum wmi_ap_ps_peer_param_max_sp)
+        */
+       WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+       /* Time in seconds for aging out buffered frames for STA in PS */
+       WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+};
+
+struct wmi_ap_ps_peer_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+
+       /* AP powersave param (see enum wmi_ap_ps_peer_param) */
+       __le32 param_id;
+
+       /* AP powersave param value */
+       __le32 param_value;
+} __packed;
+
+/* 128 clients = 4 words */
+#define WMI_TIM_BITMAP_ARRAY_SIZE 4
+
+struct wmi_tim_info {
+       __le32 tim_len;
+       __le32 tim_mcast;
+       __le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
+       __le32 tim_changed;
+       __le32 tim_num_ps_pending;
+} __packed;
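+
+/*
+ * Editor's illustrative sketch (hypothetical helper, assuming AID i maps to
+ * bit i of the bitmap): testing whether a given association ID has traffic
+ * pending in the TIM. The caller must keep aid below 128 (4 words * 32).
+ */
+static inline bool wmi_tim_bitmap_test_aid(const struct wmi_tim_info *tim, u16 aid)
+{
+       return !!(__le32_to_cpu(tim->tim_bitmap[aid / 32]) & BIT(aid % 32));
+}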
+
+/* Maximum number of NOA Descriptors supported */
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+#define WMI_P2P_OPPPS_ENABLE_BIT       BIT(0)
+#define WMI_P2P_OPPPS_CTWINDOW_OFFSET  1
+#define WMI_P2P_NOA_CHANGED_BIT        BIT(0)
+
+struct wmi_p2p_noa_info {
+       /* Bit 0 - Flag to indicate an update in NOA schedule
+          Bits 7-1 - Reserved */
+       u8 changed;
+       /* NOA index */
+       u8 index;
+       /* Bit 0 - Opp PS state of the AP
+          Bits 1-7 - Ctwindow in TUs */
+       u8 ctwindow_oppps;
+       /* Number of NOA descriptors */
+       u8 num_descriptors;
+
+       struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_bcn_info {
+       struct wmi_tim_info tim_info;
+       struct wmi_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_host_swba_event {
+       __le32 vdev_map;
+       struct wmi_bcn_info bcn_info[1];
+} __packed;
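+
+/*
+ * Editor's illustrative sketch (hypothetical helper, assuming bcn_info[]
+ * entries are packed in ascending vdev_id order, one per bit set in
+ * vdev_map): the index of a vdev's entry is the number of lower set bits.
+ */
+static inline int wmi_swba_bcn_info_idx(u32 vdev_map, u32 vdev_id)
+{
+       return hweight32(vdev_map & (BIT(vdev_id) - 1));
+}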
+
+#define WMI_MAX_AP_VDEV 16
+
+struct wmi_tbtt_offset_event {
+       __le32 vdev_map;
+       __le32 tbttoffset_list[WMI_MAX_AP_VDEV];
+} __packed;
+
+struct wmi_peer_create_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_fixed_rate {
+       /*
+        * rate mode. 0: disable fixed rate (auto rate)
+        *   1: legacy (non 11n) rate specified as ieee rate 2*Mbps
+        *   2: ht20 11n rate specified as mcs index
+        *   3: ht40 11n rate specified as mcs index
+        */
+       __le32  rate_mode;
+       /*
+        * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB)
+        * and series 3 is stored at byte 3 (MSB)
+        */
+       __le32  rate_series;
+       /*
+        * 4 retry counts for 4 rate series. retry count for rate 0 is stored
+        * in byte 0 (LSB) and retry count for rate 3 is stored at byte 3
+        * (MSB)
+        */
+       __le32  rate_retries;
+} __packed;
+
+struct wmi_peer_fixed_rate_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* fixed rate */
+       struct wmi_fixed_rate peer_fixed_rate;
+} __packed;
+
+#define WMI_MGMT_TID    17
+
+struct wmi_addba_clear_resp_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_addba_send_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* Tid number */
+       __le32 tid;
+       /* Buffer/Window size*/
+       __le32 buffersize;
+} __packed;
+
+struct wmi_delba_send_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* Tid number */
+       __le32 tid;
+       /* Is Initiator */
+       __le32 initiator;
+       /* Reason code */
+       __le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+       /* unique id identifying the vdev, generated by the caller */
+       __le32 vdev_id;
+       /* peer mac address */
+       struct wmi_mac_addr peer_macaddr;
+       /* Tid number */
+       __le32 tid;
+       /* status code */
+       __le32 statuscode;
+} __packed;
+
+struct wmi_send_singleamsdu_cmd {
+       /* unique id identifying the vdev, generated by the caller */
+       __le32 vdev_id;
+       /* peer mac address */
+       struct wmi_mac_addr peer_macaddr;
+       /* Tid number */
+       __le32 tid;
+} __packed;
+
+enum wmi_peer_smps_state {
+       WMI_PEER_SMPS_PS_NONE = 0x0,
+       WMI_PEER_SMPS_STATIC  = 0x1,
+       WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_param {
+       WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+       WMI_PEER_AMPDU      = 0x2,
+       WMI_PEER_AUTHORIZE  = 0x3,
+       WMI_PEER_CHAN_WIDTH = 0x4,
+       WMI_PEER_NSS        = 0x5,
+       WMI_PEER_USE_4ADDR  = 0x6
+};
+
+struct wmi_peer_set_param_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 param_id;
+       __le32 param_value;
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+struct wmi_rate_set {
+       /* total number of rates */
+       __le32 num_rates;
+       /*
+        * rates (each 8bit value) packed into a 32 bit word.
+        * the rates are filled from least significant byte to most
+        * significant byte.
+        */
+       __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+} __packed;
+
+struct wmi_rate_set_arg {
+       unsigned int num_rates;
+       u8 rates[MAX_SUPPORTED_RATES];
+};
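+
+/*
+ * Editor's illustrative sketch (hypothetical helper): packing the 8-bit
+ * rates of wmi_rate_set_arg into the 32-bit words of wmi_rate_set,
+ * least significant byte first as the wmi_rate_set comment describes.
+ */
+static inline void wmi_rate_set_pack(struct wmi_rate_set *rs,
+                                    const struct wmi_rate_set_arg *arg)
+{
+       unsigned int i;
+
+       rs->num_rates = __cpu_to_le32(arg->num_rates);
+       memset(rs->rates, 0, sizeof(rs->rates));
+       for (i = 0; i < arg->num_rates && i < MAX_SUPPORTED_RATES; i++)
+               rs->rates[i / 4] |= __cpu_to_le32((u32)arg->rates[i] <<
+                                                 (8 * (i % 4)));
+}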
+
+/*
+ * NOTE: It would be a good idea to represent the Tx MCS
+ * info in one word and Rx in another word. This is split
+ * into multiple words for convenience
+ */
+struct wmi_vht_rate_set {
+       __le32 rx_max_rate; /* Max Rx data rate */
+       __le32 rx_mcs_set;  /* Negotiated RX VHT rates */
+       __le32 tx_max_rate; /* Max Tx data rate */
+       __le32 tx_mcs_set;  /* Negotiated TX VHT rates */
+} __packed;
+
+struct wmi_vht_rate_set_arg {
+       u32 rx_max_rate;
+       u32 rx_mcs_set;
+       u32 tx_max_rate;
+       u32 tx_mcs_set;
+};
+
+struct wmi_peer_set_rates_cmd {
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* legacy rate set */
+       struct wmi_rate_set peer_legacy_rates;
+       /* ht rate set */
+       struct wmi_rate_set peer_ht_rates;
+} __packed;
+
+struct wmi_peer_set_q_empty_callback_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       __le32 callback_enable;
+} __packed;
+
+#define WMI_PEER_AUTH           0x00000001
+#define WMI_PEER_QOS            0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_APSD           0x00000800
+#define WMI_PEER_HT             0x00001000
+#define WMI_PEER_40MHZ          0x00002000
+#define WMI_PEER_STBC           0x00008000
+#define WMI_PEER_LDPC           0x00010000
+#define WMI_PEER_DYN_MIMOPS     0x00020000
+#define WMI_PEER_STATIC_MIMOPS  0x00040000
+#define WMI_PEER_SPATIAL_MUX    0x00200000
+#define WMI_PEER_VHT            0x02000000
+#define WMI_PEER_80MHZ          0x04000000
+#define WMI_PEER_PMF            0x08000000
+
+/*
+ * Peer rate capabilities.
+ *
+ * This is of interest to the ratecontrol
+ * module which resides in the firmware. The bit definitions are
+ * consistent with that defined in if_athrate.c.
+ */
+#define WMI_RC_DS_FLAG          0x01
+#define WMI_RC_CW40_FLAG        0x02
+#define WMI_RC_SGI_FLAG         0x04
+#define WMI_RC_HT_FLAG          0x08
+#define WMI_RC_RTSCTS_FLAG      0x10
+#define WMI_RC_TX_STBC_FLAG     0x20
+#define WMI_RC_RX_STBC_FLAG     0xC0
+#define WMI_RC_RX_STBC_FLAG_S   6
+#define WMI_RC_WEP_TKIP_FLAG    0x100
+#define WMI_RC_TS_FLAG          0x200
+#define WMI_RC_UAPSD_FLAG       0x400
+
+/* Maximum listen interval supported by hw in units of beacon interval */
+#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
+
+struct wmi_peer_assoc_complete_cmd {
+       struct wmi_mac_addr peer_macaddr;
+       __le32 vdev_id;
+       __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
+       __le32 peer_associd; /* 16 LSBs */
+       __le32 peer_flags;
+       __le32 peer_caps; /* 16 LSBs */
+       __le32 peer_listen_intval;
+       __le32 peer_ht_caps;
+       __le32 peer_max_mpdu;
+       __le32 peer_mpdu_density; /* 0..16 */
+       __le32 peer_rate_caps;
+       struct wmi_rate_set peer_legacy_rates;
+       struct wmi_rate_set peer_ht_rates;
+       __le32 peer_nss; /* num of spatial streams */
+       __le32 peer_vht_caps;
+       __le32 peer_phymode;
+       struct wmi_vht_rate_set peer_vht_rates;
+       /* HT Operation Element of the peer. Five bytes packed into a
+        * 2-element INT32 array, filled from LSB to MSB. */
+       __le32 peer_ht_info[2];
+} __packed;
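+
+/*
+ * Editor's illustrative sketch (hypothetical helper): packing the five
+ * octets of the HT Operation Element into peer_ht_info[] LSB-first, as the
+ * comment above describes (bytes 0-3 in word 0, byte 4 in word 1).
+ */
+static inline void wmi_pack_ht_info(__le32 dst[2], const u8 ht_op[5])
+{
+       dst[0] = __cpu_to_le32(ht_op[0] | ht_op[1] << 8 |
+                              ht_op[2] << 16 | (u32)ht_op[3] << 24);
+       dst[1] = __cpu_to_le32(ht_op[4]);
+}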
+
+struct wmi_peer_assoc_complete_arg {
+       u8 addr[ETH_ALEN];
+       u32 vdev_id;
+       bool peer_reassoc;
+       u16 peer_aid;
+       u32 peer_flags; /* see %WMI_PEER_ */
+       u16 peer_caps;
+       u32 peer_listen_intval;
+       u32 peer_ht_caps;
+       u32 peer_max_mpdu;
+       u32 peer_mpdu_density; /* 0..16 */
+       u32 peer_rate_caps; /* see %WMI_RC_ */
+       struct wmi_rate_set_arg peer_legacy_rates;
+       struct wmi_rate_set_arg peer_ht_rates;
+       u32 peer_num_spatial_streams;
+       u32 peer_vht_caps;
+       enum wmi_phy_mode peer_phymode;
+       struct wmi_vht_rate_set_arg peer_vht_rates;
+};
+
+struct wmi_peer_add_wds_entry_cmd {
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* wds MAC addr */
+       struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_remove_wds_entry_cmd {
+       /* wds MAC addr */
+       struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_q_empty_callback_event {
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+/*
+ * Channel info WMI event
+ */
+struct wmi_chan_info_event {
+       __le32 err_code;
+       __le32 freq;
+       __le32 cmd_flags;
+       __le32 noise_floor;
+       __le32 rx_clear_count;
+       __le32 cycle_count;
+} __packed;
+
+/* Beacon filter wmi command info */
+#define BCN_FLT_MAX_SUPPORTED_IES      256
+#define BCN_FLT_MAX_ELEMS_IE_LIST      (BCN_FLT_MAX_SUPPORTED_IES / 32)
+
+struct bss_bcn_stats {
+       __le32 vdev_id;
+       __le32 bss_bcnsdropped;
+       __le32 bss_bcnsdelivered;
+} __packed;
+
+struct bcn_filter_stats {
+       __le32 bcns_dropped;
+       __le32 bcns_delivered;
+       __le32 activefilters;
+       struct bss_bcn_stats bss_stats;
+} __packed;
+
+struct wmi_add_bcn_filter_cmd {
+       u32 vdev_id;
+       u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST];
+} __packed;
+
+enum wmi_sta_keepalive_method {
+       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+       WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
+};
+
+/* note: ip4 addresses are in network byte order, i.e. big endian */
+struct wmi_sta_keepalive_arp_resp {
+       __be32 src_ip4_addr;
+       __be32 dest_ip4_addr;
+       struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+       __le32 vdev_id;
+       __le32 enabled;
+       __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+       __le32 interval; /* in seconds */
+       struct wmi_sta_keepalive_arp_resp arp_resp;
+} __packed;
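+
+/*
+ * Editor's illustrative sketch (hypothetical helper): arming a NULL-frame
+ * keepalive. Note the IPv4 fields of the ARP variant stay big endian while
+ * every other field is little endian.
+ */
+static inline void wmi_sta_keepalive_fill(struct wmi_sta_keepalive_cmd *cmd,
+                                         u32 vdev_id, u32 interval_secs)
+{
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->enabled = __cpu_to_le32(1);
+       cmd->method = __cpu_to_le32(WMI_STA_KEEPALIVE_METHOD_NULL_FRAME);
+       cmd->interval = __cpu_to_le32(interval_secs);
+}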
+
+#define ATH10K_RTS_MAX         2347
+#define ATH10K_FRAGMT_THRESHOLD_MIN    540
+#define ATH10K_FRAGMT_THRESHOLD_MAX    2346
+
+#define WMI_MAX_EVENT 0x1000
+/* Maximum number of pending TXed WMI packets */
+#define WMI_MAX_PENDING_TX_COUNT 128
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+/* By default disable power save for IBSS */
+#define ATH10K_DEFAULT_ATIM 0
+
+struct ath10k;
+struct ath10k_vif;
+
+int ath10k_wmi_attach(struct ath10k *ar);
+void ath10k_wmi_detach(struct ath10k *ar);
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
+void ath10k_wmi_flush_tx(struct ath10k *ar);
+
+int ath10k_wmi_connect_htc_service(struct ath10k *ar);
+int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
+                               const struct wmi_channel_arg *);
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
+int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+                                 u16 rd5g, u16 ctl2g, u16 ctl5g);
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
+                             u32 value);
+int ath10k_wmi_cmd_init(struct ath10k *ar);
+int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
+int ath10k_wmi_stop_scan(struct ath10k *ar,
+                        const struct wmi_stop_scan_arg *arg);
+int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+                          enum wmi_vdev_type type,
+                          enum wmi_vdev_subtype subtype,
+                          const u8 macaddr[ETH_ALEN]);
+int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id);
+int ath10k_wmi_vdev_start(struct ath10k *ar,
+                         const struct wmi_vdev_start_request_arg *);
+int ath10k_wmi_vdev_restart(struct ath10k *ar,
+                           const struct wmi_vdev_start_request_arg *);
+int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id);
+int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+                      const u8 *bssid);
+int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
+int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+                             enum wmi_vdev_param param_id, u32 param_value);
+int ath10k_wmi_vdev_install_key(struct ath10k *ar,
+                               const struct wmi_vdev_install_key_arg *arg);
+int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+                   const u8 peer_addr[ETH_ALEN]);
+int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+                   const u8 peer_addr[ETH_ALEN]);
+int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+                  const u8 peer_addr[ETH_ALEN], u32 tid_bitmap);
+int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
+                             const u8 *peer_addr,
+                             enum wmi_peer_param param_id, u32 param_value);
+int ath10k_wmi_peer_assoc(struct ath10k *ar,
+                         const struct wmi_peer_assoc_complete_arg *arg);
+int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+                         enum wmi_sta_ps_mode psmode);
+int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+                               enum wmi_sta_powersave_param param_id,
+                               u32 value);
+int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+                              enum wmi_ap_ps_peer_param param_id, u32 value);
+int ath10k_wmi_scan_chan_list(struct ath10k *ar,
+                             const struct wmi_scan_chan_list_arg *arg);
+int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+                       const struct wmi_pdev_set_wmm_params_arg *arg);
+int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
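+
+/*
+ * Editor's illustrative sketch (hypothetical, inferred only from the
+ * prototypes above): one plausible WMI bring-up sequence. The real call
+ * order lives in the driver core and may differ.
+ */
+static inline int example_wmi_bringup(struct ath10k *ar)
+{
+       int ret;
+
+       ret = ath10k_wmi_attach(ar);
+       if (ret)
+               return ret;
+
+       ret = ath10k_wmi_connect_htc_service(ar);
+       if (ret)
+               return ret;
+
+       ret = ath10k_wmi_wait_for_service_ready(ar);
+       if (ret)
+               return ret;
+
+       ret = ath10k_wmi_cmd_init(ar);
+       if (ret)
+               return ret;
+
+       return ath10k_wmi_wait_for_unified_ready(ar);
+}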
+
+#endif /* _WMI_H_ */