/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "targaddrs.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
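
/*
 * Usage sketch (an illustration, not from the original source): the power
 * save knob can be set at load time, e.g.
 *
 *   modprobe ath10k_pci ath10k_target_ps=1
 *
 * and, since the permissions are 0644, toggled later by root via
 * /sys/module/ath10k_pci/parameters/ath10k_target_ps.
 */
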
#define QCA988X_1_0_DEVICE_ID	(0xabcd)
#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
	{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
	/* could be moved to share CE3 */
	/* target->host HTT + HTC control */
	{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
	/* target->host WMI */
	{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
	/* host->target WMI */
	{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
	/* host->target HTT */
	{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
		    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
	/* unused */
	{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
	/* Target autonomous hif_memcpy */
	{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
	/* ce_diag, the Diagnostic Window */
	{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};
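
/*
 * Reading aid (an assumption inferred from how the entries above are used,
 * not a definitive statement of struct ce_attr): the initializers appear to
 * be ordered { flags, priority, src_nentries, src_sz_max, dest_nentries,
 * reserved }. So, e.g., CE2 has no source ring, a 2048-byte max transfer
 * size, and 32 target->host destination ring entries.
 */
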
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
	{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
	/* target->host HTT + HTC control */
	{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
	/* target->host WMI */
	{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* host->target WMI */
	{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* host->target HTT */
	{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
	/* NB: 50% of src nentries, since tx has 2 frags */
	/* unused */
	{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* Reserved for target autonomous hif_memcpy */
	{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
	/* CE7 used only by Host */
};
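
/*
 * Reading aid (again an assumption inferred from usage, not from a struct
 * definition shown here): each ce_pipe_config initializer looks like
 * { pipenum, pipedir, nentries, nbytes_max, flags, reserved }. This table
 * is copied verbatim into target memory by ath10k_pci_init_config() below.
 */
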
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ce_state *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn.
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					  ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}
		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}
		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}
		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}
		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}
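
/*
 * Illustrative sketch (not part of the original file): how a debug path
 * might pull a word out of target RAM through the diagnostic window. The
 * target offset used here is made up for the example.
 */
static int __maybe_unused ath10k_pci_diag_example(struct ath10k *ar)
{
	u32 val = 0;
	int ret;

	/* 4-byte aligned read; addresses below DRAM_BASE_ADDRESS are
	 * treated as register space by ath10k_pci_diag_read_access(). */
	ret = ath10k_pci_diag_read_access(ar, DRAM_BASE_ADDRESS + 0x1000,
					  &val);
	if (ret)
		return ret;

	ath10k_dbg(ATH10K_DBG_PCI, "diag word: 0x%08x\n", val);
	return 0;
}
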
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ce_state *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}
		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}
		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}
		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}
		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;

	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
	int n = 100;

	while (n-- && !ath10k_pci_target_is_awake(ar))
		msleep(10);

	if (n < 0)
		ath10k_warn("Unable to wake up target\n");
}

void ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			break;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target takes too long to wake up (awake count %d)\n",
				    atomic_read(&ar_pci->keep_awake_count));
			break;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;
		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}
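
/*
 * Illustrative sketch (not from the original file): wake/sleep calls are
 * refcounted via keep_awake_count, so every wake must be balanced by
 * exactly one sleep once the register traffic is done.
 */
static void __maybe_unused ath10k_pci_wake_sleep_example(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_wake(ar);		/* keep_awake_count 0 -> 1 */
	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	ath10k_pci_sleep(ar);		/* keep_awake_count 1 -> 0 */

	ath10k_dbg(ATH10K_DBG_PCI, "fw indicator: 0x%08x\n", val);
}
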
/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
				    void *transfer_context,
				    u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	bool process = false;

	do {
		/*
		 * For the send completion of an item in sendlist, just
		 * increment num_sends_allowed. The upper layer callback will
		 * be triggered when last fragment is done with send.
		 */
		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
			spin_lock_bh(&pipe_info->pipe_lock);
			pipe_info->num_sends_allowed++;
			spin_unlock_bh(&pipe_info->pipe_lock);
			continue;
		}

		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->send_or_recv = HIF_CE_COMPLETE_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

		process = true;
	} while (ath10k_ce_completed_send_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id) == 0);

	/*
	 * If only some of the items within a sendlist have completed,
	 * don't invoke completion processing until the entire sendlist
	 * has been completed.
	 */
	if (!process)
		return;

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
				    void *transfer_context, u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id,
				    unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	do {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->send_or_recv = HIF_CE_COMPLETE_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

	} while (ath10k_ce_completed_recv_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id,
					       &flags) == 0);

	ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ce_state *ce_hdl = pipe_info->ce_hdl;
	struct ce_sendlist sendlist;
	unsigned int len;
	u32 flags = 0;
	int ret;

	memset(&sendlist, 0, sizeof(struct ce_sendlist));

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

	/* Make sure we have resources to handle this request */
	spin_lock_bh(&pipe_info->pipe_lock);
	if (!pipe_info->num_sends_allowed) {
		ath10k_warn("Pipe: %d is full\n", pipe_id);
		spin_unlock_bh(&pipe_info->pipe_lock);
		return -ENOSR;
	}
	pipe_info->num_sends_allowed--;
	spin_unlock_bh(&pipe_info->pipe_lock);

	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	if (ret)
		ath10k_warn("CE send failed: %p\n", nbuf);

	return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
	int ret;

	spin_lock_bh(&pipe_info->pipe_lock);
	ret = pipe_info->num_sends_allowed;
	spin_unlock_bh(&pipe_info->pipe_lock);

	return ret;
}
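
/*
 * Reading aid (assumption): num_sends_allowed acts as a per-pipe credit
 * count. ath10k_pci_hif_send_head() debits it under pipe_lock before each
 * send, and the send-completion path credits it back, so the value returned
 * above is a snapshot of how many sends the pipe can still absorb.
 */
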
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	if (ath10k_pci_diag_read_mem(ar, host_addr,
				     &reg_dump_area, sizeof(u32)) != 0) {
		ath10k_warn("could not read hi_failure_state\n");
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("could not dump FW Dump Area\n");
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait, since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct hif_ce_pipe_info *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
			pipe_info->num_sends_allowed = attr->src_nentries - 1;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(struct ath10k_pci_compl),
					GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->send_or_recv = HIF_CE_COMPLETE_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	int i;

	ath10k_ce_disable_interrupts(ar);

	/* Cancel the pending tasklet */
	tasklet_kill(&ar_pci->intr_tq);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = (struct sk_buff *)compl->transfer_context;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct hif_ce_pipe_info *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = (struct sk_buff *)compl->transfer_context;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
			cb->tx_completion(ar,
					  compl->transfer_context,
					  compl->transfer_id);
			send_done = 1;
		} else {
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
					    compl->pipe_info->pipe_num);
				break;
			}

			skb = (struct sk_buff *)compl->transfer_context;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
		}

		compl->send_or_recv = HIF_CE_COMPLETE_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		compl->pipe_info->num_sends_allowed += send_done;
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 5 unused   */
		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}
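
/*
 * Reading aid (assumption): a pipe counts as "polled" on the uplink side
 * exactly when its host CE was configured with CE_ATTR_DIS_INTR above,
 * i.e. CE4 (host->target HTT), which is serviced by polling from
 * ath10k_pci_hif_send_complete_check() rather than by interrupts.
 */
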
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("could not allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("could not dma map skbuff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("could not enqueue to pipe %d (%d)\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
				    pipe_num);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_ce(ar);
	if (ret) {
		ath10k_warn("could not start CE (%d)\n", ret);
		return ret;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("could not post rx pipes (%d)\n", ret);
		return ret;
	}

	ar_pci->started = 1;
	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ce_state *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ce_state *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Indicate the completion to higher layer to free
			 * the buffer
			 */
			ATH10K_SKB_CB(netbuf)->is_aborted = true;
			ar_pci->msg_callbacks_current.tx_completion(ar,
								    netbuf,
								    id);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		struct hif_ce_pipe_info *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
	 * by ath10k_pci_start_intr(). */
	ath10k_pci_disable_irqs(ar);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
	struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = wait_for_completion_timeout(&xfer.done,
					  BMI_COMMUNICATION_TIMEOUT_HZ);
	if (ret <= 0) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ret = -ETIMEDOUT;
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
				     void *transfer_context,
				     u32 ce_data,
				     unsigned int nbytes,
				     unsigned int transfer_id)
{
	struct bmi_xfer *xfer = transfer_context;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
				     void *transfer_context,
				     u32 ce_data,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct bmi_xfer *xfer = transfer_context;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}
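
/*
 * Illustrative sketch (not from the original file): a typical BMI
 * request/response round trip through the exchange helper above. The
 * request payload is a placeholder; real callers serialize BMI commands
 * elsewhere in the driver.
 */
static int __maybe_unused ath10k_pci_bmi_example(struct ath10k *ar)
{
	u8 req[8] = {};		/* hypothetical serialized BMI command */
	u8 resp[16];
	u32 resp_len = sizeof(resp);

	/* Blocks until the target answers or
	 * BMI_COMMUNICATION_TIMEOUT_HZ expires. */
	return ath10k_pci_hif_exchange_bmi_msg(ar, req, sizeof(req),
					       resp, &resp_len);
}
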
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_OUT,		/* out = UL = host -> target */
		0,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		4,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("Unable to read core ctrl\n");
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS, core_ctrl);
	if (ret)
		ath10k_warn("Unable to set interrupt mask\n");

	return ret;
}

static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
					offsetof(struct pcie_state,
						 config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
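
/*
 * Reading aid (an assumption summarizing the code above): init_config
 * mirrors the boot handshake - fetch the target's struct pcie_state address
 * via hi_interconnect_state, copy target_ce_config_wlan and
 * target_service_to_ce_map_wlan into it, disable PCIe L1, enable early IRAM
 * allocation, then set HI_OPTION_EARLY_CFG_DONE so the target proceeds with
 * its initialization.
 */
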
static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("Unable to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == ar_pci->ce_count - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag =
				ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
	}

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
	 */
	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	ath10k_ce_send_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_send_done, 0);

	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_recv_data);

	return 0;
}

static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}

static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_intr(ar);
	if (ret) {
		ath10k_err("could not start interrupt handling (%d)\n", ret);
		goto err;
	}

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ath10k_pci_device_reset(ar);

	ret = ath10k_pci_reset_target(ar);
	if (ret)
		goto err_intr;

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret)
		goto err_ps;

	ret = ath10k_pci_init_config(ar);
	if (ret)
		goto err_ce;

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU (%d)\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err_intr:
	ath10k_pci_stop_intr(ar);
err:
	return ret;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_stop_intr(ar);

	ath10k_pci_ce_deinit(ar);
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}

#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
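
/*
 * Reading aid (assumption): offset 0x44 is this device's PCI power
 * management control/status register, whose low bits hold the power state.
 * Writing 0x03 in suspend therefore requests D3hot, and clearing the low
 * byte in resume returns the device to D0 before state is restored.
 */
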
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.send_head		= ath10k_pci_hif_send_head,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.set_callbacks		= ath10k_pci_hif_set_callbacks,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
};

static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/*
		 * IMPORTANT: INTR_CLR register has to be set after
		 * INTR_ENABLE is set to 0, otherwise interrupt cannot be
		 * really cleared.
		 */
		iowrite32(0, ar_pci->mem +
			  (SOC_CORE_BASE_ADDRESS |
			   PCIE_INTR_ENABLE_ADDRESS));
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_CLR_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer.
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/* Enable Legacy PCI line interrupts */
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_ENABLE_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}
}

static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;
	int i;

	ret = pci_enable_msi_block(ar_pci->pdev, num);
	if (ret)
		return ret;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("request_irq(%d) failed %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);

		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("request_irq(%d) failed %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			pci_disable_msi(ar_pci->pdev);
			return ret;
		}
	}

	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
	return 0;
}

static int ath10k_pci_start_intr_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = pci_enable_msi(ar_pci->pdev);
	if (ret < 0)
		return ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0) {
		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	ath10k_info("MSI interrupt handling\n");
	return 0;
}

static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0)
		return ret;

	/*
	 * Make sure to wake the Target before enabling Legacy
	 * Interrupt.
	 */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	/*
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	iowrite32(PCIE_INTR_FIRMWARE_MASK |
		  PCIE_INTR_CE_MASK_ALL,
		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_info("legacy interrupt handling\n");
	return 0;
}

static int ath10k_pci_start_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int num = MSI_NUM_REQUEST;
	int ret;
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long) ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr,
			     ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}

	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
		num = 1;

	if (num > 1) {
		ret = ath10k_pci_start_intr_msix(ar, num);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
		num = 1;
	}

	if (num == 1) {
		ret = ath10k_pci_start_intr_msi(ar);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
			    ret);
		num = 0;
	}

	ret = ath10k_pci_start_intr_legacy(ar);

exit:
	ar_pci->num_msi_intrs = num;
	ar_pci->ce_count = CE_COUNT;
	return ret;
}

static void ath10k_pci_stop_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);

	if (ar_pci->num_msi_intrs > 0)
		pci_disable_msi(ar_pci->pdev);
}

static int ath10k_pci_reset_target(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */

	/* Wait for Target to finish initialization before we proceed. */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("Target stalled\n");
		iowrite32(PCIE_SOC_WAKE_RESET,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
		return -EIO;
	}

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	return 0;
}

static void ath10k_pci_device_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *mem = ar_pci->mem;
	int i;
	u32 val;

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_target_is_awake(ar))
			break;
		msleep(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
					  RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}

static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
			ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}

static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_1_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
		break;
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("ath10k_core_create failed!\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	/* Enable QCA988X_1.0 HW workarounds */
	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
		spin_lock_init(&ar_pci->hw_v1_workaround_lock);

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("cannot assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("cannot enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("PCI MMIO reservation error: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures are limited to 32-bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("32-bit DMA not available: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("cannot enable 32-bit consistent DMA\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("PCI iomap error\n");
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ar_pci->cacheline_sz = dma_get_cache_alignment();

	ret = ath10k_core_register(ar);
	if (ret) {
		ath10k_err("could not register driver core (%d)\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	pci_set_drvdata(pdev, NULL);
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("pci_register_driver failed [%d]\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);