/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};
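
/*
 * The PCI core matches the id table above against devices found on the
 * bus and probes this driver for each QCA988X v2.0 it discovers. The
 * table is normally also exported with MODULE_DEVICE_TABLE(pci, ...) so
 * that userspace module autoloading works.
 */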

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};
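
/*
 * In the attributes above, src_nentries sizes the host->target send ring
 * and dest_nentries the target->host receive ring, so a CE with
 * src_nentries == 0 is receive-only and vice versa; src_sz_max caps a
 * single transfer. CE_ATTR_DIS_INTR on CE4 suppresses send-completion
 * interrupts, which is why the HTT pipe is polled (see
 * ath10k_pci_hif_map_service_to_pipe() below).
 */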

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = 0,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = 1,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 512,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE2: target->host WMI */
	{
		.pipenum = 2,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE3: host->target WMI */
	{
		.pipenum = 3,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE4: host->target HTT */
	{
		.pipenum = 4,
		.pipedir = PIPEDIR_OUT,
		.nentries = 256,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = 5,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = 6,
		.pipedir = PIPEDIR_INOUT,
		.nentries = 32,
		.nbytes_max = 4096,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE7 used only by Host */
};
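
/*
 * This table must stay consistent with host_ce_config_wlan above: for
 * each CE the direction and maximum transfer size are paired with the
 * host-side attributes. The "50% of src nentries" note appears to refer
 * to CE4, whose host-side ring (CE_HTT_H2T_MSG_SRC_NENTRIES) accounts
 * for tx frames that occupy two fragments each.
 */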

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn.
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					  ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send from Target(!) address to Host buffer.
		 *
		 * The address supplied by the caller is in the Target CPU
		 * virtual address space. In order to use this address with
		 * the diagnostic CE, convert it from Target CPU virtual
		 * address space to CE address space.
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes,
				     0, 0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32)address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else {
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);
	}

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}
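
/*
 * Both diagnostic accessors rely on a simple address split: anything
 * below DRAM_BASE_ADDRESS is treated as register space and accessed
 * directly through the BAR, anything at or above it as target memory
 * reached through the diagnostic CE. Callers must not pass a range that
 * straddles that boundary.
 */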

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}
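
/*
 * A minimal usage sketch (illustrative only, not part of the original
 * driver; the helper name is hypothetical): a read-modify-write of a
 * 4-byte target location built from the two accessors above.
 */
static int __maybe_unused ath10k_pci_diag_set_bits(struct ath10k *ar,
						   u32 address, u32 bits)
{
	u32 val;
	int ret;

	/* The accessors pick the register vs. memory path from the address. */
	ret = ath10k_pci_diag_read_access(ar, address, &val);
	if (ret)
		return ret;

	return ath10k_pci_diag_write_access(ar, address, val | bits);
}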

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;

	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
	int n = 100;

	while (n-- && !ath10k_pci_target_is_awake(ar))
		msleep(10);

	if (n < 0)
		ath10k_warn("unable to wake up target\n");
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -ETIMEDOUT;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}
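
/*
 * A minimal sketch (illustrative only, not part of the original driver;
 * the helper name is hypothetical) of the wake/sleep protocol above:
 * MMIO access while the target may be power-gated is bracketed by a
 * wake/sleep pair, and keep_awake_count makes the pairs safely nestable.
 */
static u32 __maybe_unused ath10k_pci_read32_awake(struct ath10k *ar,
						  u32 offset)
{
	u32 val;

	ath10k_pci_wake(ar);	/* refcounted; gated on ath10k_target_ps */
	val = ath10k_pci_read32(ar, offset);
	ath10k_pci_sleep(ar);	/* allow sleep once the count drops to 0 */

	return val;
}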

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}
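
/*
 * Note that both CE callbacks above only harvest completions into
 * ar_pci->compl_process; the actual tx/rx handling happens in
 * ath10k_pci_process_ce(), which serializes everything because the
 * upper layers cannot handle completions in parallel.
 */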

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
	unsigned int len;
	u32 flags = 0;
	int ret;

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long)skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	/* Make sure we have resources to handle this request */
	spin_lock_bh(&pipe_info->pipe_lock);
	if (!pipe_info->num_sends_allowed) {
		ath10k_warn("Pipe: %d is full\n", pipe_id);
		spin_unlock_bh(&pipe_info->pipe_lock);
		return -ENOSR;
	}
	pipe_info->num_sends_allowed--;
	spin_unlock_bh(&pipe_info->pipe_lock);

	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
				      skb_cb->paddr, len, flags);
	if (ret)
		ath10k_warn("CE send failed: %p\n", nbuf);

	return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
	int ret;

	spin_lock_bh(&pipe_info->pipe_lock);
	ret = pipe_info->num_sends_allowed;
	spin_unlock_bh(&pipe_info->pipe_lock);

	return ret;
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	if (ath10k_pci_diag_read_mem(ar, host_addr,
				     &reg_dump_area, sizeof(u32)) != 0) {
		ath10k_warn("could not read hi_failure_state\n");
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("could not dump FW Dump Area\n");
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
			pipe_info->num_sends_allowed = attr->src_nentries - 1;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}
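
/*
 * The free list built above is sized for the worst case: one completion
 * object per send ring entry plus one per receive ring entry, so the
 * interrupt path never has to allocate. num_sends_allowed starts at
 * src_nentries - 1, keeping one ring slot in reserve (the usual
 * full-vs-empty ring convention).
 */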

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	int i;

	ath10k_ce_disable_interrupts(ar);

	/* Cancel the pending tasklet */
	tasklet_kill(&ar_pci->intr_tq);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = compl->skb;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = compl->skb;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->skb,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
					    compl->pipe_info->pipe_num);
				break;
			}

			skb = compl->skb;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		compl->pipe_info->num_sends_allowed += send_done;
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}
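
/*
 * One subtlety in the loop above: send_done is set on the first SEND
 * completion and never cleared, so every completion processed after it
 * (RECV ones included) also credits num_sends_allowed on its pipe.
 */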

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 5 unused   */
		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("could not allocate skbuff for pipe %d\n",
				    pipe_info->pipe_num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("could not dma map skbuff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("could not enqueue to pipe %d (%d)\n",
				    pipe_info->pipe_num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
				    pipe_num);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}
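
/*
 * Each receive pipe is filled to dest_nentries - 1 buffers, leaving one
 * ring slot unused; if any pipe fails, every pipe populated so far is
 * unwound through ath10k_pci_rx_pipe_cleanup() before returning.
 */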

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_ce(ar);
	if (ret) {
		ath10k_warn("could not start CE (%d)\n", ret);
		return ret;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("could not post rx pipes (%d)\n", ret);
		return ret;
	}

	ar_pci->started = 1;
	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/*
		 * Indicate the completion to higher layer to free
		 * the buffer
		 */
		ATH10K_SKB_CB(netbuf)->is_aborted = true;
		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
	 * by ath10k_pci_start_intr(). */
	ath10k_pci_disable_irqs(ar);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = wait_for_completion_timeout(&xfer.done,
					  BMI_COMMUNICATION_TIMEOUT_HZ);
	if (ret <= 0) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ret = -ETIMEDOUT;
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}
1499
1500 /*
1501  * Map from service/endpoint to Copy Engine.
1502  * This table is derived from the CE_PCI TABLE, above.
1503  * It is passed to the Target at startup for use by firmware.
1504  */
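/*
 * Each entry is positional: { HTC service id, pipe direction, CE pipe number }.
 * The all-zero entry at the end is a sentinel and must stay last.
 */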
1505 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1506         {
1507                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1508                  PIPEDIR_OUT,           /* out = UL = host -> target */
1509                  3,
1510         },
1511         {
1512                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1513                  PIPEDIR_IN,            /* in = DL = target -> host */
1514                  2,
1515         },
1516         {
1517                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1518                  PIPEDIR_OUT,           /* out = UL = host -> target */
1519                  3,
1520         },
1521         {
1522                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1523                  PIPEDIR_IN,            /* in = DL = target -> host */
1524                  2,
1525         },
1526         {
1527                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1528                  PIPEDIR_OUT,           /* out = UL = host -> target */
1529                  3,
1530         },
1531         {
1532                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1533                  PIPEDIR_IN,            /* in = DL = target -> host */
1534                  2,
1535         },
1536         {
1537                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1538                  PIPEDIR_OUT,           /* out = UL = host -> target */
1539                  3,
1540         },
1541         {
1542                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1543                  PIPEDIR_IN,            /* in = DL = target -> host */
1544                  2,
1545         },
1546         {
1547                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1548                  PIPEDIR_OUT,           /* out = UL = host -> target */
1549                  3,
1550         },
1551         {
1552                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1553                  PIPEDIR_IN,            /* in = DL = target -> host */
1554                  2,
1555         },
1556         {
1557                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1558                  PIPEDIR_OUT,           /* out = UL = host -> target */
1559                  0,             /* could be moved to 3 (share with WMI) */
1560         },
1561         {
1562                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1563                  PIPEDIR_IN,            /* in = DL = target -> host */
1564                  1,
1565         },
1566         {
1567                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1568                  PIPEDIR_OUT,           /* out = UL = host -> target */
1569                  0,
1570         },
1571         {
1572                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1573                  PIPEDIR_IN,            /* in = DL = target -> host */
1574                  1,
1575         },
1576         {
1577                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1578                  PIPEDIR_OUT,           /* out = UL = host -> target */
1579                  4,
1580         },
1581         {
1582                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1583                  PIPEDIR_IN,            /* in = DL = target -> host */
1584                  1,
1585         },
1586
1587         /* (Additions here) */
1588
1589         {                               /* Must be last */
1590                  0,
1591                  0,
1592                  0,
1593         },
1594 };
1595
1596 /*
1597  * Send an interrupt to the device to wake up the Target CPU
1598  * so it has an opportunity to notice any changed state.
1599  */
1600 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1601 {
1602         int ret;
1603         u32 core_ctrl;
1604
1605         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1606                                               CORE_CTRL_ADDRESS,
1607                                           &core_ctrl);
1608         if (ret) {
1609                 ath10k_warn("Unable to read core ctrl\n");
1610                 return ret;
1611         }
1612
1613         /* A_INUM_FIRMWARE interrupt to Target CPU */
1614         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1615
1616         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1617                                                CORE_CTRL_ADDRESS,
1618                                            core_ctrl);
1619         if (ret)
1620                 ath10k_warn("Unable to set interrupt mask\n");
1621
1622         return ret;
1623 }
1624
1625 static int ath10k_pci_init_config(struct ath10k *ar)
1626 {
1627         u32 interconnect_targ_addr;
1628         u32 pcie_state_targ_addr = 0;
1629         u32 pipe_cfg_targ_addr = 0;
1630         u32 svc_to_pipe_map = 0;
1631         u32 pcie_config_flags = 0;
1632         u32 ealloc_value;
1633         u32 ealloc_targ_addr;
1634         u32 flag2_value;
1635         u32 flag2_targ_addr;
1636         int ret = 0;
1637
1638         /* Download to Target the CE Config and the service-to-CE map */
1639         interconnect_targ_addr =
1640                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1641
1642         /* Supply Target-side CE configuration */
1643         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1644                                           &pcie_state_targ_addr);
1645         if (ret != 0) {
1646                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1647                 return ret;
1648         }
1649
1650         if (pcie_state_targ_addr == 0) {
1651                 ret = -EIO;
1652                 ath10k_err("Invalid pcie state addr\n");
1653                 return ret;
1654         }
1655
1656         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1657                                           offsetof(struct pcie_state,
1658                                                    pipe_cfg_addr),
1659                                           &pipe_cfg_targ_addr);
1660         if (ret != 0) {
1661                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1662                 return ret;
1663         }
1664
1665         if (pipe_cfg_targ_addr == 0) {
1666                 ret = -EIO;
1667                 ath10k_err("Invalid pipe cfg addr\n");
1668                 return ret;
1669         }
1670
1671         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1672                                  target_ce_config_wlan,
1673                                  sizeof(target_ce_config_wlan));
1674
1675         if (ret != 0) {
1676                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1677                 return ret;
1678         }
1679
1680         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1681                                           offsetof(struct pcie_state,
1682                                                    svc_to_pipe_map),
1683                                           &svc_to_pipe_map);
1684         if (ret != 0) {
1685                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1686                 return ret;
1687         }
1688
1689         if (svc_to_pipe_map == 0) {
1690                 ret = -EIO;
1691                 ath10k_err("Invalid svc_to_pipe map\n");
1692                 return ret;
1693         }
1694
1695         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1696                                  target_service_to_ce_map_wlan,
1697                                  sizeof(target_service_to_ce_map_wlan));
1698         if (ret != 0) {
1699                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1700                 return ret;
1701         }
1702
1703         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1704                                           offsetof(struct pcie_state,
1705                                                    config_flags),
1706                                           &pcie_config_flags);
1707         if (ret != 0) {
1708                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1709                 return ret;
1710         }
1711
1712         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1713
1714         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1715                                  offsetof(struct pcie_state, config_flags),
1716                                  &pcie_config_flags,
1717                                  sizeof(pcie_config_flags));
1718         if (ret != 0) {
1719                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1720                 return ret;
1721         }
1722
1723         /* configure early allocation */
1724         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1725
1726         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1727         if (ret != 0) {
1728                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1729                 return ret;
1730         }
1731
1732         /* first bank is switched to IRAM */
1733         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1734                          HI_EARLY_ALLOC_MAGIC_MASK);
1735         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1736                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1737
1738         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1739         if (ret != 0) {
1740                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1741                 return ret;
1742         }
1743
1744         /* Tell Target to proceed with initialization */
1745         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1746
1747         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1748         if (ret != 0) {
1749                 ath10k_err("Failed to get option val: %d\n", ret);
1750                 return ret;
1751         }
1752
1753         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1754
1755         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1756         if (ret != 0) {
1757                 ath10k_err("Failed to set option val: %d\n", ret);
1758                 return ret;
1759         }
1760
1761         return 0;
1762 }
1763
1764
1765
1766 static int ath10k_pci_ce_init(struct ath10k *ar)
1767 {
1768         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1769         struct ath10k_pci_pipe *pipe_info;
1770         const struct ce_attr *attr;
1771         int pipe_num;
1772
1773         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1774                 pipe_info = &ar_pci->pipe_info[pipe_num];
1775                 pipe_info->pipe_num = pipe_num;
1776                 pipe_info->hif_ce_state = ar;
1777                 attr = &host_ce_config_wlan[pipe_num];
1778
1779                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1780                 if (pipe_info->ce_hdl == NULL) {
1781                         ath10k_err("Unable to initialize CE for pipe: %d\n",
1782                                    pipe_num);
1783
1784                         /* It is safe to call it here. It checks if ce_hdl is
1785                          * valid for each pipe */
1786                         ath10k_pci_ce_deinit(ar);
1787                         return -ENOMEM;
1788                 }
1789
1790                 if (pipe_num == ar_pci->ce_count - 1) {
1791                         /*
1792                          * Reserve the ultimate CE for
1793                          * diagnostic Window support
1794                          */
1795                         ar_pci->ce_diag =
1796                         ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1797                         continue;
1798                 }
1799
1800                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1801         }
1802
1803         /*
1804          * Initially, establish CE completion handlers for use with BMI.
1805          * These are overwritten with generic handlers after we exit BMI phase.
1806          */
1807         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1808         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1809                                    ath10k_pci_bmi_send_done, 0);
1810
1811         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1812         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1813                                    ath10k_pci_bmi_recv_data);
1814
1815         return 0;
1816 }
1817
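/*
 * Invoked when the target raises FW_IND_EVENT_PENDING, typically on a
 * firmware crash or assert; the handler ACKs the indicator and, once the
 * HIF has started, dumps the firmware crash area.
 */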
1818 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1819 {
1820         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1821         u32 fw_indicator_address, fw_indicator;
1822
1823         ath10k_pci_wake(ar);
1824
1825         fw_indicator_address = ar_pci->fw_indicator_address;
1826         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1827
1828         if (fw_indicator & FW_IND_EVENT_PENDING) {
1829                 /* ACK: clear Target-side pending event */
1830                 ath10k_pci_write32(ar, fw_indicator_address,
1831                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1832
1833                 if (ar_pci->started) {
1834                         ath10k_pci_hif_dump_area(ar);
1835                 } else {
1836                         /*
1837                          * Probable Target failure before we're prepared
1838                          * to handle it.  Generally unexpected.
1839                          */
1840                         ath10k_warn("early firmware event indicated\n");
1841                 }
1842         }
1843
1844         ath10k_pci_sleep(ar);
1845 }
1846
1847 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1848 {
1849         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1850         int ret;
1851
1852         ret = ath10k_pci_start_intr(ar);
1853         if (ret) {
1854                 ath10k_err("could not start interrupt handling (%d)\n", ret);
1855                 goto err;
1856         }
1857
1858         /*
1859          * Bring the target up cleanly.
1860          *
1861          * The target may be in an undefined state with an AUX-powered Target
1862          * and a Host in WoW mode. If the Host crashes, loses power, or is
1863          * restarted (without unloading the driver) then the Target is left
1864          * (aux) powered and running. On a subsequent driver load, the Target
1865          * is in an unexpected state. We try to catch that here in order to
1866          * reset the Target and retry the probe.
1867          */
1868         ath10k_pci_device_reset(ar);
1869
1870         ret = ath10k_pci_reset_target(ar);
1871         if (ret)
1872                 goto err_irq;
1873
1874         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1875                 /* Force AWAKE forever */
1876                 ath10k_do_pci_wake(ar);
1877
1878         ret = ath10k_pci_ce_init(ar);
1879         if (ret)
1880                 goto err_ps;
1881
1882         ret = ath10k_pci_init_config(ar);
1883         if (ret)
1884                 goto err_ce;
1885
1886         ret = ath10k_pci_wake_target_cpu(ar);
1887         if (ret) {
1888                 ath10k_err("could not wake up target CPU (%d)\n", ret);
1889                 goto err_ce;
1890         }
1891
1892         return 0;
1893
1894 err_ce:
1895         ath10k_pci_ce_deinit(ar);
1896 err_ps:
1897         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1898                 ath10k_do_pci_sleep(ar);
1899 err_irq:
1900         ath10k_pci_stop_intr(ar);
1901 err:
1902         return ret;
1903 }
1904
1905 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1906 {
1907         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1908
1909         ath10k_pci_stop_intr(ar);
1910
1911         ath10k_pci_ce_deinit(ar);
1912         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1913                 ath10k_do_pci_sleep(ar);
1914 }
1915
1916 #ifdef CONFIG_PM
1917
1918 #define ATH10K_PCI_PM_CONTROL 0x44
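/*
 * 0x44 is assumed to be this device's PCI power management control/status
 * register (PMCSR); the low two bits of its first byte select the power
 * state (0x0 = D0, 0x3 = D3hot), which is what suspend/resume toggle below.
 */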
1919
1920 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1921 {
1922         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1923         struct pci_dev *pdev = ar_pci->pdev;
1924         u32 val;
1925
1926         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1927
1928         if ((val & 0x000000ff) != 0x3) {
1929                 pci_save_state(pdev);
1930                 pci_disable_device(pdev);
1931                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1932                                        (val & 0xffffff00) | 0x03);
1933         }
1934
1935         return 0;
1936 }
1937
1938 static int ath10k_pci_hif_resume(struct ath10k *ar)
1939 {
1940         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1941         struct pci_dev *pdev = ar_pci->pdev;
1942         u32 val;
1943
1944         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1945
1946         if ((val & 0x000000ff) != 0) {
1947                 pci_restore_state(pdev);
1948                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1949                                        val & 0xffffff00);
1950                 /*
1951                  * Suspend/Resume resets the PCI configuration space, so we
1952                  * have to re-disable the RETRY_TIMEOUT register (0x41) to keep
1953                  * PCI Tx retries from interfering with C3 CPU state. The byte
1954                  * at 0x41 is accessed via the dword at 0x40, hence the masks.
1955                  */
1955                 pci_read_config_dword(pdev, 0x40, &val);
1956
1957                 if ((val & 0x0000ff00) != 0)
1958                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1959         }
1960
1961         return 0;
1962 }
1963 #endif
1964
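/*
 * HIF (host interface) ops table: the bus-agnostic ath10k core drives this
 * PCI transport through these callbacks.
 */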
1965 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1966         .send_head              = ath10k_pci_hif_send_head,
1967         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1968         .start                  = ath10k_pci_hif_start,
1969         .stop                   = ath10k_pci_hif_stop,
1970         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1971         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1972         .send_complete_check    = ath10k_pci_hif_send_complete_check,
1973         .set_callbacks          = ath10k_pci_hif_set_callbacks,
1974         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1975         .power_up               = ath10k_pci_hif_power_up,
1976         .power_down             = ath10k_pci_hif_power_down,
1977 #ifdef CONFIG_PM
1978         .suspend                = ath10k_pci_hif_suspend,
1979         .resume                 = ath10k_pci_hif_resume,
1980 #endif
1981 };
1982
1983 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1984 {
1985         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
1986         struct ath10k_pci *ar_pci = pipe->ar_pci;
1987
1988         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1989 }
1990
1991 static void ath10k_msi_err_tasklet(unsigned long data)
1992 {
1993         struct ath10k *ar = (struct ath10k *)data;
1994
1995         ath10k_pci_fw_interrupt_handler(ar);
1996 }
1997
1998 /*
1999  * Handler for a per-engine interrupt on a PARTICULAR CE.
2000  * This is used in cases where each CE has a private MSI interrupt.
2001  */
2002 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2003 {
2004         struct ath10k *ar = arg;
2005         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2006         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2007
2008         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2009                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2010                 return IRQ_HANDLED;
2011         }
2012
2013         /*
2014          * NOTE: We are able to derive ce_id from irq because we
2015          * use a one-to-one mapping for CEs 0..5.
2016          * CEs 6 & 7 do not use interrupts at all.
2017          *
2018          * This mapping must be kept in sync with the mapping
2019          * used by firmware.
2020          */
2021         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2022         return IRQ_HANDLED;
2023 }
2024
2025 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2026 {
2027         struct ath10k *ar = arg;
2028         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2029
2030         tasklet_schedule(&ar_pci->msi_fw_err);
2031         return IRQ_HANDLED;
2032 }
2033
2034 /*
2035  * Top-level interrupt handler for all PCI interrupts from a Target.
2036  * When a block of MSI interrupts is allocated, this top-level handler
2037  * is not used; instead, we directly call the correct sub-handler.
2038  */
2039 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2040 {
2041         struct ath10k *ar = arg;
2042         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2043
2044         if (ar_pci->num_msi_intrs == 0) {
2045                 /*
2046                  * IMPORTANT: INTR_CLR register has to be set after
2047                  * INTR_ENABLE is set to 0; otherwise the interrupt
2048                  * cannot actually be cleared.
2049                  */
2050                 iowrite32(0, ar_pci->mem +
2051                           (SOC_CORE_BASE_ADDRESS |
2052                            PCIE_INTR_ENABLE_ADDRESS));
2053                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2054                           PCIE_INTR_CE_MASK_ALL,
2055                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2056                                          PCIE_INTR_CLR_ADDRESS));
2057                 /*
2058                  * IMPORTANT: this extra read transaction is required to
2059                  * flush the posted write buffer.
2060                  */
2061                 (void) ioread32(ar_pci->mem +
2062                                 (SOC_CORE_BASE_ADDRESS |
2063                                  PCIE_INTR_ENABLE_ADDRESS));
2064         }
2065
2066         tasklet_schedule(&ar_pci->intr_tq);
2067
2068         return IRQ_HANDLED;
2069 }
2070
2071 static void ath10k_pci_tasklet(unsigned long data)
2072 {
2073         struct ath10k *ar = (struct ath10k *)data;
2074         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2075
2076         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2077         ath10k_ce_per_engine_service_any(ar);
2078
2079         if (ar_pci->num_msi_intrs == 0) {
2080                 /* Enable Legacy PCI line interrupts */
2081                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2082                           PCIE_INTR_CE_MASK_ALL,
2083                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2084                                          PCIE_INTR_ENABLE_ADDRESS));
2085                 /*
2086                  * IMPORTANT: this extra read transaction is required to
2087                  * flush the posted write buffer
2088                  */
2089                 (void) ioread32(ar_pci->mem +
2090                                 (SOC_CORE_BASE_ADDRESS |
2091                                  PCIE_INTR_ENABLE_ADDRESS));
2092         }
2093 }
2094
2095 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2096 {
2097         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2098         int ret;
2099         int i;
2100
2101         ret = pci_enable_msi_block(ar_pci->pdev, num);
2102         if (ret)
2103                 return ret;
2104
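        /* Despite the "msix" naming this uses multi-message MSI:
         * pci_enable_msi_block() above allocates num consecutive vectors
         * starting at pdev->irq, indexed by the MSI_ASSIGN_* offsets. */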
2105         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2106                           ath10k_pci_msi_fw_handler,
2107                           IRQF_SHARED, "ath10k_pci", ar);
2108         if (ret) {
2109                 ath10k_warn("request_irq(%d) failed %d\n",
2110                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2111
2112                 pci_disable_msi(ar_pci->pdev);
2113                 return ret;
2114         }
2115
2116         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2117                 ret = request_irq(ar_pci->pdev->irq + i,
2118                                   ath10k_pci_per_engine_handler,
2119                                   IRQF_SHARED, "ath10k_pci", ar);
2120                 if (ret) {
2121                         ath10k_warn("request_irq(%d) failed %d\n",
2122                                     ar_pci->pdev->irq + i, ret);
2123
2124                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2125                                 free_irq(ar_pci->pdev->irq + i, ar);
2126
2127                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2128                         pci_disable_msi(ar_pci->pdev);
2129                         return ret;
2130                 }
2131         }
2132
2133         ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2134         return 0;
2135 }
2136
2137 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2138 {
2139         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2140         int ret;
2141
2142         ret = pci_enable_msi(ar_pci->pdev);
2143         if (ret < 0)
2144                 return ret;
2145
2146         ret = request_irq(ar_pci->pdev->irq,
2147                           ath10k_pci_interrupt_handler,
2148                           IRQF_SHARED, "ath10k_pci", ar);
2149         if (ret < 0) {
2150                 pci_disable_msi(ar_pci->pdev);
2151                 return ret;
2152         }
2153
2154         ath10k_info("MSI interrupt handling\n");
2155         return 0;
2156 }
2157
2158 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2159 {
2160         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2161         int ret;
2162
2163         ret = request_irq(ar_pci->pdev->irq,
2164                           ath10k_pci_interrupt_handler,
2165                           IRQF_SHARED, "ath10k_pci", ar);
2166         if (ret < 0)
2167                 return ret;
2168
2169         /*
2170          * Make sure to wake the Target before enabling Legacy
2171          * Interrupt.
2172          */
2173         iowrite32(PCIE_SOC_WAKE_V_MASK,
2174                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2175                   PCIE_SOC_WAKE_ADDRESS);
2176
2177         ath10k_pci_wait(ar);
2178
2179         /*
2180          * A potential race occurs here: the CORE_BASE write
2181          * depends on the target correctly decoding the AXI address, but
2182          * the host won't know when the target writes BAR to CORE_CTRL.
2183          * This write might get lost if the target has NOT written BAR.
2184          * For now, work around the race by repeating the write in the
2185          * synchronization check in ath10k_pci_reset_target().
2186          */
2187         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2188                   PCIE_INTR_CE_MASK_ALL,
2189                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2190                                  PCIE_INTR_ENABLE_ADDRESS));
2191         iowrite32(PCIE_SOC_WAKE_RESET,
2192                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2193                   PCIE_SOC_WAKE_ADDRESS);
2194
2195         ath10k_info("legacy interrupt handling\n");
2196         return 0;
2197 }
2198
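/*
 * Interrupt setup: try multi-MSI first, fall back to single MSI and finally
 * to shared legacy INTx. num_msi_intrs records the chosen mode (0 = legacy).
 */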
2199 static int ath10k_pci_start_intr(struct ath10k *ar)
2200 {
2201         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2202         int num = MSI_NUM_REQUEST;
2203         int ret;
2204         int i;
2205
2206         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2207         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2208                      (unsigned long) ar);
2209
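        /* One tasklet per CE pipe: the per-engine MSI handler schedules only
         * the interrupting pipe, while the legacy/single-MSI path services
         * all engines via intr_tq. */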
2210         for (i = 0; i < CE_COUNT; i++) {
2211                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2212                 tasklet_init(&ar_pci->pipe_info[i].intr,
2213                              ath10k_pci_ce_tasklet,
2214                              (unsigned long)&ar_pci->pipe_info[i]);
2215         }
2216
2217         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2218                 num = 1;
2219
2220         if (num > 1) {
2221                 ret = ath10k_pci_start_intr_msix(ar, num);
2222                 if (ret == 0)
2223                         goto exit;
2224
2225                 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2226                 num = 1;
2227         }
2228
2229         if (num == 1) {
2230                 ret = ath10k_pci_start_intr_msi(ar);
2231                 if (ret == 0)
2232                         goto exit;
2233
2234                 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2235                             ret);
2236                 num = 0;
2237         }
2238
2239         ret = ath10k_pci_start_intr_legacy(ar);
2240
2241 exit:
2242         ar_pci->num_msi_intrs = num;
2243         ar_pci->ce_count = CE_COUNT;
2244         return ret;
2245 }
2246
2247 static void ath10k_pci_stop_intr(struct ath10k *ar)
2248 {
2249         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2250         int i;
2251
2252         /* There's at least one interrupt regardless of whether it's
2253          * legacy INTR, MSI or MSI-X */
2254         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2255                 free_irq(ar_pci->pdev->irq + i, ar);
2256
2257         if (ar_pci->num_msi_intrs > 0)
2258                 pci_disable_msi(ar_pci->pdev);
2259 }
2260
2261 static int ath10k_pci_reset_target(struct ath10k *ar)
2262 {
2263         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2264         int wait_limit = 300; /* 3 sec */
2265
2266         /* Wait for Target to finish initialization before we proceed. */
2267         iowrite32(PCIE_SOC_WAKE_V_MASK,
2268                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2269                   PCIE_SOC_WAKE_ADDRESS);
2270
2271         ath10k_pci_wait(ar);
2272
2273         while (wait_limit-- &&
2274                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2275                  FW_IND_INITIALIZED)) {
2276                 if (ar_pci->num_msi_intrs == 0)
2277                         /* Fix potential race by repeating CORE_BASE writes */
2278                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2279                                   PCIE_INTR_CE_MASK_ALL,
2280                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2281                                                  PCIE_INTR_ENABLE_ADDRESS));
2282                 mdelay(10);
2283         }
2284
2285         if (wait_limit < 0) {
2286                 ath10k_err("Target stalled\n");
2287                 iowrite32(PCIE_SOC_WAKE_RESET,
2288                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2289                           PCIE_SOC_WAKE_ADDRESS);
2290                 return -EIO;
2291         }
2292
2293         iowrite32(PCIE_SOC_WAKE_RESET,
2294                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2295                   PCIE_SOC_WAKE_ADDRESS);
2296
2297         return 0;
2298 }
2299
2300 static void ath10k_pci_device_reset(struct ath10k *ar)
2301 {
2302         int i;
2303         u32 val;
2304
2305         if (!SOC_GLOBAL_RESET_ADDRESS)
2306                 return;
2307
2308         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
2309                                PCIE_SOC_WAKE_V_MASK);
2310         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2311                 if (ath10k_pci_target_is_awake(ar))
2312                         break;
2313                 msleep(1);
2314         }
2315
2316         /* Put Target, including PCIe, into RESET. */
2317         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2318         val |= 1;
2319         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2320
2321         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2322                 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2323                                           RTC_STATE_COLD_RESET_MASK)
2324                         break;
2325                 msleep(1);
2326         }
2327
2328         /* Pull Target, including PCIe, out of RESET. */
2329         val &= ~1;
2330         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2331
2332         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2333                 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2334                                             RTC_STATE_COLD_RESET_MASK))
2335                         break;
2336                 msleep(1);
2337         }
2338
2339         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2340 }
2341
2342 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2343 {
2344         int i;
2345
2346         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2347                 if (!test_bit(i, ar_pci->features))
2348                         continue;
2349
2350                 switch (i) {
2351                 case ATH10K_PCI_FEATURE_MSI_X:
2352                         ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2353                         break;
2354                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2355                         ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2356                         break;
2357                 }
2358         }
2359 }
2360
2361 static int ath10k_pci_probe(struct pci_dev *pdev,
2362                             const struct pci_device_id *pci_dev)
2363 {
2364         void __iomem *mem;
2365         int ret = 0;
2366         struct ath10k *ar;
2367         struct ath10k_pci *ar_pci;
2368         u32 lcr_val, chip_id;
2369
2370         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2371
2372         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2373         if (ar_pci == NULL)
2374                 return -ENOMEM;
2375
2376         ar_pci->pdev = pdev;
2377         ar_pci->dev = &pdev->dev;
2378
2379         switch (pci_dev->device) {
2380         case QCA988X_2_0_DEVICE_ID:
2381                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2382                 break;
2383         default:
2384                 ret = -ENODEV;
2385                 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2386                 goto err_ar_pci;
2387         }
2388
2389         if (ath10k_target_ps)
2390                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2391
2392         ath10k_pci_dump_features(ar_pci);
2393
2394         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2395         if (!ar) {
2396                 ath10k_err("ath10k_core_create failed!\n");
2397                 ret = -EINVAL;
2398                 goto err_ar_pci;
2399         }
2400
2401         ar_pci->ar = ar;
2402         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2403         atomic_set(&ar_pci->keep_awake_count, 0);
2404
2405         pci_set_drvdata(pdev, ar);
2406
2407         /*
2408          * Without any knowledge of the Host, the Target may have been reset or
2409          * power cycled and its Config Space may no longer reflect the PCI
2410          * address space that was assigned earlier by the PCI infrastructure.
2411          * Refresh it now.
2412          */
2413         ret = pci_assign_resource(pdev, BAR_NUM);
2414         if (ret) {
2415                 ath10k_err("cannot assign PCI space: %d\n", ret);
2416                 goto err_ar;
2417         }
2418
2419         ret = pci_enable_device(pdev);
2420         if (ret) {
2421                 ath10k_err("cannot enable PCI device: %d\n", ret);
2422                 goto err_ar;
2423         }
2424
2425         /* Request MMIO resources */
2426         ret = pci_request_region(pdev, BAR_NUM, "ath");
2427         if (ret) {
2428                 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2429                 goto err_device;
2430         }
2431
2432         /*
2433          * Target structures have a limit of 32 bit DMA pointers.
2434          * DMA pointers can be wider than 32 bits by default on some systems.
2435          */
2436         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2437         if (ret) {
2438                 ath10k_err("32-bit DMA not available: %d\n", ret);
2439                 goto err_region;
2440         }
2441
2442         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2443         if (ret) {
2444                 ath10k_err("cannot enable 32-bit consistent DMA\n");
2445                 goto err_region;
2446         }
2447
2448         /* Set bus master bit in PCI_COMMAND to enable DMA */
2449         pci_set_master(pdev);
2450
2451         /*
2452          * Temporary FIX: disable ASPM (clear the low byte of the Link
2453          * Control dword at 0x80). Will be removed after the OTP is programmed.
2454          */
2455         pci_read_config_dword(pdev, 0x80, &lcr_val);
2456         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2457
2458         /* Arrange for access to Target SoC registers. */
2459         mem = pci_iomap(pdev, BAR_NUM, 0);
2460         if (!mem) {
2461                 ath10k_err("PCI iomap error\n");
2462                 ret = -EIO;
2463                 goto err_master;
2464         }
2465
2466         ar_pci->mem = mem;
2467
2468         spin_lock_init(&ar_pci->ce_lock);
2469
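        /* The SoC must be awake for MMIO access; wake it just long enough to
         * read the chip id that core registration needs. */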
2470         ret = ath10k_do_pci_wake(ar);
2471         if (ret) {
2472                 ath10k_err("Failed to get chip id: %d\n", ret);
2473                 goto err_iomap;
2474         }
2475
2476         chip_id = ath10k_pci_read32(ar,
2477                                     RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
2478
2479         ath10k_do_pci_sleep(ar);
2480
2481         ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2482
2483         ret = ath10k_core_register(ar, chip_id);
2484         if (ret) {
2485                 ath10k_err("could not register driver core (%d)\n", ret);
2486                 goto err_iomap;
2487         }
2488
2489         return 0;
2490
2491 err_iomap:
2492         pci_iounmap(pdev, mem);
2493 err_master:
2494         pci_clear_master(pdev);
2495 err_region:
2496         pci_release_region(pdev, BAR_NUM);
2497 err_device:
2498         pci_disable_device(pdev);
2499 err_ar:
2500         ath10k_core_destroy(ar);
2501 err_ar_pci:
2502         /* call HIF PCI free here */
2503         kfree(ar_pci);
2504
2505         return ret;
2506 }
2507
2508 static void ath10k_pci_remove(struct pci_dev *pdev)
2509 {
2510         struct ath10k *ar = pci_get_drvdata(pdev);
2511         struct ath10k_pci *ar_pci;
2512
2513         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2514
2515         if (!ar)
2516                 return;
2517
2518         ar_pci = ath10k_pci_priv(ar);
2519
2520         if (!ar_pci)
2521                 return;
2522
2523         tasklet_kill(&ar_pci->msi_fw_err);
2524
2525         ath10k_core_unregister(ar);
2526
2527         pci_iounmap(pdev, ar_pci->mem);
2528         pci_release_region(pdev, BAR_NUM);
2529         pci_clear_master(pdev);
2530         pci_disable_device(pdev);
2531
2532         ath10k_core_destroy(ar);
2533         kfree(ar_pci);
2534 }
2535
2536 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2537
2538 static struct pci_driver ath10k_pci_driver = {
2539         .name = "ath10k_pci",
2540         .id_table = ath10k_pci_id_table,
2541         .probe = ath10k_pci_probe,
2542         .remove = ath10k_pci_remove,
2543 };
2544
2545 static int __init ath10k_pci_init(void)
2546 {
2547         int ret;
2548
2549         ret = pci_register_driver(&ath10k_pci_driver);
2550         if (ret)
2551                 ath10k_err("pci_register_driver failed [%d]\n", ret);
2552
2553         return ret;
2554 }
2555 module_init(ath10k_pci_init);
2556
2557 static void __exit ath10k_pci_exit(void)
2558 {
2559         pci_unregister_driver(&ath10k_pci_driver);
2560 }
2561
2562 module_exit(ath10k_pci_exit);
2563
2564 MODULE_AUTHOR("Qualcomm Atheros");
2565 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2566 MODULE_LICENSE("Dual BSD/GPL");
2567 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2568 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2569 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);