/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"
/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 *
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it may
 * seem, than should be needed. These are provided mainly for maximum
 * flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
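
/*
 * Illustrative sketch (not part of the driver): a rough outline of how a
 * HIF-style caller might drive one pipe with the API in this file. Names
 * such as "my_send_done", "my_recv_done", "my_recv_ctx" and "my_xfer_ctx"
 * are hypothetical placeholders; the real wiring lives in the ath10k PCI
 * layer.
 *
 *	struct ath10k_ce_pipe *pipe;
 *
 *	pipe = ath10k_ce_init(ar, ce_id, attr);
 *	ath10k_ce_send_cb_register(pipe, my_send_done, 0);
 *	ath10k_ce_recv_cb_register(pipe, my_recv_done);
 *
 *	// keep the destination ring topped up with anonymous recv buffers
 *	ath10k_ce_recv_buf_enqueue(pipe, my_recv_ctx, paddr);
 *
 *	// source side: post a buffer for transfer
 *	ath10k_ce_send(pipe, my_xfer_ctx, paddr, nbytes, transfer_id, 0);
 */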
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
        return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
        return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
        return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
        u32 ctrl1_addr = ath10k_pci_read32(ar,
                        ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                        (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
                        CE_CTRL1_DMAX_LENGTH_SET(n));

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
        u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                        (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
                        CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
        u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                        (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
                        CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
        return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
        ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
        ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
                        (addr & ~SRC_WATERMARK_HIGH_MASK) |
                        SRC_WATERMARK_HIGH_SET(n));

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
                        (addr & ~SRC_WATERMARK_LOW_MASK) |
                        SRC_WATERMARK_LOW_SET(n));

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
                        (addr & ~DST_WATERMARK_HIGH_MASK) |
                        DST_WATERMARK_HIGH_SET(n));

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
                        (addr & ~DST_WATERMARK_LOW_MASK) |
                        DST_WATERMARK_LOW_SET(n));
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
        u32 host_ie_addr = ath10k_pci_read32(ar,
                        ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                        host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
        u32 host_ie_addr = ath10k_pci_read32(ar,
                        ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                        host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
        u32 host_ie_addr = ath10k_pci_read32(ar,
                        ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                        host_ie_addr & ~CE_WATERMARK_MASK);

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
        u32 misc_ie_addr = ath10k_pci_read32(ar,
                        ce_ctrl_addr + MISC_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
                        misc_ie_addr | CE_ERROR_MASK);

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
                void *per_transfer_context,
                unsigned int transfer_id,
        struct ath10k *ar = ce_state->ar;
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        struct ce_desc *desc, *sdesc;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int write_index = src_ring->write_index;
        u32 ctrl_addr = ce_state->ctrl_addr;
        if (nbytes > ce_state->src_sz_max)
                ath10k_warn("%s: buffer exceeds max send size (nbytes: %d, max: %d)\n",
                            __func__, nbytes, ce_state->src_sz_max);
        ret = ath10k_pci_wake(ar);

        if (unlikely(CE_RING_DELTA(nentries_mask,
                        write_index, sw_index - 1) <= 0)) {

        desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
        sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

        desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

        if (flags & CE_SEND_FLAG_GATHER)
                desc_flags |= CE_DESC_FLAGS_GATHER;
        if (flags & CE_SEND_FLAG_BYTE_SWAP)
                desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

        sdesc->addr = __cpu_to_le32(buffer);
        sdesc->nbytes = __cpu_to_le16(nbytes);
        sdesc->flags = __cpu_to_le16(desc_flags);

        src_ring->per_transfer_context[write_index] = per_transfer_context;

        /* Update Source Ring Write Index */
        write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

        if (!(flags & CE_SEND_FLAG_GATHER))
                ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

        src_ring->write_index = write_index;

        ath10k_pci_sleep(ar);
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                void *per_transfer_context,
                unsigned int transfer_id,
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
                        buffer, nbytes, transfer_id, flags);
        spin_unlock_bh(&ar_pci->ce_lock);
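
/*
 * Example (sketch only): a caller is expected to DMA-map its payload and
 * hand the bus address to ath10k_ce_send(). "skb" and the surrounding pipe
 * lookup are hypothetical here, shown only to make the calling convention
 * concrete.
 *
 *	dma_addr_t paddr = dma_map_single(dev, skb->data, skb->len,
 *					  DMA_TO_DEVICE);
 *
 *	ret = ath10k_ce_send(ce_pipe, skb, paddr, skb->len, transfer_id, 0);
 *	if (ret)
 *		// nonzero means the source ring is full (or wake failed);
 *		// unmap the buffer and retry later
 */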
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
                void *per_transfer_context,
                unsigned int transfer_id,
                u32 paddr, unsigned int nbytes,
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int delta, ret = -ENOMEM;

        spin_lock_bh(&ar_pci->ce_lock);

        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

        ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
                ath10k_warn("CE send failed: %d\n", ret);

        spin_unlock_bh(&ar_pci->ce_lock);
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
                void *per_recv_context,
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int write_index;
        unsigned int sw_index;

        spin_lock_bh(&ar_pci->ce_lock);
        write_index = dest_ring->write_index;
        sw_index = dest_ring->sw_index;

        ret = ath10k_pci_wake(ar);

        if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
                struct ce_desc *base = dest_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

                /* Update destination descriptor */
                desc->addr = __cpu_to_le32(buffer);

                dest_ring->per_transfer_context[write_index] =

                /* Update Destination Ring Write Index */
                write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
                ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
                dest_ring->write_index = write_index;

        ath10k_pci_sleep(ar);

        spin_unlock_bh(&ar_pci->ce_lock);
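
/*
 * Example (sketch only): the receive path is typically kept full by mapping
 * fresh skbs and posting their bus addresses as anonymous receive buffers.
 * The allocation size "buf_sz" and the error handling here are hypothetical.
 *
 *	skb = dev_alloc_skb(buf_sz);
 *	paddr = dma_map_single(dev, skb->data, buf_sz, DMA_FROM_DEVICE);
 *
 *	ret = ath10k_ce_recv_buf_enqueue(ce_pipe, skb, paddr);
 *	if (ret)
 *		// destination ring is full; unmap/free and try again later
 */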
/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                void **per_transfer_contextp,
                unsigned int *nbytesp,
                unsigned int *transfer_idp,
                unsigned int *flagsp)
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int sw_index = dest_ring->sw_index;

        struct ce_desc *base = dest_ring->base_addr_owner_space;
        struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
        struct ce_desc sdesc;

        /* Copy in one go for performance reasons */

        nbytes = __le16_to_cpu(sdesc.nbytes);

        /*
         * This closes a relatively unusual race where the Host
         * sees the updated DRRI before the update to the
         * corresponding descriptor has completed. We treat this
         * as a descriptor that is not yet done.
         */

        /* Return data from completed destination descriptor */
        *bufferp = __le32_to_cpu(sdesc.addr);

        *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

        if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
                *flagsp = CE_RECV_FLAG_SWAPPED;

        if (per_transfer_contextp)
                *per_transfer_contextp =
                        dest_ring->per_transfer_context[sw_index];

        dest_ring->per_transfer_context[sw_index] = NULL;

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        dest_ring->sw_index = sw_index;
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                void **per_transfer_contextp,
                unsigned int *nbytesp,
                unsigned int *transfer_idp,
                unsigned int *flagsp)
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_recv_next_nolock(ce_state,
                        per_transfer_contextp,
                        transfer_idp, flagsp);
        spin_unlock_bh(&ar_pci->ce_lock);
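
/*
 * Example (sketch only): a recv callback registered via
 * ath10k_ce_recv_cb_register() would normally drain all completed
 * descriptors in a loop like this; "process_rx" and "max_len" are
 * hypothetical helpers, not part of this file.
 *
 *	while (ath10k_ce_completed_recv_next(ce_pipe, &ctx, &paddr,
 *					     &nbytes, &id, &flags) == 0) {
 *		skb = ctx;
 *		dma_unmap_single(dev, paddr, max_len, DMA_FROM_DEVICE);
 *		skb_put(skb, nbytes);
 *		process_rx(ar, skb);
 *	}
 */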
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
                void **per_transfer_contextp,
        struct ath10k_ce_ring *dest_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        struct ath10k_pci *ar_pci;

        dest_ring = ce_state->dest_ring;

        ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = dest_ring->nentries_mask;
        sw_index = dest_ring->sw_index;
        write_index = dest_ring->write_index;
        if (write_index != sw_index) {
                struct ce_desc *base = dest_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

                /* Return data from completed destination descriptor */
                *bufferp = __le32_to_cpu(desc->addr);

                if (per_transfer_contextp)
                        *per_transfer_contextp =
                                dest_ring->per_transfer_context[sw_index];

                dest_ring->per_transfer_context[sw_index] = NULL;

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                dest_ring->sw_index = sw_index;

        spin_unlock_bh(&ar_pci->ce_lock);
/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
                void **per_transfer_contextp,
                unsigned int *nbytesp,
                unsigned int *transfer_idp)
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        struct ce_desc *sdesc, *sbase;
        unsigned int read_index;

        if (src_ring->hw_index == sw_index) {
                /*
                 * The SW completion index has caught up with the cached
                 * version of the HW completion index.
                 * Update the cached HW completion index to see whether
                 * the SW has really caught up to the HW, or if the cached
                 * value of the HW index has become stale.
                 */
                ret = ath10k_pci_wake(ar);

                        ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
                src_ring->hw_index &= nentries_mask;

                ath10k_pci_sleep(ar);

        read_index = src_ring->hw_index;

        if ((read_index == sw_index) || (read_index == 0xffffffff))

        sbase = src_ring->shadow_base;
        sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

        /* Return data from completed source descriptor */
        *bufferp = __le32_to_cpu(sdesc->addr);
        *nbytesp = __le16_to_cpu(sdesc->nbytes);
        *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
                        CE_DESC_FLAGS_META_DATA);

        if (per_transfer_contextp)
                *per_transfer_contextp =
                        src_ring->per_transfer_context[sw_index];

        src_ring->per_transfer_context[sw_index] = NULL;

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        src_ring->sw_index = sw_index;
/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                void **per_transfer_contextp,
                unsigned int *nbytesp,
                unsigned int *transfer_idp)
        struct ath10k_ce_ring *src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        struct ath10k_pci *ar_pci;

        src_ring = ce_state->src_ring;

        ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (write_index != sw_index) {
                struct ce_desc *base = src_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

                /* Return data from completed source descriptor */
                *bufferp = __le32_to_cpu(desc->addr);
                *nbytesp = __le16_to_cpu(desc->nbytes);
                *transfer_idp = MS(__le16_to_cpu(desc->flags),
                                CE_DESC_FLAGS_META_DATA);

                if (per_transfer_contextp)
                        *per_transfer_contextp =
                                src_ring->per_transfer_context[sw_index];

                src_ring->per_transfer_context[sw_index] = NULL;

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                src_ring->sw_index = sw_index;

        spin_unlock_bh(&ar_pci->ce_lock);
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
                void **per_transfer_contextp,
                unsigned int *nbytesp,
                unsigned int *transfer_idp)
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_send_next_nolock(ce_state,
                        per_transfer_contextp,
        spin_unlock_bh(&ar_pci->ce_lock);
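
/*
 * Example (sketch only): a send-done callback typically reaps finished
 * source descriptors the same way; the per-transfer context posted in
 * ath10k_ce_send() comes back here. Unmapping and freeing are shown only
 * as an illustration of a caller's responsibility.
 *
 *	while (ath10k_ce_completed_send_next(ce_pipe, &ctx, &paddr,
 *					     &nbytes, &id) == 0) {
 *		skb = ctx;
 *		dma_unmap_single(dev, paddr, nbytes, DMA_TO_DEVICE);
 *		// hand the skb back to the upper layer or free it
 *	}
 */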
/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        u32 ctrl_addr = ce_state->ctrl_addr;

        ret = ath10k_pci_wake(ar);

        spin_lock_bh(&ar_pci->ce_lock);

        /* Clear the copy-complete interrupts that will be handled here. */
        ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
                        HOST_IS_COPY_COMPLETE_MASK);

        spin_unlock_bh(&ar_pci->ce_lock);

        if (ce_state->recv_cb)
                ce_state->recv_cb(ce_state);

        if (ce_state->send_cb)
                ce_state->send_cb(ce_state);
        spin_lock_bh(&ar_pci->ce_lock);

        /*
         * Misc CE interrupts are not being handled, but they still
         * need to be cleared.
         */
        ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

        spin_unlock_bh(&ar_pci->ce_lock);
        ath10k_pci_sleep(ar);
/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ret = ath10k_pci_wake(ar);

        intr_summary = CE_INTERRUPT_SUMMARY(ar);

        for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
                if (intr_summary & (1 << ce_id))
                        intr_summary &= ~(1 << ce_id);
                        /* no intr pending on this CE */

                ath10k_ce_per_engine_service(ar, ce_id);

        ath10k_pci_sleep(ar);
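
/*
 * Example (sketch only): with a single shared legacy or MSI interrupt the
 * PCI interrupt handler can simply fan out to all engines. "my_isr" is a
 * hypothetical handler name; the real one lives in the ath10k PCI layer.
 *
 *	static irqreturn_t my_isr(int irq, void *arg)
 *	{
 *		struct ath10k *ar = arg;
 *
 *		ath10k_ce_per_engine_service_any(ar);
 *		return IRQ_HANDLED;
 *	}
 */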
/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
                int disable_copy_compl_intr)
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;

        ret = ath10k_pci_wake(ar);

        if ((!disable_copy_compl_intr) &&
            (ce_state->send_cb || ce_state->recv_cb))
                ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

        ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

        ath10k_pci_sleep(ar);
void ath10k_ce_disable_interrupts(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ret = ath10k_pci_wake(ar);

        for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
                struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
                u32 ctrl_addr = ce_state->ctrl_addr;

                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

        ath10k_pci_sleep(ar);
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
                void (*send_cb)(struct ath10k_ce_pipe *),
                int disable_interrupts)
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ce_state->send_cb = send_cb;
        ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
        spin_unlock_bh(&ar_pci->ce_lock);
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
                void (*recv_cb)(struct ath10k_ce_pipe *))
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ce_state->recv_cb = recv_cb;
        ath10k_ce_per_engine_handler_adjust(ce_state, 0);
        spin_unlock_bh(&ar_pci->ce_lock);
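
/*
 * Example (sketch only): registering callbacks during pipe bring-up.
 * Passing a non-zero disable_interrupts to ath10k_ce_send_cb_register()
 * keeps the copy-complete interrupt masked for that CE, so the caller is
 * then expected to reap send completions some other way (for example by
 * polling ath10k_ce_completed_send_next()). "my_send_done", "my_recv_done",
 * "tx_pipe" and "rx_pipe" are hypothetical names.
 *
 *	ath10k_ce_send_cb_register(tx_pipe, my_send_done, 1);
 *	ath10k_ce_recv_cb_register(rx_pipe, my_recv_done);
 */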
static int ath10k_ce_init_src_ring(struct ath10k *ar,
                struct ath10k_ce_pipe *ce_state,
                const struct ce_attr *attr)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_ring *src_ring;
        unsigned int nentries = attr->src_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        dma_addr_t base_addr;

        nentries = roundup_pow_of_two(nentries);

        if (ce_state->src_ring) {
                WARN_ON(ce_state->src_ring->nentries != nentries);

        ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);

        ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
        src_ring = ce_state->src_ring;

        ptr += sizeof(struct ath10k_ce_ring);
        src_ring->nentries = nentries;
        src_ring->nentries_mask = nentries - 1;

        src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
        src_ring->sw_index &= src_ring->nentries_mask;
        src_ring->hw_index = src_ring->sw_index;

        src_ring->write_index =
                ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
        src_ring->write_index &= src_ring->nentries_mask;

        src_ring->per_transfer_context = (void **)ptr;
        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        src_ring->base_addr_owner_space_unaligned =
                pci_alloc_consistent(ar_pci->pdev,
                        (nentries * sizeof(struct ce_desc) +
        if (!src_ring->base_addr_owner_space_unaligned) {
                kfree(ce_state->src_ring);
                ce_state->src_ring = NULL;

        src_ring->base_addr_ce_space_unaligned = base_addr;

        src_ring->base_addr_owner_space = PTR_ALIGN(
                src_ring->base_addr_owner_space_unaligned,
        src_ring->base_addr_ce_space = ALIGN(
                src_ring->base_addr_ce_space_unaligned,

        /*
         * Also allocate a shadow src ring in regular
         * mem to use for faster access.
         */
        src_ring->shadow_base_unaligned =
                kmalloc((nentries * sizeof(struct ce_desc) +
                        CE_DESC_RING_ALIGN), GFP_KERNEL);
        if (!src_ring->shadow_base_unaligned) {
                pci_free_consistent(ar_pci->pdev,
                        (nentries * sizeof(struct ce_desc) +
                        src_ring->base_addr_owner_space,
                        src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
                ce_state->src_ring = NULL;

        src_ring->shadow_base = PTR_ALIGN(
                src_ring->shadow_base_unaligned,
                CE_DESC_RING_ALIGN);

        ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
                        src_ring->base_addr_ce_space);
        ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
        ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
        ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

        ath10k_dbg(ATH10K_DBG_BOOT,
                        "boot ce src ring id %d entries %d base_addr %p\n",
                        ce_id, nentries, src_ring->base_addr_owner_space);
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                struct ath10k_ce_pipe *ce_state,
                const struct ce_attr *attr)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_ring *dest_ring;
        unsigned int nentries = attr->dest_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        dma_addr_t base_addr;

        nentries = roundup_pow_of_two(nentries);

        if (ce_state->dest_ring) {
                WARN_ON(ce_state->dest_ring->nentries != nentries);

        ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);

        ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
        dest_ring = ce_state->dest_ring;

        ptr += sizeof(struct ath10k_ce_ring);
        dest_ring->nentries = nentries;
        dest_ring->nentries_mask = nentries - 1;

        dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
        dest_ring->sw_index &= dest_ring->nentries_mask;
        dest_ring->write_index =
                ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
        dest_ring->write_index &= dest_ring->nentries_mask;

        dest_ring->per_transfer_context = (void **)ptr;

        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        dest_ring->base_addr_owner_space_unaligned =
                pci_alloc_consistent(ar_pci->pdev,
                        (nentries * sizeof(struct ce_desc) +
        if (!dest_ring->base_addr_owner_space_unaligned) {
                kfree(ce_state->dest_ring);
                ce_state->dest_ring = NULL;

        dest_ring->base_addr_ce_space_unaligned = base_addr;
        /*
         * Initialize the memory to 0 so that stale descriptor data
         * cannot crash the system while firmware is being downloaded.
         */
        memset(dest_ring->base_addr_owner_space_unaligned, 0,
               nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);
        dest_ring->base_addr_owner_space = PTR_ALIGN(
                dest_ring->base_addr_owner_space_unaligned,
                CE_DESC_RING_ALIGN);
        dest_ring->base_addr_ce_space = ALIGN(
                dest_ring->base_addr_ce_space_unaligned,
                CE_DESC_RING_ALIGN);

        ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
                        dest_ring->base_addr_ce_space);
        ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
        ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

        ath10k_dbg(ATH10K_DBG_BOOT,
                        "boot ce dest ring id %d entries %d base_addr %p\n",
                        ce_id, nentries, dest_ring->base_addr_owner_space);
static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
                const struct ce_attr *attr)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);

        spin_lock_bh(&ar_pci->ce_lock);

        ce_state->id = ce_id;
        ce_state->ctrl_addr = ctrl_addr;
        ce_state->attr_flags = attr->flags;
        ce_state->src_sz_max = attr->src_sz_max;

        spin_unlock_bh(&ar_pci->ce_lock);
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
                const struct ce_attr *attr)
        struct ath10k_ce_pipe *ce_state;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);

        ret = ath10k_pci_wake(ar);

        ce_state = ath10k_ce_init_state(ar, ce_id, attr);
                ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);

        if (attr->src_nentries) {
                ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
                        ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
                        ath10k_ce_deinit(ce_state);

        if (attr->dest_nentries) {
                ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
                        ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
                        ath10k_ce_deinit(ce_state);

        /* Enable CE error interrupts */
        ath10k_ce_error_intr_enable(ar, ctrl_addr);

        ath10k_pci_sleep(ar);
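
/*
 * Example (sketch only): a caller describes each pipe with a ce_attr before
 * calling ath10k_ce_init(). The counts below are made-up values for
 * illustration, not the driver's real pipe configuration.
 *
 *	static const struct ce_attr example_attr = {
 *		.flags = 0,
 *		.src_nentries = 32,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 0,	// send-only pipe: no dest ring
 *	};
 *
 *	ce_pipe = ath10k_ce_init(ar, ce_id, &example_attr);
 *	if (!ce_pipe)
 *		// initialization failed; caller must not use the pipe
 */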
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ce_state->src_ring) {
                kfree(ce_state->src_ring->shadow_base_unaligned);
                pci_free_consistent(ar_pci->pdev,
                        (ce_state->src_ring->nentries *
                         sizeof(struct ce_desc) +
                         CE_DESC_RING_ALIGN),
                        ce_state->src_ring->base_addr_owner_space,
                        ce_state->src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);

        if (ce_state->dest_ring) {
                pci_free_consistent(ar_pci->pdev,
                        (ce_state->dest_ring->nentries *
                         sizeof(struct ce_desc) +
                         CE_DESC_RING_ALIGN),
                        ce_state->dest_ring->base_addr_owner_space,
                        ce_state->dest_ring->base_addr_ce_space);
                kfree(ce_state->dest_ring);

        ce_state->src_ring = NULL;
        ce_state->dest_ring = NULL;