struct ce_sendlist is useless as we only ever add a single buffer to it.
More importantly, it's ugly as it doesn't use the skb properly. Drop the
struct and pass the buffer's DMA address, length and flags straight to
ath10k_ce_sendlist_send() instead.
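
To illustrate, a condensed before/after sketch of the call site in
ath10k_pci_hif_send_head(), taken from the diff below:

	/* before: build a one-entry sendlist for every packet */
	struct ce_sendlist sendlist;

	memset(&sendlist, 0, sizeof(struct ce_sendlist));
	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);

	/* after: pass the DMA address, length and flags directly */
	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
				      skb_cb->paddr, len, flags);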
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
return ret;
}
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
- unsigned int nbytes, u32 flags)
-{
- unsigned int num_items = sendlist->num_items;
- struct ce_sendlist_item *item;
-
- item = &sendlist->item[num_items];
- item->data = buffer;
- item->u.nbytes = nbytes;
- item->flags = flags;
- sendlist->num_items++;
-}
-
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
- struct ce_sendlist *sendlist,
- unsigned int transfer_id)
+ unsigned int transfer_id,
+ u32 paddr, unsigned int nbytes,
+ u32 flags)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
- struct ce_sendlist_item *item;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int num_items = sendlist->num_items;
unsigned int sw_index;
unsigned int write_index;
- int i, delta, ret = -ENOMEM;
+ int delta, ret = -ENOMEM;
spin_lock_bh(&ar_pci->ce_lock);
delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
- if (delta >= num_items) {
- /*
- * Handle all but the last item uniformly.
- */
- for (i = 0; i < num_items - 1; i++) {
- item = &sendlist->item[i];
- ret = ath10k_ce_send_nolock(ce_state,
- CE_SENDLIST_ITEM_CTXT,
- (u32) item->data,
- item->u.nbytes, transfer_id,
- item->flags |
- CE_SEND_FLAG_GATHER);
- if (ret)
- ath10k_warn("CE send failed for item: %d\n", i);
- }
- /*
- * Provide valid context pointer for final item.
- */
- item = &sendlist->item[i];
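+ /* a single buffer needs only one free slot in the source ring */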
+ if (delta >= 1) {
ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
- (u32) item->data, item->u.nbytes,
- transfer_id, item->flags);
+ paddr, nbytes,
+ transfer_id, flags);
if (ret)
- ath10k_warn("CE send failed for last item: %d\n", i);
+ ath10k_warn("CE send failed: %d\n", ret);
}
spin_unlock_bh(&ar_pci->ce_lock);
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
-#define CE_SENDLIST_ITEMS_MAX 12
#define CE_SEND_FLAG_GATHER 0x00010000
/*
struct ath10k_ce_ring *dest_ring;
};
-struct ce_sendlist_item {
- /* e.g. buffer or desc list */
- dma_addr_t data;
- union {
- /* simple buffer */
- unsigned int nbytes;
- /* Rx descriptor list */
- unsigned int ndesc;
- } u;
- /* externally-specified flags; OR-ed with internal flags */
- u32 flags;
-};
-
-struct ce_sendlist {
- unsigned int num_items;
- struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
-};
-
/* Copy Engine settable attributes */
struct ce_attr;
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
-/* Append a simple buffer (address/length) to a sendlist. */
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
- u32 buffer,
- unsigned int nbytes,
- /* OR-ed with internal flags */
- u32 flags);
-
-/*
- * Queue a "sendlist" of buffers to be sent using gather to a single
- * anonymous destination buffer
- * Implemenation note: Pushes multiple buffers with Gather to Source ring.
- */
+/*
+ * Queue a single buffer to be sent to an anonymous destination buffer.
+ */
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
- void *per_transfer_send_context,
- struct ce_sendlist *sendlist,
- /* 14 bits */
- unsigned int transfer_id);
+ void *per_transfer_context,
+ unsigned int transfer_id,
+ u32 paddr, unsigned int nbytes,
+ u32 flags);
/*==================Recv=======================*/
unsigned int dest_nentries;
};
-/*
- * When using sendlist_send to transfer multiple buffer fragments, the
- * transfer context of each fragment, except last one, will be filled
- * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
- * each fragment done with send and the transfer context would be
- * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
- * status of a send completion.
- */
-#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
-
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
&ce_data, &nbytes,
&transfer_id) == 0) {
- /*
- * For the send completion of an item in sendlist, just
- * increment num_sends_allowed. The upper layer callback will
- * be triggered when last fragment is done with send.
- */
- if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
- spin_lock_bh(&pipe_info->pipe_lock);
- pipe_info->num_sends_allowed++;
- spin_unlock_bh(&pipe_info->pipe_lock);
- continue;
- }
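+ /* each completed send frees up one slot for a new buffer */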
+ spin_lock_bh(&pipe_info->pipe_lock);
+ pipe_info->num_sends_allowed++;
+ spin_unlock_bh(&pipe_info->pipe_lock);
compl = get_free_compl(pipe_info);
if (!compl)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
- struct ce_sendlist sendlist;
unsigned int len;
u32 flags = 0;
int ret;
- memset(&sendlist, 0, sizeof(struct ce_sendlist));
-
len = min(bytes, nbuf->len);
bytes -= len;
"ath10k tx: data: ",
nbuf->data, nbuf->len);
- ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
-
/* Make sure we have resources to handle this request */
spin_lock_bh(&pipe_info->pipe_lock);
if (!pipe_info->num_sends_allowed) {
pipe_info->num_sends_allowed--;
spin_unlock_bh(&pipe_info->pipe_lock);
- ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+ ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
+ skb_cb->paddr, len, flags);
if (ret)
ath10k_warn("CE send failed: %p\n", nbuf);
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
- if (netbuf != CE_SENDLIST_ITEM_CTXT) {
- /*
- * Indicate the completion to higer layer to free
- * the buffer
- */
- ATH10K_SKB_CB(netbuf)->is_aborted = true;
- ar_pci->msg_callbacks_current.tx_completion(ar,
- netbuf,
- id);
- }
+ /*
+ * Indicate the completion to the higher layer to free
+ * the buffer
+ */
+ ATH10K_SKB_CB(netbuf)->is_aborted = true;
+ ar_pci->msg_callbacks_current.tx_completion(ar,
+ netbuf,
+ id);
}
}