/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
/* must be a power of 2 >= 64 <= 32768 */
#define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff

static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

static uint sdma_idle_cnt = 250;
module_param(sdma_idle_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");

static uint mod_num_sdma;
module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");

static uint sdma_desct_intr = SDMA_DESC_INTR;
module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
#define SDMA_WAIT_BATCH_SIZE 20
/* max wait time for a SDMA engine to indicate it has halted */
#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
/* all SDMA engine errors that cause a halt */

#define SD(name) SEND_DMA_##name
#define ALL_SDMA_ENG_HALT_ERRS \
	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
/* sdma_sendctrl operations */
#define SDMA_SENDCTRL_OP_ENABLE    BIT(0)
#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
#define SDMA_SENDCTRL_OP_HALT      BIT(2)
#define SDMA_SENDCTRL_OP_CLEANUP   BIT(3)

/* handle long defines */
#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
	SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
	SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
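
/*
 * Illustrative sketch (a reading aid, not additional driver logic):
 * packed status fields behind these long defines are read with the
 * usual mask-then-shift idiom, exactly as sdma_wait_for_packet_egress()
 * does below:
 *
 *	reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
 *	reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
 *	reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
 *
 * after which reg holds only the egress packet occupancy count.
 */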
static const char * const sdma_state_names[] = {
	[sdma_state_s00_hw_down]                = "s00_HwDown",
	[sdma_state_s10_hw_start_up_halt_wait]  = "s10_HwStartUpHaltWait",
	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
	[sdma_state_s20_idle]                   = "s20_Idle",
	[sdma_state_s30_sw_clean_up_wait]       = "s30_SwCleanUpWait",
	[sdma_state_s40_hw_clean_up_wait]       = "s40_HwCleanUpWait",
	[sdma_state_s50_hw_halt_wait]           = "s50_HwHaltWait",
	[sdma_state_s60_idle_halt_wait]         = "s60_IdleHaltWait",
	[sdma_state_s80_hw_freeze]              = "s80_HwFreeze",
	[sdma_state_s82_freeze_sw_clean]        = "s82_FreezeSwClean",
	[sdma_state_s99_running]                = "s99_Running",
};

#ifdef CONFIG_SDMA_VERBOSITY
static const char * const sdma_event_names[] = {
	[sdma_event_e00_go_hw_down]       = "e00_GoHwDown",
	[sdma_event_e10_go_hw_start]      = "e10_GoHwStart",
	[sdma_event_e15_hw_halt_done]     = "e15_HwHaltDone",
	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
	[sdma_event_e30_go_running]       = "e30_GoRunning",
	[sdma_event_e40_sw_cleaned]       = "e40_SwCleaned",
	[sdma_event_e50_hw_cleaned]       = "e50_HwCleaned",
	[sdma_event_e60_hw_halted]        = "e60_HwHalted",
	[sdma_event_e70_go_idle]          = "e70_GoIdle",
	[sdma_event_e80_hw_freeze]        = "e80_HwFreeze",
	[sdma_event_e81_hw_frozen]        = "e81_HwFrozen",
	[sdma_event_e82_hw_unfreeze]      = "e82_HwUnfreeze",
	[sdma_event_e85_link_down]        = "e85_LinkDown",
	[sdma_event_e90_sw_halted]        = "e90_SwHalted",
};
#endif
static const struct sdma_set_state_action sdma_action_table[] = {
	[sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
	},
	[sdma_state_s10_hw_start_up_halt_wait] = {
	},
	[sdma_state_s15_hw_start_up_clean_wait] = {
	},
	[sdma_state_s20_idle] = {
	},
	[sdma_state_s30_sw_clean_up_wait] = {
	},
	[sdma_state_s40_hw_clean_up_wait] = {
	},
	[sdma_state_s50_hw_halt_wait] = {
	},
	[sdma_state_s60_idle_halt_wait] = {
		.go_s99_running_tofalse = 1,
	},
	[sdma_state_s80_hw_freeze] = {
	},
	[sdma_state_s82_freeze_sw_clean] = {
	},
	[sdma_state_s99_running] = {
		.go_s99_running_totrue = 1,
	},
};
#define SDMA_TAIL_UPDATE_THRESH 0x1F

/* declare all statics here rather than keep sorting */
static void sdma_complete(struct kref *);
static void sdma_finalput(struct sdma_state *);
static void sdma_get(struct sdma_state *);
static void sdma_hw_clean_up_task(unsigned long);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
static void sdma_sw_clean_up_task(unsigned long);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
static void sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void __sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void dump_sdma_state(struct sdma_engine *sde);
static void sdma_make_progress(struct sdma_engine *sde, u64 status);
static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
static void sdma_flush_descq(struct sdma_engine *sde);
/**
 * sdma_state_name() - return state string from enum
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}

static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}
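
/*
 * Lifecycle sketch for the state-machine reference count above (a
 * reading aid, not additional driver logic): kref_init() during engine
 * setup starts the count at 1, sdma_get()/sdma_put() bracket periods
 * where the state machine must stay alive, and the final put fires
 * sdma_complete(), which signals ss->comp.  sdma_finalput() is
 * therefore "drop my reference, then block until the last holder is
 * gone", which sdma_exit() relies on before tearing an engine down.
 */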
static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}

/*
 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
 * sdma engine 'sde' to drop to 0.
 */
static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
					int pause)
{
	u64 off = 8 * sde->this_idx;
	struct hfi1_devdata *dd = sde->dd;
	u64 reg_prev;
	u64 reg = 0;
	int lcnt = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);

		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != reg_prev)
			lcnt = 0;
		if (lcnt++ > 500) {
			/* timed out - bounce the link */
			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sde->this_idx, (u32)reg);
			queue_work(dd->pport->hfi1_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		udelay(1);
	}
}
/**
 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
 * and pause for credit return.
 */
void sdma_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_sdma; i++) {
		struct sdma_engine *sde = &dd->per_sdma[i];

		sdma_wait_for_packet_egress(sde, 0);
	}
}

static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
{
	u64 reg;

	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
		return;
	reg = cnt;
	reg &= SD(DESC_CNT_CNT_MASK);
	reg <<= SD(DESC_CNT_CNT_SHIFT);
	write_sde_csr(sde, SD(DESC_CNT), reg);
}
static inline void complete_tx(struct sdma_engine *sde,
			       struct sdma_txreq *tx,
			       int res)
{
	/* protect against complete modifying */
	struct iowait *wait = tx->wait;
	callback_t complete = tx->complete;

#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	trace_hfi1_sdma_out_sn(sde, tx->sn);
	if (WARN_ON_ONCE(sde->head_sn != tx->sn))
		dd_dev_err(sde->dd, "expected %llu got %llu\n",
			   sde->head_sn, tx->sn);
	sde->head_sn++;
#endif
	__sdma_txclean(sde->dd, tx);
	if (complete)
		(*complete)(tx, res);
	if (wait && iowait_sdma_dec(wait))
		iowait_drain_wakeup(wait);
}

/*
 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
 *
 * Depending on timing there can be txreqs in two places:
 * - in the descq ring
 * - in the flush list
 *
 * To avoid ordering issues the descq ring needs to be flushed
 * first followed by the flush list.
 * This routine is called from two places
 * - From a work queue item
 * - Directly from the state machine just before setting the
 *   state to running
 *
 * Must be called with head_lock held
 *
 */
static void sdma_flush(struct sdma_engine *sde)
{
	struct sdma_txreq *txp, *txp_next;
	LIST_HEAD(flushlist);
	unsigned long flags;

	/* flush from head to tail */
	sdma_flush_descq(sde);
	spin_lock_irqsave(&sde->flushlist_lock, flags);
	/* copy flush list */
	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
		list_del_init(&txp->list);
		list_add_tail(&txp->list, &flushlist);
	}
	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
	/* flush from flush list */
	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
		complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
}
/*
 * Fields a work request for flushing the descq ring
 * and the flush list
 *
 * If the engine has been brought to running during
 * the scheduling delay, the flush is ignored, assuming
 * that the process of bringing the engine to running
 * would have done this flush prior to going to running.
 *
 */
static void sdma_field_flush(struct work_struct *work)
{
	unsigned long flags;
	struct sdma_engine *sde =
		container_of(work, struct sdma_engine, flush_worker);

	write_seqlock_irqsave(&sde->head_lock, flags);
	if (!__sdma_running(sde))
		sdma_flush(sde);
	write_sequnlock_irqrestore(&sde->head_lock, flags);
}
static void sdma_err_halt_wait(struct work_struct *work)
{
	struct sdma_engine *sde = container_of(work, struct sdma_engine,
					       err_halt_worker);
	u64 statuscsr;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
	while (1) {
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
		if (statuscsr)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(sde->dd,
				   "SDMA engine %d - timeout waiting for engine to halt\n",
				   sde->this_idx);
			/*
			 * Continue anyway.  This could happen if there was
			 * an uncorrectable error in the wrong spot.
			 */
			break;
		}
		usleep_range(80, 120);
	}

	sdma_process_event(sde, sdma_event_e15_hw_halt_done);
}

static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
{
	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
		unsigned index;
		struct hfi1_devdata *dd = sde->dd;

		for (index = 0; index < dd->num_sdma; index++) {
			struct sdma_engine *curr_sdma = &dd->per_sdma[index];

			if (curr_sdma != sde)
				curr_sdma->progress_check_head =
					curr_sdma->descq_head;
		}
		dd_dev_err(sde->dd,
			   "SDMA engine %d - check scheduled\n",
			   sde->this_idx);
		mod_timer(&sde->err_progress_check_timer, jiffies + 10);
	}
}
static void sdma_err_progress_check(unsigned long data)
{
	unsigned index;
	struct sdma_engine *sde = (struct sdma_engine *)data;

	dd_dev_err(sde->dd, "SDE progress check event\n");
	for (index = 0; index < sde->dd->num_sdma; index++) {
		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
		unsigned long flags;

		/* check progress on each engine except the current one */
		if (curr_sde == sde)
			continue;
		/*
		 * We must lock interrupts when acquiring sde->lock,
		 * to avoid a deadlock if interrupt triggers and spins on
		 * the same lock on same CPU
		 */
		spin_lock_irqsave(&curr_sde->tail_lock, flags);
		write_seqlock(&curr_sde->head_lock);

		/* skip non-running queues */
		if (curr_sde->state.current_state != sdma_state_s99_running) {
			write_sequnlock(&curr_sde->head_lock);
			spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
			continue;
		}

		if ((curr_sde->descq_head != curr_sde->descq_tail) &&
		    (curr_sde->descq_head ==
		     curr_sde->progress_check_head))
			__sdma_process_event(curr_sde,
					     sdma_event_e90_sw_halted);
		write_sequnlock(&curr_sde->head_lock);
		spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
	}
	schedule_work(&sde->err_halt_worker);
}
static void sdma_hw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	u64 statuscsr;

	while (1) {
#ifdef CONFIG_SDMA_VERBOSITY
		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
			   sde->this_idx, slashstrip(__FILE__), __LINE__,
			   __func__);
#endif
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
		if (statuscsr)
			break;
		udelay(10);
	}

	sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
}

static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
	smp_read_barrier_depends(); /* see sdma_update_tail() */
	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}
/*
 * flush ring for recovery
 */
static void sdma_flush_descq(struct sdma_engine *sde)
{
	u16 head, tail;
	int progress = 0;
	struct sdma_txreq *txp = get_txhead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */
	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	while (head != tail) {
		/* advance head, wrap if needed */
		head = ++sde->descq_head & sde->sdma_mask;
		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == head) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
			trace_hfi1_sdma_progress(sde, head, tail, txp);
			txp = get_txhead(sde);
		}
		progress++;
	}
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}
static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/*
	 * In the error clean up sequence, software clean must be called
	 * before the hardware clean so we can use the hardware head in
	 * the progress routine.  A hardware clean or SPC unfreeze will
	 * reset the hardware head.
	 *
	 * Process all retired requests.  The progress routine will use the
	 * latest physical hardware head - we are not running so speed does
	 * not matter.
	 */
	sdma_make_progress(sde, 0);

	sdma_flush(sde);

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers have been reset via an earlier
	 * clean up.
	 */
	sde->descq_tail = 0;
	sde->descq_head = 0;
	sde->desc_avail = sdma_descq_freecnt(sde);
	*sde->head_dma = 0;

	__sdma_process_event(sde, sdma_event_e40_sw_cleaned);

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
static void sdma_sw_tear_down(struct sdma_engine *sde)
{
	struct sdma_state *ss = &sde->state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);

	/* stop waiting for all unfreeze events to complete */
	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
}

static void sdma_start_hw_clean_up(struct sdma_engine *sde)
{
	tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
}

static void sdma_set_state(struct sdma_engine *sde,
			   enum sdma_states next_state)
{
	struct sdma_state *ss = &sde->state;
	const struct sdma_set_state_action *action = sdma_action_table;
	unsigned op = 0;

	trace_hfi1_sdma_state(
		sde,
		sdma_state_names[ss->current_state],
		sdma_state_names[next_state]);

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;
	ss->current_state = next_state;

	if (ss->previous_state != sdma_state_s99_running &&
	    next_state == sdma_state_s99_running)
		sdma_flush(sde);

	if (action[next_state].op_enable)
		op |= SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_cleanup)
		op |= SDMA_SENDCTRL_OP_CLEANUP;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;
	sdma_sendctrl(sde, ss->current_op);
}
/**
 * sdma_get_descq_cnt() - called when device probed
 *
 * Return a validated descq count.
 *
 * This is currently only used in the verbs initialization to build the tx
 * request structures.
 *
 * This will probably be deleted in favor of a more scalable approach to
 * alloc tx's.
 *
 */
u16 sdma_get_descq_cnt(void)
{
	u16 count = sdma_descq_cnt;

	if (!count)
		return SDMA_DESCQ_CNT;
	/* count must be a power of 2 between 64 and 32768, inclusive;
	 * otherwise return the default.
	 */
	if (!is_power_of_2(count))
		return SDMA_DESCQ_CNT;
	if (count < 64 || count > 32768)
		return SDMA_DESCQ_CNT;
	return count;
}
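
/*
 * Worked example (illustrative): the default sdma_descq_cnt of 2048 is
 * a power of 2 inside [64, 32768], so 2048 is returned.  A module
 * parameter of 1000 (not a power of 2) or 32 (below the minimum) falls
 * back to SDMA_DESCQ_CNT.
 */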
/**
 * sdma_engine_get_vl() - return vl for a given sdma engine
 * @sde: sdma engine
 *
 * This function returns the vl mapped to a given engine, or an error if
 * the mapping can't be found.  The mapping fields are protected by RCU.
 */
int sdma_engine_get_vl(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	struct sdma_vl_map *m;
	u8 vl;

	if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
		return -EINVAL;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	vl = m->engine_to_vl[sde->this_idx];
	rcu_read_unlock();

	return vl;
}

/**
 * sdma_select_engine_vl() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an engine based on the selector and a vl.  The
 * mapping fields are protected by RCU.
 */
struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl)
{
	struct sdma_vl_map *m;
	struct sdma_map_elem *e;
	struct sdma_engine *rval;

	/* NOTE This should only happen if SC->VL changed after the initial
	 * checks on the QP/AH
	 * Default will return engine 0 below
	 */
	if (vl >= num_vls) {
		rval = NULL;
		goto done;
	}

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return &dd->per_sdma[0];
	}
	e = m->map[vl & m->mask];
	rval = e->sde[selector & e->mask];
	rcu_read_unlock();

done:
	rval = !rval ? &dd->per_sdma[0] : rval;
	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
	return rval;
}

/**
 * sdma_select_engine_sc() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 * This function returns an engine based on the selector and an sc.
 */
struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5)
{
	u8 vl = sc_to_vlt(dd, sc5);

	return sdma_select_engine_vl(dd, selector, vl);
}
struct sdma_rht_map_elem {
	u32 mask;
	u8 ctr;
	struct sdma_engine *sde[0];
};

struct sdma_rht_node {
	unsigned long cpu_id;
	struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
	struct rhash_head node;
};

#define NR_CPUS_HINT 192

static const struct rhashtable_params sdma_rht_params = {
	.nelem_hint = NR_CPUS_HINT,
	.head_offset = offsetof(struct sdma_rht_node, node),
	.key_offset = offsetof(struct sdma_rht_node, cpu_id),
	.key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
	.automatic_shrinking = true,
};

/*
 * sdma_select_user_engine() - select sdma engine based on user setup
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an sdma engine for a user sdma request.
 * User defined sdma engine affinity setting is honored when applicable,
 * otherwise system default sdma engine mapping is used. To ensure correct
 * ordering, the mapping from <selector, vl> to sde must remain unchanged.
 */
struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl)
{
	struct sdma_rht_node *rht_node;
	struct sdma_engine *sde = NULL;
	const struct cpumask *current_mask = tsk_cpus_allowed(current);
	unsigned long cpu_id;

	/*
	 * To ensure that always the same sdma engine(s) will be
	 * selected make sure the process is pinned to this CPU only.
	 */
	if (cpumask_weight(current_mask) != 1)
		goto out;

	cpu_id = smp_processor_id();
	rcu_read_lock();
	rht_node = rhashtable_lookup_fast(&dd->sdma_rht, &cpu_id,
					  sdma_rht_params);

	if (rht_node && rht_node->map[vl]) {
		struct sdma_rht_map_elem *map = rht_node->map[vl];

		sde = map->sde[selector & map->mask];
	}
	rcu_read_unlock();

	if (sde)
		return sde;

out:
	return sdma_select_engine_vl(dd, selector, vl);
}

static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
{
	unsigned int i;

	for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
		map->sde[map->ctr + i] = map->sde[i];
}
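
/*
 * Worked example (illustrative): with map->ctr == 3,
 * roundup_pow_of_two(3) == 4, so the loop above runs once and copies
 * map->sde[0] into map->sde[3].  A lookup of the form
 *
 *	sde = map->sde[selector & map->mask];	// mask == 3
 *
 * then always lands on a valid entry; the first entry simply absorbs
 * the extra share of selectors created by the padding.
 */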
static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
				 struct sdma_engine *sde)
{
	unsigned int i, pow;

	/* only need to check the first ctr entries for a match */
	for (i = 0; i < map->ctr; i++) {
		if (map->sde[i] == sde) {
			memmove(&map->sde[i], &map->sde[i + 1],
				(map->ctr - i - 1) * sizeof(map->sde[0]));
			map->ctr--;
			pow = roundup_pow_of_two(map->ctr ? : 1);
			map->mask = pow - 1;
			sdma_populate_sde_map(map);
			break;
		}
	}
}

/*
 * Prevents concurrent reads and writes of the sdma engine cpu_mask
 */
static DEFINE_MUTEX(process_to_sde_mutex);

ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count)
{
	struct hfi1_devdata *dd = sde->dd;
	cpumask_var_t mask, new_mask;
	unsigned long cpu;
	int ret, vl, sz;

	vl = sdma_engine_get_vl(sde);
	if (unlikely(vl < 0))
		return -EINVAL;

	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
	if (!ret) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}
	ret = cpulist_parse(buf, mask);
	if (ret)
		goto out_free;

	if (!cpumask_subset(mask, cpu_online_mask)) {
		dd_dev_warn(sde->dd, "Invalid CPU mask\n");
		ret = -EINVAL;
		goto out_free;
	}

	sz = sizeof(struct sdma_rht_map_elem) +
	     (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));

	mutex_lock(&process_to_sde_mutex);

	for_each_cpu(cpu, mask) {
		struct sdma_rht_node *rht_node;

		/* Check if we have this already mapped */
		if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
			cpumask_set_cpu(cpu, new_mask);
			continue;
		}

		rht_node = rhashtable_lookup_fast(&dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (!rht_node) {
			rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
			if (!rht_node) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
			if (!rht_node->map[vl]) {
				kfree(rht_node);
				ret = -ENOMEM;
				goto out;
			}
			rht_node->cpu_id = cpu;
			rht_node->map[vl]->mask = 0;
			rht_node->map[vl]->ctr = 1;
			rht_node->map[vl]->sde[0] = sde;

			ret = rhashtable_insert_fast(&dd->sdma_rht,
						     &rht_node->node,
						     sdma_rht_params);
			if (ret) {
				kfree(rht_node->map[vl]);
				kfree(rht_node);
				dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
					   cpu);
				goto out;
			}
		} else {
			int ctr, pow;

			/* Add new user mappings */
			if (!rht_node->map[vl])
				rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);

			if (!rht_node->map[vl]) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl]->ctr++;
			ctr = rht_node->map[vl]->ctr;
			rht_node->map[vl]->sde[ctr - 1] = sde;
			pow = roundup_pow_of_two(ctr);
			rht_node->map[vl]->mask = pow - 1;

			/* Populate the sde map table */
			sdma_populate_sde_map(rht_node->map[vl]);
		}
		cpumask_set_cpu(cpu, new_mask);
	}

	/* Clean up old mappings */
	for_each_cpu(cpu, cpu_online_mask) {
		struct sdma_rht_node *rht_node;

		/* Don't cleanup sdes that are set in the new mask */
		if (cpumask_test_cpu(cpu, mask))
			continue;

		rht_node = rhashtable_lookup_fast(&dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (rht_node) {
			bool empty = true;
			int i;

			/* Remove mappings for old sde */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
				if (rht_node->map[i])
					sdma_cleanup_sde_map(rht_node->map[i],
							     sde);

			/* Free empty hash table entries */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
				if (!rht_node->map[i])
					continue;

				if (rht_node->map[i]->ctr) {
					empty = false;
					break;
				}
			}

			if (empty) {
				ret = rhashtable_remove_fast(&dd->sdma_rht,
							     &rht_node->node,
							     sdma_rht_params);
				WARN_ON(ret);

				for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
					kfree(rht_node->map[i]);

				kfree(rht_node);
			}
		}
	}

	cpumask_copy(&sde->cpu_mask, new_mask);
out:
	mutex_unlock(&process_to_sde_mutex);
out_free:
	free_cpumask_var(mask);
	free_cpumask_var(new_mask);
	return ret ? : strnlen(buf, PAGE_SIZE);
}
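
/*
 * Usage sketch (the sysfs path below is illustrative, not taken from
 * this file): this store routine backs a per-engine attribute, so an
 * administrator can pin an engine's user traffic to a set of CPUs:
 *
 *	echo 0-3 > /sys/class/infiniband/hfi1_0/sdma3/cpu_list
 *
 * Processes pinned to one of those CPUs are then steered to that
 * engine by sdma_select_user_engine() via the rhashtable built here.
 */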
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
{
	mutex_lock(&process_to_sde_mutex);
	if (cpumask_empty(&sde->cpu_mask))
		snprintf(buf, PAGE_SIZE, "%s\n", "empty");
	else
		cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
	mutex_unlock(&process_to_sde_mutex);
	return strnlen(buf, PAGE_SIZE);
}

static void sdma_rht_free(void *ptr, void *arg)
{
	struct sdma_rht_node *rht_node = ptr;
	int i;

	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
		kfree(rht_node->map[i]);

	kfree(rht_node);
}

/**
 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
 * @s: seq file
 * @dd: hfi1_devdata
 * @cpuid: cpu id
 *
 * This routine dumps the process to sde mappings per cpu
 */
void sdma_seqfile_dump_cpu_list(struct seq_file *s,
				struct hfi1_devdata *dd,
				unsigned long cpuid)
{
	struct sdma_rht_node *rht_node;
	int i, j;

	rht_node = rhashtable_lookup_fast(&dd->sdma_rht, &cpuid,
					  sdma_rht_params);
	if (!rht_node)
		return;

	seq_printf(s, "cpu%3lu: ", cpuid);
	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
		if (!rht_node->map[i] || !rht_node->map[i]->ctr)
			continue;

		seq_printf(s, " vl%d: [", i);

		for (j = 0; j < rht_node->map[i]->ctr; j++) {
			if (!rht_node->map[i]->sde[j])
				continue;

			seq_printf(s, " sdma%2d",
				   rht_node->map[i]->sde[j]->this_idx);
		}
		seq_puts(s, " ]");
	}

	seq_puts(s, "\n");
}
/*
 * Free the indicated map struct
 */
static void sdma_map_free(struct sdma_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}

/*
 * Handle RCU callback
 */
static void sdma_map_rcu_callback(struct rcu_head *list)
{
	struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);

	sdma_map_free(m);
}

/**
 * sdma_map_init - called when # vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_engines: per vl engine mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
 * implies auto computing the loading and giving each VL a uniform
 * distribution of engines per VL.
 *
 * The auto algorithm computes the sde_per_vl and the number of extra
 * engines.  Any extra engines are added from the last VL on down.
 *
 * rcu locking is used here to control access to the mapping fields.
 *
 * If either the num_vls or num_sdma are non-power of 2, the array sizes
 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
 * up to the next highest power of 2 and the first entry is reused
 * in a round robin fashion.
 *
 * If an error occurs the map change is not done and the mapping is
 * retained.
 */
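
/*
 * Distribution example for the auto algorithm (illustrative numbers):
 * with dd->num_sdma == 16 and num_vls == 8, sde_per_vl == 2 and
 * extra == 0, so every VL gets 2 engines.  With num_sdma == 16 and
 * num_vls == 5, sde_per_vl == 3 and extra == 1; the extra engine is
 * handed out from the last VL down, giving per-VL loads {3, 3, 3, 3, 4}.
 */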
int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
{
	int i, j;
	int extra, sde_per_vl;
	int engine = 0;
	u8 lvl_engines[OPA_MAX_VLS];
	struct sdma_vl_map *oldmap, *newmap;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return 0;

	if (!vl_engines) {
		/* truncate divide */
		sde_per_vl = dd->num_sdma / num_vls;
		/* extras */
		extra = dd->num_sdma % num_vls;
		vl_engines = lvl_engines;
		/* add extras from last vl down */
		for (i = num_vls - 1; i >= 0; i--, extra--)
			vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
	}
	/* build new map */
	newmap = kzalloc(
		sizeof(struct sdma_vl_map) +
			roundup_pow_of_two(num_vls) *
			sizeof(struct sdma_map_elem *),
		GFP_KERNEL);
	if (!newmap)
		goto bail;
	newmap->actual_vls = num_vls;
	newmap->vls = roundup_pow_of_two(num_vls);
	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
	/* initialize back-map */
	for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
		newmap->engine_to_vl[i] = -1;
	for (i = 0; i < newmap->vls; i++) {
		/* save for wrap around */
		int first_engine = engine;

		if (i < newmap->actual_vls) {
			int sz = roundup_pow_of_two(vl_engines[i]);

			/* only allocate once */
			newmap->map[i] = kzalloc(
				sizeof(struct sdma_map_elem) +
					sz * sizeof(struct sdma_engine *),
				GFP_KERNEL);
			if (!newmap->map[i])
				goto bail;
			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
			/* assign engines */
			for (j = 0; j < sz; j++) {
				newmap->map[i]->sde[j] =
					&dd->per_sdma[engine];
				if (++engine >= first_engine + vl_engines[i])
					/* wrap back to first engine */
					engine = first_engine;
			}
			/* assign back-map */
			for (j = 0; j < vl_engines[i]; j++)
				newmap->engine_to_vl[first_engine + j] = i;
		} else {
			/* just re-use entry without allocating */
			newmap->map[i] = newmap->map[i % num_vls];
		}
		engine = first_engine + vl_engines[i];
	}
	/* newmap in hand, save old map */
	spin_lock_irq(&dd->sde_map_lock);
	oldmap = rcu_dereference_protected(dd->sdma_map,
					   lockdep_is_held(&dd->sde_map_lock));

	/* publish newmap */
	rcu_assign_pointer(dd->sdma_map, newmap);

	spin_unlock_irq(&dd->sde_map_lock);
	/* success, free any old map after grace period */
	if (oldmap)
		call_rcu(&oldmap->list, sdma_map_rcu_callback);
	return 0;
bail:
	/* free any partial allocation */
	sdma_map_free(newmap);
	return -ENOMEM;
}
/*
 * Clean up allocated memory.
 *
 * This routine can be called regardless of the success of
 * sdma_init().
 */
static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
{
	size_t i;
	struct sdma_engine *sde;

	if (dd->sdma_pad_dma) {
		dma_free_coherent(&dd->pcidev->dev, 4,
				  (void *)dd->sdma_pad_dma,
				  dd->sdma_pad_phys);
		dd->sdma_pad_dma = NULL;
		dd->sdma_pad_phys = 0;
	}
	if (dd->sdma_heads_dma) {
		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
				  (void *)dd->sdma_heads_dma,
				  dd->sdma_heads_phys);
		dd->sdma_heads_dma = NULL;
		dd->sdma_heads_phys = 0;
	}
	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
		sde = &dd->per_sdma[i];

		sde->head_dma = NULL;
		sde->head_phys = 0;

		if (sde->descq) {
			dma_free_coherent(
				&dd->pcidev->dev,
				sde->descq_cnt * sizeof(u64[2]),
				sde->descq,
				sde->descq_phys
			);
			sde->descq = NULL;
			sde->descq_phys = 0;
		}
		kvfree(sde->tx_ring);
		sde->tx_ring = NULL;
	}
	spin_lock_irq(&dd->sde_map_lock);
	sdma_map_free(rcu_access_pointer(dd->sdma_map));
	RCU_INIT_POINTER(dd->sdma_map, NULL);
	spin_unlock_irq(&dd->sde_map_lock);
	synchronize_rcu();
	kfree(dd->per_sdma);
	dd->per_sdma = NULL;
}
/**
 * sdma_init() - called when device probed
 * @dd: hfi1_devdata
 * @port: port number (currently only zero)
 *
 * sdma_init initializes the specified number of engines.
 *
 * The code initializes each sde, its csrs.  Interrupts
 * are not required to be enabled.
 *
 * Returns:
 * 0 - success, -errno on failure
 */
int sdma_init(struct hfi1_devdata *dd, u8 port)
{
	unsigned this_idx;
	struct sdma_engine *sde;
	u16 descq_cnt;
	void *curr_head;
	struct hfi1_pportdata *ppd = dd->pport + port;
	u32 per_sdma_credits;
	uint idle_cnt = sdma_idle_cnt;
	size_t num_engines = dd->chip_sdma_engines;

	if (!HFI1_CAP_IS_KSET(SDMA)) {
		HFI1_CAP_CLEAR(SDMA_AHG);
		return 0;
	}
	if (mod_num_sdma &&
	    /* can't exceed chip support */
	    mod_num_sdma <= dd->chip_sdma_engines &&
	    /* count must be >= vls */
	    mod_num_sdma >= num_vls)
		num_engines = mod_num_sdma;

	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
		    dd->chip_sdma_mem_size);

	per_sdma_credits =
		dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);

	/* set up freeze waitqueue */
	init_waitqueue_head(&dd->sdma_unfreeze_wq);
	atomic_set(&dd->sdma_unfreeze_count, 0);

	descq_cnt = sdma_get_descq_cnt();
	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
		    num_engines, descq_cnt);

	/* alloc memory for array of send engines */
	dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
	if (!dd->per_sdma)
		return -ENOMEM;

	idle_cnt = ns_to_cclock(dd, idle_cnt);
	if (!sdma_desct_intr)
		sdma_desct_intr = SDMA_DESC_INTR;

	/* Allocate memory for SendDMA descriptor FIFOs */
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		sde->dd = dd;
		sde->ppd = ppd;
		sde->this_idx = this_idx;
		sde->descq_cnt = descq_cnt;
		sde->desc_avail = sdma_descq_freecnt(sde);
		sde->sdma_shift = ilog2(descq_cnt);
		sde->sdma_mask = (1 << sde->sdma_shift) - 1;

		/* Create a mask specifically for each interrupt source */
		sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
					   this_idx);
		sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
						this_idx);
		sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
					    this_idx);
		/* Create a combined mask to cover all 3 interrupt sources */
		sde->imask = sde->int_mask | sde->progress_mask |
			     sde->idle_mask;
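
		/*
		 * Mask layout sketch (assuming TXE_NUM_SDMA_ENGINES == 16,
		 * shown for illustration only): the three sources occupy
		 * consecutive banks of TXE_NUM_SDMA_ENGINES bits, so
		 * engine 3 would use bit 3 (interrupt), bit 19 (progress)
		 * and bit 35 (idle); imask ORs all three together.
		 */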
		spin_lock_init(&sde->tail_lock);
		seqlock_init(&sde->head_lock);
		spin_lock_init(&sde->senddmactrl_lock);
		spin_lock_init(&sde->flushlist_lock);
		/* insure there is always a zero bit */
		sde->ahg_bits = 0xfffffffe00000000ULL;

		sdma_set_state(sde, sdma_state_s00_hw_down);

		/* set up reference counting */
		kref_init(&sde->state.kref);
		init_completion(&sde->state.comp);

		INIT_LIST_HEAD(&sde->flushlist);
		INIT_LIST_HEAD(&sde->dmawait);

		sde->tail_csr =
			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));

		if (idle_cnt)
			dd->default_desc1 =
				SDMA_DESC1_HEAD_TO_HOST_FLAG;
		else
			dd->default_desc1 =
				SDMA_DESC1_INT_REQ_FLAG;

		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
			     (unsigned long)sde);

		tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
			     (unsigned long)sde);
		INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
		INIT_WORK(&sde->flush_worker, sdma_field_flush);

		sde->progress_check_head = 0;

		setup_timer(&sde->err_progress_check_timer,
			    sdma_err_progress_check, (unsigned long)sde);

		sde->descq = dma_zalloc_coherent(
			&dd->pcidev->dev,
			descq_cnt * sizeof(u64[2]),
			&sde->descq_phys,
			GFP_KERNEL
		);
		if (!sde->descq)
			goto bail;
		sde->tx_ring =
			kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
				GFP_KERNEL);
		if (!sde->tx_ring)
			sde->tx_ring =
				vzalloc(
					sizeof(struct sdma_txreq *) *
					descq_cnt);
		if (!sde->tx_ring)
			goto bail;
	}

	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
	/* Allocate memory for DMA of head registers to memory */
	dd->sdma_heads_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		dd->sdma_heads_size,
		&dd->sdma_heads_phys,
		GFP_KERNEL
	);
	if (!dd->sdma_heads_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
		goto bail;
	}

	/* Allocate memory for pad */
	dd->sdma_pad_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		sizeof(u32),
		&dd->sdma_pad_phys,
		GFP_KERNEL
	);
	if (!dd->sdma_pad_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
		goto bail;
	}

	/* assign each engine to different cacheline and init registers */
	curr_head = (void *)dd->sdma_heads_dma;
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		unsigned long phys_offset;

		sde = &dd->per_sdma[this_idx];

		sde->head_dma = curr_head;
		curr_head += L1_CACHE_BYTES;
		phys_offset = (unsigned long)sde->head_dma -
			      (unsigned long)dd->sdma_heads_dma;
		sde->head_phys = dd->sdma_heads_phys + phys_offset;
		init_sdma_regs(sde, per_sdma_credits, idle_cnt);
	}
	dd->flags |= HFI1_HAS_SEND_DMA;
	dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
	dd->num_sdma = num_engines;
	if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
		goto bail;

	if (rhashtable_init(&dd->sdma_rht, &sdma_rht_params))
		goto bail;

	dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
	return 0;

bail:
	sdma_clean(dd, num_engines);
	return -ENOMEM;
}
/**
 * sdma_all_running() - called when the link goes up
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the running state.
 */
void sdma_all_running(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* move all engines to running */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e30_go_running);
	}
}

/**
 * sdma_all_idle() - called when the link goes down
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the idle state.
 */
void sdma_all_idle(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* idle all engines */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e70_go_idle);
	}
}

/**
 * sdma_start() - called to kick off state processing for all engines
 * @dd: hfi1_devdata
 *
 * This routine is for kicking off the state processing for all required
 * sdma engines.  Interrupts need to be working at this point.
 */
void sdma_start(struct hfi1_devdata *dd)
{
	unsigned i;
	struct sdma_engine *sde;

	/* kick off the engines state processing */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e10_go_hw_start);
	}
}

/**
 * sdma_exit() - used when module is removed
 * @dd: hfi1_devdata
 */
void sdma_exit(struct hfi1_devdata *dd)
{
	unsigned this_idx;
	struct sdma_engine *sde;

	for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
	     ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		if (!list_empty(&sde->dmawait))
			dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
				   sde->this_idx);
		sdma_process_event(sde, sdma_event_e00_go_hw_down);

		del_timer_sync(&sde->err_progress_check_timer);

		/*
		 * This waits for the state machine to exit so it is not
		 * necessary to kill the sdma_sw_clean_up_task to make sure
		 * it is not running.
		 */
		sdma_finalput(&sde->state);
	}
	sdma_clean(dd, dd->num_sdma);
	rhashtable_free_and_destroy(&dd->sdma_rht, sdma_rht_free, NULL);
}
/*
 * unmap the indicated descriptor
 */
static inline void sdma_unmap_desc(
	struct hfi1_devdata *dd,
	struct sdma_desc *descp)
{
	switch (sdma_mapping_type(descp)) {
	case SDMA_MAP_SINGLE:
		dma_unmap_single(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	case SDMA_MAP_PAGE:
		dma_unmap_page(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	}
}

/*
 * return the mode as indicated by the first
 * descriptor in the tx.
 */
static inline u8 ahg_mode(struct sdma_txreq *tx)
{
	return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
		>> SDMA_DESC1_HEADER_MODE_SHIFT;
}

/**
 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
 * @dd: hfi1_devdata for unmapping
 * @tx: tx request to clean
 *
 * This is used in the progress routine to clean the tx or
 * by the ULP to toss an in-process tx build.
 *
 * The code can be called multiple times without issue.
 *
 */
void __sdma_txclean(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx)
{
	u16 i;

	if (tx->num_desc) {
		u8 skip = 0, mode = ahg_mode(tx);

		/* unmap first */
		sdma_unmap_desc(dd, &tx->descp[0]);
		/* determine number of AHG descriptors to skip */
		if (mode > SDMA_AHG_APPLY_UPDATE1)
			skip = mode >> 1;
		for (i = 1 + skip; i < tx->num_desc; i++)
			sdma_unmap_desc(dd, &tx->descp[i]);
		tx->num_desc = 0;
	}
	kfree(tx->coalesce_buf);
	tx->coalesce_buf = NULL;
	/* kmalloc'ed descp */
	if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
		tx->desc_limit = ARRAY_SIZE(tx->descs);
		kfree(tx->descp);
	}
}
static inline u16 sdma_gethead(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	int use_dmahead;
	u16 hwhead;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

retry:
	use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
		      (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
	hwhead = use_dmahead ?
		 (u16)le64_to_cpu(*sde->head_dma) :
		 (u16)read_sde_csr(sde, SD(HEAD));

	if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
		u16 cnt;
		u16 swtail;
		u16 swhead;
		int sane;

		swhead = sde->descq_head & sde->sdma_mask;
		/* this code is really bad for cache line trading */
		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		cnt = sde->descq_cnt;

		if (swhead < swtail)
			/* not wrapped */
			sane = (hwhead >= swhead) & (hwhead <= swtail);
		else if (swhead > swtail)
			/* wrapped around */
			sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			       (hwhead <= swtail);
		else
			/* empty */
			sane = (hwhead == swhead);
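
		/*
		 * Worked example (illustrative, cnt == 8): with swhead == 2
		 * and swtail == 6, only 2 <= hwhead <= 6 is sane; with
		 * swhead == 6 and swtail == 2 (ring wrapped), any hwhead in
		 * [6, 8) or [0, 2] is sane; and with an empty ring
		 * (swhead == swtail), only hwhead == swhead is acceptable.
		 */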
		if (unlikely(!sane)) {
			dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
				   sde->this_idx,
				   use_dmahead ? "dma" : "kreg",
				   hwhead, swhead, swtail, cnt);
			if (use_dmahead) {
				/* try one more time, using csr */
				use_dmahead = 0;
				goto retry;
			}
			/* proceed as if no progress */
			hwhead = swhead;
		}
	}
	return hwhead;
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with head_lock held.
 */
static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
{
	struct iowait *wait, *nw;
	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
	unsigned i, n = 0, seq;
	struct sdma_txreq *stx;
	struct hfi1_ibdev *dev = &sde->dd->verbs_dev;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "avail: %u\n", avail);
#endif

	do {
		seq = read_seqbegin(&dev->iowait_lock);
		if (!list_empty(&sde->dmawait)) {
			/* at least one item */
			write_seqlock(&dev->iowait_lock);
			/* Harvest waiters wanting DMA descriptors */
			list_for_each_entry_safe(
					wait,
					nw,
					&sde->dmawait,
					list) {
				u16 num_desc = 0;

				if (!wait->wakeup)
					continue;
				if (n == ARRAY_SIZE(waits))
					break;
				if (!list_empty(&wait->tx_head)) {
					stx = list_first_entry(
						&wait->tx_head,
						struct sdma_txreq,
						list);
					num_desc = stx->num_desc;
				}
				if (num_desc > avail)
					break;
				avail -= num_desc;
				list_del_init(&wait->list);
				waits[n++] = wait;
			}
			write_sequnlock(&dev->iowait_lock);
			break;
		}
	} while (read_seqretry(&dev->iowait_lock, seq));

	for (i = 0; i < n; i++)
		waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}
/* head_lock must be held */
static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
	struct sdma_txreq *txp = NULL;
	int progress = 0;
	u16 hwhead, swhead;
	int idle_check_done = 0;

	hwhead = sdma_gethead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

retry:
	txp = get_txhead(sde);
	swhead = sde->descq_head & sde->sdma_mask;
	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
	while (swhead != hwhead) {
		/* advance head, wrap if needed */
		swhead = ++sde->descq_head & sde->sdma_mask;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == swhead) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
		progress++;
	}

	/*
	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory. The head
	 * value read might not be fully up to date. If there are pending
	 * descriptors and the SDMA idle interrupt fired then read from the
	 * CSR SDMA head instead to get the latest value from the hardware.
	 * The hardware SDMA head should be read at most once in this invocation
	 * of sdma_make_progress(..) which is ensured by idle_check_done flag
	 */
	if ((status & sde->idle_mask) && !idle_check_done) {
		u16 swtail;

		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		if (swtail != hwhead) {
			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
			idle_check_done = 1;
			goto retry;
		}
	}

	sde->last_status = status;
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}
/**
 * sdma_engine_interrupt() - interrupt handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 *
 * Status is a mask of the 3 possible interrupts for this engine.  It will
 * contain bits _only_ for this SDMA engine.  It will contain at least one
 * bit, it may contain more.
 */
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
{
	trace_hfi1_sdma_engine_interrupt(sde, status);
	write_seqlock(&sde->head_lock);
	sdma_set_desc_cnt(sde, sdma_desct_intr);
	if (status & sde->idle_mask)
		sde->idle_int_cnt++;
	else if (status & sde->progress_mask)
		sde->progress_int_cnt++;
	else if (status & sde->int_mask)
		sde->sdma_int_cnt++;
	sdma_make_progress(sde, status);
	write_sequnlock(&sde->head_lock);
}

/**
 * sdma_engine_error() - error handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 */
void sdma_engine_error(struct sdma_engine *sde, u64 status)
{
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
		   sde->this_idx,
		   (unsigned long long)status,
		   sdma_state_names[sde->state.current_state]);
#endif
	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);
	if (status & ALL_SDMA_ENG_HALT_ERRS)
		__sdma_process_event(sde, sdma_event_e60_hw_halted);
	if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
		dd_dev_err(sde->dd,
			   "SDMA (%u) engine error: 0x%llx state %s\n",
			   sde->this_idx,
			   (unsigned long long)status,
			   sdma_state_names[sde->state.current_state]);
		dump_sdma_state(sde);
	}
	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
{
	u64 set_senddmactrl = 0;
	u64 clr_senddmactrl = 0;
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
		   sde->this_idx,
		   (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
#endif

	if (op & SDMA_SENDCTRL_OP_ENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_INTENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_HALT)
		set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);

	spin_lock_irqsave(&sde->senddmactrl_lock, flags);

	sde->p_senddmactrl |= set_senddmactrl;
	sde->p_senddmactrl &= ~clr_senddmactrl;

	if (op & SDMA_SENDCTRL_OP_CLEANUP)
		write_sde_csr(sde, SD(CTRL),
			      sde->p_senddmactrl |
			      SD(CTRL_SDMA_CLEANUP_SMASK));
	else
		write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);

	spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);

#ifdef CONFIG_SDMA_VERBOSITY
	sdma_dumpstate(sde);
#endif
}
static void sdma_setlengen(struct sdma_engine *sde)
{
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	/*
	 * Set SendDmaLenGen and clear-then-set the MSB of the generation
	 * count to enable generation checking and load the internal
	 * generation counter.
	 */
	write_sde_csr(sde, SD(LEN_GEN),
		      (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
	write_sde_csr(sde, SD(LEN_GEN),
		      ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
		      (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
}

static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	smp_wmb(); /* see get_txhead() */
	writeq(tail, sde->tail_csr);
}
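
/*
 * Ordering sketch (a reading aid, not additional driver logic): the
 * smp_wmb() above pairs with the read barrier in get_txhead().  The
 * submit path fills sde->descq[] and sde->tx_ring[], issues smp_wmb(),
 * then rings the tail CSR; the progress path only observes the
 * advanced hardware head after those writes are visible, so
 * get_txhead() cannot return a stale tx_ring entry for a descriptor
 * the engine has already fetched.
 */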
/*
 * This is called when changing to state s10_hw_start_up_halt_wait as
 * a result of send buffer errors or send DMA descriptor errors.
 */
static void sdma_hw_start_up(struct sdma_engine *sde)
{
	u64 reg;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	*sde->head_dma = 0;

	reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
	      SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
	write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
}

/*
 * set_sdma_integrity
 *
 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
 */
static void set_sdma_integrity(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;

	write_sde_csr(sde, SD(CHECK_ENABLE),
		      hfi1_pkt_base_sdma_integrity(dd));
}

static void init_sdma_regs(
	struct sdma_engine *sde,
	u32 credits,
	uint idle_cnt)
{
	u8 opval, opmask;
#ifdef CONFIG_SDMA_VERBOSITY
	struct hfi1_devdata *dd = sde->dd;

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
	write_sde_csr(sde, SD(DESC_CNT), 0);
	write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
	write_sde_csr(sde, SD(MEMORY),
		      ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
		      ((u64)(credits * sde->this_idx) <<
		       SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
	write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
	set_sdma_integrity(sde);
	opmask = OPCODE_CHECK_MASK_DISABLED;
	opval = OPCODE_CHECK_VAL_DISABLED;
	write_sde_csr(sde, SD(CHECK_OPCODE),
		      (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
		      (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
}
#ifdef CONFIG_SDMA_VERBOSITY

#define sdma_dumpstate_helper0(reg) do { \
		csr = read_csr(sde->dd, reg); \
		dd_dev_err(sde->dd, "%36s     0x%016llx\n", #reg, csr); \
	} while (0)

#define sdma_dumpstate_helper(reg) do { \
		csr = read_sde_csr(sde, reg); \
		dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
			   #reg, sde->this_idx, csr); \
	} while (0)

#define sdma_dumpstate_helper2(reg) do { \
		csr = read_csr(sde->dd, reg + (8 * i)); \
		dd_dev_err(sde->dd, "%33s_%02u     0x%016llx\n", \
			   #reg, i, csr); \
	} while (0)

void sdma_dumpstate(struct sdma_engine *sde)
{
	u64 csr;
	unsigned i;

	sdma_dumpstate_helper(SD(CTRL));
	sdma_dumpstate_helper(SD(STATUS));
	sdma_dumpstate_helper0(SD(ERR_STATUS));
	sdma_dumpstate_helper0(SD(ERR_MASK));
	sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
	sdma_dumpstate_helper(SD(ENG_ERR_MASK));

	for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
		sdma_dumpstate_helper2(CCE_INT_STATUS);
		sdma_dumpstate_helper2(CCE_INT_MASK);
		sdma_dumpstate_helper2(CCE_INT_BLOCKED);
	}

	sdma_dumpstate_helper(SD(TAIL));
	sdma_dumpstate_helper(SD(HEAD));
	sdma_dumpstate_helper(SD(PRIORITY_THLD));
	sdma_dumpstate_helper(SD(IDLE_CNT));
	sdma_dumpstate_helper(SD(RELOAD_CNT));
	sdma_dumpstate_helper(SD(DESC_CNT));
	sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
	sdma_dumpstate_helper(SD(MEMORY));
	sdma_dumpstate_helper0(SD(ENGINES));
	sdma_dumpstate_helper0(SD(MEM_SIZE));
	/* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS);  */
	sdma_dumpstate_helper(SD(BASE_ADDR));
	sdma_dumpstate_helper(SD(LEN_GEN));
	sdma_dumpstate_helper(SD(HEAD_ADDR));
	sdma_dumpstate_helper(SD(CHECK_ENABLE));
	sdma_dumpstate_helper(SD(CHECK_VL));
	sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
	sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
	sdma_dumpstate_helper(SD(CHECK_SLID));
	sdma_dumpstate_helper(SD(CHECK_OPCODE));
}
#endif
static void dump_sdma_state(struct sdma_engine *sde)
{
	struct hw_sdma_desc *descq;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;
	u16 head, tail, cnt;

	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	cnt = sdma_descq_freecnt(sde);
	descq = sde->descq;

	dd_dev_err(sde->dd,
		   "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
		   sde->this_idx, head, tail, cnt,
		   !list_empty(&sde->flushlist));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
			   'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
			& SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
			& SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
			& SDMA_DESC0_BYTE_COUNT_MASK;
		dd_dev_err(sde->dd,
			   "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			   head, flags, addr, gen, len);
		dd_dev_err(sde->dd,
			   "\tdesc0:0x%016llx desc1 0x%016llx\n",
			   desc[0], desc[1]);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			dd_dev_err(sde->dd,
				   "\taidx: %u amode: %u alen: %u\n",
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
					SDMA_DESC1_HEADER_INDEX_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_MODE_SMASK) >>
					SDMA_DESC1_HEADER_MODE_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_DWS_SMASK) >>
					SDMA_DESC1_HEADER_DWS_SHIFT));
		head++;
		head &= sde->sdma_mask;
	}
}

#define SDE_FMT \
	"SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
/**
 * sdma_seqfile_dump_sde() - debugfs dump of sde
 * @s: seq file
 * @sde: send dma engine to dump
 *
 * This routine dumps the sde to the indicated seq file.
 */
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
{
	u16 head, tail;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;

	head = sde->descq_head & sde->sdma_mask;
	tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
	seq_printf(s, SDE_FMT, sde->this_idx,
		   sde->cpu,
		   sdma_state_name(sde->state.current_state),
		   (unsigned long long)read_sde_csr(sde, SD(CTRL)),
		   (unsigned long long)read_sde_csr(sde, SD(STATUS)),
		   (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
		   (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
		   (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
		   (unsigned long long)le64_to_cpu(*sde->head_dma),
		   (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
		   (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
		   (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
		   (unsigned long long)sde->last_status,
		   (unsigned long long)sde->ahg_bits,
		   sde->tx_tail,
		   sde->tx_head,
		   sde->descq_tail,
		   sde->descq_head,
		   !list_empty(&sde->flushlist),
		   sde->descq_full_count,
		   (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
			   'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
			& SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
			& SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
			& SDMA_DESC0_BYTE_COUNT_MASK;
		seq_printf(s,
			   "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			   head, flags, addr, gen, len);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
					SDMA_DESC1_HEADER_INDEX_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_MODE_SMASK) >>
					SDMA_DESC1_HEADER_MODE_SHIFT));
		head = (head + 1) & sde->sdma_mask;
	}
}

/*
 * add the generation number into
 * the qw1 and return
 */
static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
{
	u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;

	qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
	qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
		<< SDMA_DESC1_GENERATION_SHIFT;
	return qw1;
}
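
/*
 * Generation example (illustrative): with descq_cnt == 2048,
 * sdma_shift == 11, so the 2-bit generation is bits 12:11 of the
 * free-running descq_tail and advances 0, 1, 2, 3, 0, ... once per
 * full pass over the ring, letting hardware catch stale descriptors
 * after a wrap.
 */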
2268 * This routine submits the indicated tx
2270 * Space has already been guaranteed and
2271 * tail side of ring is locked.
2273 * The hardware tail update is done
2274 * in the caller and that is facilitated
2275 * by returning the new tail.
2277 * There is special case logic for ahg
2278 * to not add the generation number for
2279 * up to 2 descriptors that follow the
static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
{
	int i;
	u16 tail;
	struct sdma_desc *descp = tx->descp;
	u8 skip = 0, mode = ahg_mode(tx);

	tail = sde->descq_tail & sde->sdma_mask;
	sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
	sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
	trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
				   tail, &sde->descq[tail]);
	tail = ++sde->descq_tail & sde->sdma_mask;
	descp++;
	if (mode > SDMA_AHG_APPLY_UPDATE1)
		skip = mode >> 1;
	for (i = 1; i < tx->num_desc; i++, descp++) {
		u64 qw1;

		sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
		if (skip) {
			/* edits don't have generation */
			qw1 = descp->qw[1];
			skip--;
		} else {
			/* replace generation with real one for non-edits */
			qw1 = add_gen(sde, descp->qw[1]);
		}
		sde->descq[tail].qw[1] = cpu_to_le64(qw1);
		trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
					   tail, &sde->descq[tail]);
		tail = ++sde->descq_tail & sde->sdma_mask;
	}
	tx->next_descq_idx = tail;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
	WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
#endif
	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
	sde->desc_avail -= tx->num_desc;
	return tail;
}
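/*
 * Note on the skip logic above (assuming the SDMA_AHG_APPLY_UPDATE*
 * encodings in sdma.h, where UPDATE2 == 3 and UPDATE3 == 4): mode >> 1
 * yields a skip of 1 or 2, so the qw[1] of the one or two AHG "edit"
 * descriptors that follow the head descriptor keeps its header-update
 * payload instead of being stamped with a generation number.
 */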
/*
 * Check for progress
 */
static int sdma_check_progress(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *tx)
{
	int ret;

	sde->desc_avail = sdma_descq_freecnt(sde);
	if (tx->num_desc <= sde->desc_avail)
		return -EAGAIN;
	/* pulse the head_lock */
	if (wait && wait->sleep) {
		unsigned seq;

		seq = raw_seqcount_begin(
			(const seqcount_t *)&sde->head_lock.seqcount);
		ret = wait->sleep(sde, wait, tx, seq);
		if (ret == -EAGAIN)
			sde->desc_avail = sdma_descq_freecnt(sde);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
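/*
 * The raw_seqcount_begin() snapshot above is handed to the caller's
 * sleep routine so it can detect whether the head side made progress
 * (freeing descriptors) between the free-count check and the moment it
 * queues itself; if so, the sleep callback returns -EAGAIN and the
 * submit path retries instead of sleeping on stale state.
 */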
/**
 * sdma_send_txreq() - submit a tx req to ring
 * @sde: sdma engine to use
 * @wait: wait structure to use when full (may be NULL)
 * @tx: sdma_txreq to submit
 *
 * The call submits the tx into the ring.  If an iowait structure is non-NULL
 * the packet will be queued to the list in wait.
 *
 * Return:
 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
 * ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx)
{
	int ret = 0;
	u16 tail;
	unsigned long flags;

	/* user should have supplied entire packet */
	if (unlikely(tx->tlen))
		return -EINVAL;
	tx->wait = wait;
	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	if (unlikely(!__sdma_running(sde)))
		goto unlock_noconn;
	if (unlikely(tx->num_desc > sde->desc_avail))
		goto nodesc;
	tail = submit_tx(sde, tx);
	if (wait)
		iowait_sdma_inc(wait);
	sdma_update_tail(sde, tail);
unlock:
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret;
unlock_noconn:
	if (wait)
		iowait_sdma_inc(wait);
	tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
	spin_lock(&sde->flushlist_lock);
	list_add_tail(&tx->list, &sde->flushlist);
	spin_unlock(&sde->flushlist_lock);
	if (wait) {
		wait->tx_count++;
		wait->count += tx->num_desc;
	}
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto unlock;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto unlock;
}
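/*
 * Illustrative caller pattern (not part of this file): with a tx fully
 * built by sdma_txinit() and the sdma_txadd_* helpers so that tx->tlen
 * is zero, a hypothetical sender might do:
 *
 *	ret = sdma_send_txreq(sde, &priv->s_iowait, tx);
 *	if (ret == -ECOMM)
 *		;	// engine not running; tx went to the flush list
 *	else if (ret == -EIOCBQUEUED)
 *		;	// ring full; tx parked on the iowait for resubmit
 *
 * priv->s_iowait is a hypothetical per-queue iowait, shown only for
 * illustration.
 */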
/**
 * sdma_send_txlist() - submit a list of tx req to ring
 * @sde: sdma engine to use
 * @wait: wait structure to use when full (may be NULL)
 * @tx_list: list of sdma_txreqs to submit
 * @count_out: pointer to a u32 which, after return, will contain the
 *             total number of sdma_txreqs removed from the tx_list. This
 *             includes sdma_txreqs whose SDMA descriptors were submitted
 *             to the ring and sdma_txreqs which were added to the SDMA
 *             engine flush list because the SDMA engine was not running.
 *
 * The call submits the list into the ring.
 *
 * If the iowait structure is non-NULL and not equal to the iowait list,
 * the unprocessed part of the list will be appended to the list in wait.
 *
 * In all cases, the tx_list will be updated so the head of the tx_list is
 * the list of descriptors that have yet to be transmitted.
 *
 * The intent of this call is to provide a more efficient
 * way of submitting multiple packets to SDMA while holding the tail
 * side locking.
 *
 * Return:
 * 0 - Success,
 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
		     struct list_head *tx_list, u32 *count_out)
{
	struct sdma_txreq *tx, *tx_next;
	int ret = 0;
	unsigned long flags;
	u16 tail = INVALID_TAIL;
	u32 submit_count = 0, flush_count = 0, total_count;

	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		if (unlikely(!__sdma_running(sde)))
			goto unlock_noconn;
		if (unlikely(tx->num_desc > sde->desc_avail))
			goto nodesc;
		if (unlikely(tx->tlen)) {
			ret = -EINVAL;
			goto update_tail;
		}
		list_del_init(&tx->list);
		tail = submit_tx(sde, tx);
		submit_count++;
		if (tail != INVALID_TAIL &&
		    (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
			sdma_update_tail(sde, tail);
			tail = INVALID_TAIL;
		}
	}
update_tail:
	total_count = submit_count + flush_count;
	if (wait)
		iowait_sdma_add(wait, total_count);
	if (tail != INVALID_TAIL)
		sdma_update_tail(sde, tail);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	*count_out = total_count;
	return ret;
unlock_noconn:
	spin_lock(&sde->flushlist_lock);
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		list_del_init(&tx->list);
		tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
		tx->sn = sde->tail_sn++;
		trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
		list_add_tail(&tx->list, &sde->flushlist);
		flush_count++;
		if (wait) {
			wait->tx_count++;
			wait->count += tx->num_desc;
		}
	}
	spin_unlock(&sde->flushlist_lock);
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto update_tail;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto update_tail;
}
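/*
 * Illustrative batching pattern (hypothetical caller): build several
 * txreqs locally, then submit them all under a single tail_lock hold:
 *
 *	LIST_HEAD(txlist);
 *	u32 count;
 *	// ... list_add_tail(&tx->list, &txlist) for each built tx ...
 *	ret = sdma_send_txlist(sde, wait, &txlist, &count);
 *
 * On return, count holds the number of txreqs consumed and txlist holds
 * only the txreqs still waiting to be transmitted.
 */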
static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	__sdma_process_event(sde, event);

	if (sde->state.current_state == sdma_state_s99_running)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
static void __sdma_process_event(struct sdma_engine *sde,
				 enum sdma_events event)
{
	struct sdma_state *ss = &sde->state;
	int need_progress = 0;

	/* CONFIG SDMA temporary */
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
		   sdma_state_names[ss->current_state],
		   sdma_event_names[event]);
#endif
	switch (ss->current_state) {
	case sdma_state_s00_hw_down:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			break;
		case sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually result
			 * of link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on
			 * 8051 control engine.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&sde->state);
			sdma_set_state(sde,
				       sdma_state_s10_hw_start_up_halt_wait);
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s10_hw_start_up_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde,
				       sdma_state_s15_hw_start_up_clean_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s15_hw_start_up_clean_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s20_idle:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			sdma_set_state(sde, sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e85_link_down:
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s50_hw_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s60_idle_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s80_hw_freeze:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s82_freeze_sw_clean:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			/* notify caller this engine is done cleaning */
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s99_running:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			need_progress = 1;
			sdma_err_progress_check_schedule(sde);
			/* fall through */
		case sdma_event_e90_sw_halted:
			/*
			 * SW initiated halt does not perform engines
			 * progress check
			 */
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		}
		break;
	}
	ss->last_event = event;
	if (need_progress)
		sdma_make_progress(sde, 0);
}
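/*
 * For reference, the normal bring-up walk through the cases above
 * (events posted by the init and interrupt paths):
 *
 *	e10_go_hw_start:      s00_hw_down -> s10_hw_start_up_halt_wait
 *	e15_hw_halt_done:     s10 -> s15_hw_start_up_clean_wait
 *	e25_hw_clean_up_done: s15 -> s99_running (or s20_idle if no
 *	                      go_s99_running request is pending)
 */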
/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors. There doesn't seem to be
 * much point in an interim step. The last descriptor
 * is reserved for the coalesce buffer in order to support
 * cases where an input packet has more than MAX_DESC iovecs.
 */
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	/* Handle last descriptor */
	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
		/* if tlen is 0, it is for padding, release last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
		} else if (!tx->coalesce_buf) {
			/* allocate coalesce buffer with space for padding */
			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
						   GFP_ATOMIC);
			if (!tx->coalesce_buf)
				goto enomem;
			tx->coalesce_idx = 0;
		}
		return 0;
	}
	if (unlikely(tx->num_desc == MAX_DESC))
		goto enomem;
	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		goto enomem;
	/* reserve last descriptor for coalescing */
	tx->desc_limit = MAX_DESC - 1;
	/* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
enomem:
	__sdma_txclean(dd, tx);
	return -ENOMEM;
}
/**
 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
 *
 * This is called once the initial nominal allocation of descriptors
 * in the sdma_txreq is exhausted.
 *
 * This function calls _extend_sdma_tx_descs to extend or allocate
 * the coalesce buffer. If a coalesce buffer is allocated, it copies
 * the input packet data into the coalesce buffer. It also adds the
 * coalesce buffer descriptor once the whole packet is received.
 *
 * Return:
 * <0 - error
 * 0 - coalescing, don't populate descriptor
 * 1 - continue with populating descriptor
 */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len)
{
	int pad_len, rval;
	dma_addr_t addr;

	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		__sdma_txclean(dd, tx);
		return rval;
	}

	/* If coalesce buffer is allocated, copy data into it */
	if (tx->coalesce_buf) {
		if (type == SDMA_MAP_NONE) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

		if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
		} else if (WARN_ON(!kvaddr)) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
		tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);

		/* If there is more data, return */
		if (tx->tlen - tx->coalesce_idx)
			return 0;

		/* Whole packet is received; add any padding */
		pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len) {
			pad_len = sizeof(u32) - pad_len;
			memset(tx->coalesce_buf + tx->coalesce_idx, 0,
			       pad_len);
			/* padding is taken care of for coalescing case */
			tx->packet_len += pad_len;
			tx->tlen += pad_len;
		}

		/* dma map the coalesce buffer */
		addr = dma_map_single(&dd->pcidev->dev,
				      tx->coalesce_buf,
				      tx->tlen,
				      DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
			__sdma_txclean(dd, tx);
			return -ENOSPC;
		}

		/* Add descriptor for coalesce buffer */
		tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen);
	}

	return 1;
}
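/*
 * Illustrative use of the return convention above (the real callers are
 * the sdma_txadd_* helpers):
 *
 *	rval = ext_coal_sdma_tx_descs(dd, tx, type, kvaddr, page,
 *				      offset, len);
 *	if (rval <= 0)
 *		return rval;	// error, or buffer absorbed by coalescing
 *	// rval == 1: continue and build a normal descriptor for the buffer
 */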
/* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	unsigned i;
	u64 sreg;

	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
	       (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		SD(CHECK_SLID_VALUE_SHIFT));

	for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}
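/*
 * Worked example (illustrative): with LMC = 2 the caller passes
 * mask = ~0x3ull, so the low two LID bits are ignored by the check.
 * For lid = 0x1234 the programmed value field is 0x1234 & ~0x3 = 0x1234,
 * and SLIDs 0x1234 through 0x1237 all pass the SLID check.
 */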
/* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	tx->num_desc++;
	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			__sdma_txclean(dd, tx);
			return rval;
		}
	}
	/* finish the one just added */
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}
/*
 * Add ahg to the sdma_txreq
 *
 * The logic will consume up to 3
 * descriptors at the beginning of
 * sdma_txreq.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
	/* compute mode */
	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	tx->num_desc++;
	/* initialize consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* FALLTHROUGH */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
		break;
	}
	ahg_hlen >>= 2;
	tx->descs[0].qw[1] |=
		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
	for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1])
				<< shift);
		shift = (shift + 32) & 63;
	}
}
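/*
 * Summary of the descriptor consumption implied above: UPDATE1
 * (num_ahg == 1) uses only descs[0]; UPDATE2 (2-5 updates) also
 * consumes descs[1]; UPDATE3 (6-9 updates) consumes descs[1] and
 * descs[2].  The loop packs the remaining 32-bit updates into the spare
 * qwords, advancing the shift by 32 bits each iteration.
 */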
/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
 * -ENOSPC if an entry is not available
 */
int sdma_ahg_alloc(struct sdma_engine *sde)
{
	int nr;
	int oldbit;

	if (!sde) {
		trace_hfi1_ahg_allocate(sde, -EINVAL);
		return -EINVAL;
	}
	while (1) {
		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;
		}
		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
		if (!oldbit)
			break;
		cpu_relax();
	}
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}
/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}
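/*
 * Illustrative pairing (hypothetical caller): an AHG user allocates an
 * entry once, programs header updates through _sdma_txreq_ahgadd() on
 * each send, and frees the entry at teardown:
 *
 *	int ahg_index = sdma_ahg_alloc(sde);
 *	if (ahg_index >= 0) {
 *		// ... sends using the entry ...
 *		sdma_ahg_free(sde, ahg_index);
 *	}
 */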
/*
 * SPC freeze handling for SDMA engines. Called when the driver knows
 * the SPC is going into a freeze but before the freeze is fully
 * settled. Generally an error interrupt.
 *
 * This event will pull the engine out of running so no more entries can be
 * added to the engine's queue.
 */
void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
{
	int i;
	enum sdma_events event = link_down ? sdma_event_e85_link_down :
					     sdma_event_e80_hw_freeze;

	/* set up the wait but do not wait here */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines to stop running and wait */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], event);

	/* sdma_freeze() will wait for all engines to have stopped */
}
/*
 * SPC freeze handling for SDMA engines. Called when the driver knows
 * the SPC is fully frozen.
 */
void sdma_freeze(struct hfi1_devdata *dd)
{
	int i;
	int ret;

	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <=
				       0);
	/* interrupted or count is negative, then unloading - just exit */
	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
		return;

	/* set up the count for the next wait */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines that the SPC is frozen, they can start cleaning */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);

	/*
	 * Wait for everyone to finish software clean before exiting.  The
	 * software clean will read engine CSRs, so it must be completed
	 * before the next step, which will clear the engine CSRs.
	 */
	(void)wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* no need to check results - done no matter what */
}
/*
 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
 *
 * The SPC freeze acts like an SDMA halt and a hardware clean combined. All
 * that is left is a software clean. We could do it after the SPC is fully
 * frozen, but then we'd have to add another state to wait for the unfreeze.
 * Instead, just defer the software clean until the unfreeze step.
 */
void sdma_unfreeze(struct hfi1_devdata *dd)
{
	int i;

	/* tell all engines to start freeze clean up */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i],
				   sdma_event_e82_hw_unfreeze);
}
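/*
 * Freeze sequence, for reference (derived from the three entry points
 * above): sdma_freeze_notify() posts e80/e85 to pull every engine out
 * of running, sdma_freeze() waits for that and then posts e81 to start
 * the software clean, and sdma_unfreeze() posts e82 to restart the
 * engines once the SPC is unfrozen.
 */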
/**
 * _sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 */
void _sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
	/* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
		  sde->progress_mask);
}