/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
/* must be a power of 2 >= 64 and <= 32768 */
#define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff

static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

static uint sdma_idle_cnt = 250;
module_param(sdma_idle_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");

uint mod_num_sdma;
module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
MODULE_PARM_DESC(num_sdma, "Set max number of SDMA engines to use");

static uint sdma_desct_intr = SDMA_DESC_INTR;
module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before an interrupt");
#define SDMA_WAIT_BATCH_SIZE 20
/* max wait time for an SDMA engine to indicate it has halted */
#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
/* all SDMA engine errors that cause a halt */

#define SD(name) SEND_DMA_##name
#define ALL_SDMA_ENG_HALT_ERRS \
	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
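
/*
 * Note: the SD() shorthand above token-pastes its argument onto SEND_DMA_,
 * so e.g. SD(CTRL) expands to SEND_DMA_CTRL and
 * SD(STATUS_ENG_HALTED_SMASK) to SEND_DMA_STATUS_ENG_HALTED_SMASK.
 */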

/* sdma_sendctrl operations */
#define SDMA_SENDCTRL_OP_ENABLE    (1U << 0)
#define SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
#define SDMA_SENDCTRL_OP_HALT      (1U << 2)
#define SDMA_SENDCTRL_OP_CLEANUP   (1U << 3)
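
/*
 * These op bits are OR'd into a single mask and consumed by
 * sdma_sendctrl() below; sdma_set_state() builds the mask from the
 * action table, e.g. an enable-plus-interrupt-enable request would pass
 * SDMA_SENDCTRL_OP_ENABLE | SDMA_SENDCTRL_OP_INTENABLE.
 */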

/* handle long defines */
#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT

static const char * const sdma_state_names[] = {
	[sdma_state_s00_hw_down]                = "s00_HwDown",
	[sdma_state_s10_hw_start_up_halt_wait]  = "s10_HwStartUpHaltWait",
	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
	[sdma_state_s20_idle]                   = "s20_Idle",
	[sdma_state_s30_sw_clean_up_wait]       = "s30_SwCleanUpWait",
	[sdma_state_s40_hw_clean_up_wait]       = "s40_HwCleanUpWait",
	[sdma_state_s50_hw_halt_wait]           = "s50_HwHaltWait",
	[sdma_state_s60_idle_halt_wait]         = "s60_IdleHaltWait",
	[sdma_state_s80_hw_freeze]              = "s80_HwFreeze",
	[sdma_state_s82_freeze_sw_clean]        = "s82_FreezeSwClean",
	[sdma_state_s99_running]                = "s99_Running",
};

static const char * const sdma_event_names[] = {
	[sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
	[sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
	[sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
	[sdma_event_e30_go_running]   = "e30_GoRunning",
	[sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
	[sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
	[sdma_event_e60_hw_halted]    = "e60_HwHalted",
	[sdma_event_e70_go_idle]      = "e70_GoIdle",
	[sdma_event_e80_hw_freeze]    = "e80_HwFreeze",
	[sdma_event_e81_hw_frozen]    = "e81_HwFrozen",
	[sdma_event_e82_hw_unfreeze]  = "e82_HwUnfreeze",
	[sdma_event_e85_link_down]    = "e85_LinkDown",
	[sdma_event_e90_sw_halted]    = "e90_SwHalted",
};

static const struct sdma_set_state_action sdma_action_table[] = {
	[sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
	},
	[sdma_state_s10_hw_start_up_halt_wait] = {
	},
	[sdma_state_s15_hw_start_up_clean_wait] = {
	},
	[sdma_state_s20_idle] = {
	},
	[sdma_state_s30_sw_clean_up_wait] = {
	},
	[sdma_state_s40_hw_clean_up_wait] = {
	},
	[sdma_state_s50_hw_halt_wait] = {
	},
	[sdma_state_s60_idle_halt_wait] = {
		.go_s99_running_tofalse = 1,
	},
	[sdma_state_s80_hw_freeze] = {
	},
	[sdma_state_s82_freeze_sw_clean] = {
	},
	[sdma_state_s99_running] = {
		.go_s99_running_totrue = 1,
	},
};

#define SDMA_TAIL_UPDATE_THRESH 0x1F
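
/*
 * Tail CSR writes are batched in sdma_send_txlist(): the tail is only
 * pushed to hardware once every SDMA_TAIL_UPDATE_THRESH + 1 (32)
 * submissions, or once at the end of the list.
 */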

/* declare all statics here rather than keep sorting */
static void sdma_complete(struct kref *);
static void sdma_finalput(struct sdma_state *);
static void sdma_get(struct sdma_state *);
static void sdma_hw_clean_up_task(unsigned long);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
static void sdma_sw_clean_up_task(unsigned long);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
static void sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void __sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void dump_sdma_state(struct sdma_engine *sde);
static void sdma_make_progress(struct sdma_engine *sde, u64 status);
static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
static void sdma_flush_descq(struct sdma_engine *sde);

/**
 * sdma_state_name() - return state string from enum
 * @state: state
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}

static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}

/*
 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
 * sdma engine 'sde' to drop to 0.
 */
static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
					int pause)
{
	u64 off = 8 * sde->this_idx;
	struct hfi1_devdata *dd = sde->dd;
	u64 reg, prev = 0;
	u16 counter = 0;

	while (1) {
		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);

		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != prev)
			counter = 0;
		if (counter++ > 500) {
			/* timed out - bounce the link */
			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sde->this_idx, (u32)reg);
			queue_work(dd->pport->hfi1_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		prev = reg;
		udelay(5);
	}
}

/*
 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
 * and pause for credit return.
 */
void sdma_wait(struct hfi1_devdata *dd)
{
	unsigned int i;

	for (i = 0; i < dd->num_sdma; i++) {
		struct sdma_engine *sde = &dd->per_sdma[i];

		sdma_wait_for_packet_egress(sde, 0);
	}
}

static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
{
	u64 reg;

	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
		return;
	reg = cnt;
	reg &= SD(DESC_CNT_CNT_MASK);
	reg <<= SD(DESC_CNT_CNT_SHIFT);
	write_sde_csr(sde, SD(DESC_CNT), reg);
}

/*
 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
 *
 * Depending on timing there can be txreqs in two places:
 * - in the descq ring
 * - in the flush list
 *
 * To avoid ordering issues the descq ring needs to be flushed
 * first, followed by the flush list.
 *
 * This routine is called from two places:
 * - From a work queue item
 * - Directly from the state machine just before setting the
 *   state to running
 *
 * Must be called with head_lock held
 *
 */
static void sdma_flush(struct sdma_engine *sde)
{
	struct sdma_txreq *txp, *txp_next;
	LIST_HEAD(flushlist);
	unsigned long flags;

	/* flush from head to tail */
	sdma_flush_descq(sde);
	spin_lock_irqsave(&sde->flushlist_lock, flags);
	/* copy flush list */
	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
		list_del_init(&txp->list);
		list_add_tail(&txp->list, &flushlist);
	}
	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
	/* flush from flush list */
	list_for_each_entry_safe(txp, txp_next, &flushlist, list) {
		int drained = 0;
		/* protect against complete modifying */
		struct iowait *wait = txp->wait;

		list_del_init(&txp->list);
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
		trace_hfi1_sdma_out_sn(sde, txp->sn);
		if (WARN_ON_ONCE(sde->head_sn != txp->sn))
			dd_dev_err(sde->dd, "expected %llu got %llu\n",
				   sde->head_sn, txp->sn);
		sde->head_sn++;
#endif
		sdma_txclean(sde->dd, txp);
		if (wait)
			drained = atomic_dec_and_test(&wait->sdma_busy);
		if (txp->complete)
			(*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained);
		if (wait && drained)
			iowait_drain_wakeup(wait);
	}
}

/*
 * Fields a work request for flushing the descq ring
 * and the flush list
 *
 * If the engine has been brought to running during
 * the scheduling delay, the flush is ignored, assuming
 * that the process of bringing the engine to running
 * would have done this flush prior to going to running.
 *
 */
static void sdma_field_flush(struct work_struct *work)
{
	unsigned long flags;
	struct sdma_engine *sde =
		container_of(work, struct sdma_engine, flush_worker);

	write_seqlock_irqsave(&sde->head_lock, flags);
	if (!__sdma_running(sde))
		sdma_flush(sde);
	write_sequnlock_irqrestore(&sde->head_lock, flags);
}

static void sdma_err_halt_wait(struct work_struct *work)
{
	struct sdma_engine *sde = container_of(work, struct sdma_engine,
					       err_halt_worker);
	u64 statuscsr;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
	while (1) {
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
		if (statuscsr)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(sde->dd,
				   "SDMA engine %d - timeout waiting for engine to halt\n",
				   sde->this_idx);
			/*
			 * Continue anyway.  This could happen if there was
			 * an uncorrectable error in the wrong spot.
			 */
			break;
		}
		usleep_range(80, 120);
	}

	sdma_process_event(sde, sdma_event_e15_hw_halt_done);
}

static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
{
	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
		unsigned index;
		struct hfi1_devdata *dd = sde->dd;

		for (index = 0; index < dd->num_sdma; index++) {
			struct sdma_engine *curr_sdma = &dd->per_sdma[index];

			if (curr_sdma != sde)
				curr_sdma->progress_check_head =
					curr_sdma->descq_head;
		}
		dd_dev_err(sde->dd,
			   "SDMA engine %d - check scheduled\n",
			   sde->this_idx);
		mod_timer(&sde->err_progress_check_timer, jiffies + 10);
	}
}

static void sdma_err_progress_check(unsigned long data)
{
	unsigned index;
	struct sdma_engine *sde = (struct sdma_engine *)data;

	dd_dev_err(sde->dd, "SDE progress check event\n");
	for (index = 0; index < sde->dd->num_sdma; index++) {
		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
		unsigned long flags;

		/* check progress on each engine except the current one */
		if (curr_sde == sde)
			continue;
		/*
		 * We must lock interrupts when acquiring sde->lock,
		 * to avoid a deadlock if interrupt triggers and spins on
		 * the same lock on same CPU
		 */
		spin_lock_irqsave(&curr_sde->tail_lock, flags);
		write_seqlock(&curr_sde->head_lock);

		/* skip non-running queues */
		if (curr_sde->state.current_state != sdma_state_s99_running) {
			write_sequnlock(&curr_sde->head_lock);
			spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
			continue;
		}

		if ((curr_sde->descq_head != curr_sde->descq_tail) &&
		    (curr_sde->descq_head ==
		     curr_sde->progress_check_head))
			__sdma_process_event(curr_sde,
					     sdma_event_e90_sw_halted);
		write_sequnlock(&curr_sde->head_lock);
		spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
	}
	schedule_work(&sde->err_halt_worker);
}

static void sdma_hw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	u64 statuscsr;

	while (1) {
#ifdef CONFIG_SDMA_VERBOSITY
		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
			   sde->this_idx, slashstrip(__FILE__), __LINE__,
			   __func__);
#endif
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
		if (statuscsr)
			break;
		udelay(10);
	}

	sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
}

static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
	smp_read_barrier_depends(); /* see sdma_update_tail() */
	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}

/*
 * flush ring for recovery
 */
static void sdma_flush_descq(struct sdma_engine *sde)
{
	u16 head, tail;
	int progress = 0;
	struct sdma_txreq *txp = get_txhead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */
	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	while (head != tail) {
		/* advance head, wrap if needed */
		head = ++sde->descq_head & sde->sdma_mask;
		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == head) {
			int drained = 0;
			/* protect against complete modifying */
			struct iowait *wait = txp->wait;

			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			if (wait)
				drained = atomic_dec_and_test(&wait->sdma_busy);
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
			trace_hfi1_sdma_out_sn(sde, txp->sn);
			if (WARN_ON_ONCE(sde->head_sn != txp->sn))
				dd_dev_err(sde->dd, "expected %llu got %llu\n",
					   sde->head_sn, txp->sn);
			sde->head_sn++;
#endif
			sdma_txclean(sde->dd, txp);
			trace_hfi1_sdma_progress(sde, head, tail, txp);
			if (txp->complete)
				(*txp->complete)(
					txp,
					SDMA_TXREQ_S_ABORTED,
					drained);
			if (wait && drained)
				iowait_drain_wakeup(wait);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		progress++;
	}
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/*
	 * In the error clean up sequence, software clean must be called
	 * before the hardware clean so we can use the hardware head in
	 * the progress routine.  A hardware clean or SPC unfreeze will
	 * reset the hardware head.
	 *
	 * Process all retired requests. The progress routine will use the
	 * latest physical hardware head - we are not running so speed does
	 * not matter.
	 */
	sdma_make_progress(sde, 0);

	sdma_flush(sde);

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers have been reset via an earlier
	 * clean up.
	 */
	sde->descq_tail = 0;
	sde->descq_head = 0;
	sde->desc_avail = sdma_descq_freecnt(sde);
	*sde->head_dma = 0;

	__sdma_process_event(sde, sdma_event_e40_sw_cleaned);

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void sdma_sw_tear_down(struct sdma_engine *sde)
{
	struct sdma_state *ss = &sde->state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);

	/* stop waiting for all unfreeze events to complete */
	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
}

static void sdma_start_hw_clean_up(struct sdma_engine *sde)
{
	tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
}

static void sdma_set_state(struct sdma_engine *sde,
			   enum sdma_states next_state)
{
	struct sdma_state *ss = &sde->state;
	const struct sdma_set_state_action *action = sdma_action_table;
	unsigned op = 0;

	trace_hfi1_sdma_state(
		sde,
		sdma_state_names[ss->current_state],
		sdma_state_names[next_state]);

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;
	ss->current_state = next_state;

	if (ss->previous_state != sdma_state_s99_running
	    && next_state == sdma_state_s99_running)
		sdma_flush(sde);

	if (action[next_state].op_enable)
		op |= SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_cleanup)
		op |= SDMA_SENDCTRL_OP_CLEANUP;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;
	sdma_sendctrl(sde, ss->current_op);
}

/**
 * sdma_get_descq_cnt() - called when device probed
 *
 * Return a validated descq count.
 *
 * This is currently only used in the verbs initialization to build the tx
 * request structures.
 *
 * This will probably be deleted in favor of a more scalable approach to
 * sizing the rings.
 */
u16 sdma_get_descq_cnt(void)
{
	u16 count = sdma_descq_cnt;

	if (!count)
		return SDMA_DESCQ_CNT;
	/* count must be a power of 2 between 64 and 32768, inclusive.
	 * Otherwise return default.
	 */
	if (!is_power_of_2(count))
		return SDMA_DESCQ_CNT;
	if (count < 64 || count > 32768)
		return SDMA_DESCQ_CNT;
	return count;
}
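
/*
 * Illustrative only - a hypothetical caller sizing a tx ring from the
 * validated count.  Any value sdma_get_descq_cnt() returns is a power
 * of 2 in [64, 32768], so (count - 1) is always a valid index mask.
 */
static inline u16 example_ring_mask(void)
{
	u16 cnt = sdma_get_descq_cnt();

	return cnt - 1;	/* power of 2, so this is a valid index mask */
}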

/**
 * sdma_select_engine_vl() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an engine based on the selector and a vl.  The
 * mapping fields are protected by RCU.
 */
struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl)
{
	struct sdma_vl_map *m;
	struct sdma_map_elem *e;
	struct sdma_engine *rval;

	if (WARN_ON(vl > 8))
		return &dd->per_sdma[0];

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return &dd->per_sdma[0];
	}
	e = m->map[vl & m->mask];
	rval = e->sde[selector & e->mask];
	rcu_read_unlock();

	rval = !rval ? &dd->per_sdma[0] : rval;
	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
	return rval;
}

/**
 * sdma_select_engine_sc() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 * This function returns an engine based on the selector and an sc.
 */
struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5)
{
	u8 vl = sc_to_vlt(dd, sc5);

	return sdma_select_engine_vl(dd, selector, vl);
}
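
/*
 * Usage sketch (hypothetical caller): pick an engine for a packet by SC,
 * using a per-flow hash as the spreading selector.  No extra locking is
 * needed in the caller; the vl/engine map is RCU-protected internally.
 */
static inline struct sdma_engine *example_engine_for_flow(
	struct hfi1_devdata *dd,
	u8 sc5,
	u32 flow_hash)
{
	return sdma_select_engine_sc(dd, flow_hash, sc5);
}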

/*
 * Free the indicated map struct
 */
static void sdma_map_free(struct sdma_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}

/*
 * Handle RCU callback
 */
static void sdma_map_rcu_callback(struct rcu_head *list)
{
	struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);

	sdma_map_free(m);
}

/**
 * sdma_map_init - called when # vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_engines: per vl engine mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_engines is used to specify a non-uniform vl/engine loading.  NULL
 * implies auto computing the loading and giving each VL a uniform
 * distribution of engines per VL.
 *
 * The auto algorithm computes the sde_per_vl and the number of extra
 * engines.  Any extra engines are added from the last VL on down.
 *
 * rcu locking is used here to control access to the mapping fields.
 *
 * If either the num_vls or num_sdma are non-power of 2, the array sizes
 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
 * up to the next highest power of 2 and the first entry is reused
 * in a round robin fashion.
 *
 * If an error occurs the map change is not done and the mapping is
 * not changed.
 *
 */
int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
{
	int i, j;
	int extra, sde_per_vl;
	int engine = 0;
	u8 lvl_engines[OPA_MAX_VLS];
	struct sdma_vl_map *oldmap, *newmap;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return 0;

	if (!vl_engines) {
		/* truncate divide */
		sde_per_vl = dd->num_sdma / num_vls;
		/* extras */
		extra = dd->num_sdma % num_vls;
		vl_engines = lvl_engines;
		/* add extras from last vl down */
		for (i = num_vls - 1; i >= 0; i--, extra--)
			vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
	}
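	/*
	 * Worked example: with dd->num_sdma == 16 and num_vls == 5,
	 * sde_per_vl == 3 and extra == 1, giving a distribution of
	 * { 3, 3, 3, 3, 4 } - the single extra engine lands on the last VL.
	 */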
	/* build new map */
	newmap = kzalloc(
		sizeof(struct sdma_vl_map) +
			roundup_pow_of_two(num_vls) *
			sizeof(struct sdma_map_elem *),
		GFP_KERNEL);
	if (!newmap)
		goto bail;
	newmap->actual_vls = num_vls;
	newmap->vls = roundup_pow_of_two(num_vls);
	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
	for (i = 0; i < newmap->vls; i++) {
		/* save for wrap around */
		int first_engine = engine;

		if (i < newmap->actual_vls) {
			int sz = roundup_pow_of_two(vl_engines[i]);

			/* only allocate once */
			newmap->map[i] = kzalloc(
				sizeof(struct sdma_map_elem) +
					sz * sizeof(struct sdma_engine *),
				GFP_KERNEL);
			if (!newmap->map[i])
				goto bail;
			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
			/* assign engines to this vl */
			for (j = 0; j < sz; j++) {
				newmap->map[i]->sde[j] =
					&dd->per_sdma[engine];
				if (++engine >= first_engine + vl_engines[i])
					/* wrap back to first engine */
					engine = first_engine;
			}
		} else {
			/* just re-use entry without allocating */
			newmap->map[i] = newmap->map[i % num_vls];
		}
		engine = first_engine + vl_engines[i];
	}
	/* newmap in hand, save old map */
	spin_lock_irq(&dd->sde_map_lock);
	oldmap = rcu_dereference_protected(dd->sdma_map,
					   lockdep_is_held(&dd->sde_map_lock));

	/* publish newmap */
	rcu_assign_pointer(dd->sdma_map, newmap);

	spin_unlock_irq(&dd->sde_map_lock);
	/* success, free any old map after grace period */
	if (oldmap)
		call_rcu(&oldmap->list, sdma_map_rcu_callback);
	return 0;
bail:
	/* free any partial allocation */
	sdma_map_free(newmap);
	return -ENOMEM;
}

/*
 * Clean up allocated memory.
 *
 * This routine can be called regardless of the success of sdma_init()
 */
static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
{
	size_t i;
	struct sdma_engine *sde;

	if (dd->sdma_pad_dma) {
		dma_free_coherent(&dd->pcidev->dev, 4,
				  (void *)dd->sdma_pad_dma,
				  dd->sdma_pad_phys);
		dd->sdma_pad_dma = NULL;
		dd->sdma_pad_phys = 0;
	}
	if (dd->sdma_heads_dma) {
		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
				  (void *)dd->sdma_heads_dma,
				  dd->sdma_heads_phys);
		dd->sdma_heads_dma = NULL;
		dd->sdma_heads_phys = 0;
	}
	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
		sde = &dd->per_sdma[i];

		sde->head_dma = NULL;
		sde->head_phys = 0;

		if (sde->descq) {
			dma_free_coherent(
				&dd->pcidev->dev,
				sde->descq_cnt * sizeof(u64[2]),
				sde->descq,
				sde->descq_phys);
			sde->descq = NULL;
			sde->descq_phys = 0;
		}
		kvfree(sde->tx_ring);
		sde->tx_ring = NULL;
	}
	spin_lock_irq(&dd->sde_map_lock);
	kfree(rcu_access_pointer(dd->sdma_map));
	RCU_INIT_POINTER(dd->sdma_map, NULL);
	spin_unlock_irq(&dd->sde_map_lock);
	synchronize_rcu();
	kfree(dd->per_sdma);
	dd->per_sdma = NULL;
}

/**
 * sdma_init() - called when device probed
 * @dd: hfi1_devdata
 * @port: port number (currently only zero)
 *
 * sdma_init initializes the specified number of engines.
 *
 * The code initializes each sde, its csrs.  Interrupts
 * are not required to be enabled.
 *
 * Returns:
 * 0 - success, -errno on failure
 */
int sdma_init(struct hfi1_devdata *dd, u8 port)
{
	unsigned this_idx;
	struct sdma_engine *sde;
	u16 descq_cnt;
	void *curr_head;
	struct hfi1_pportdata *ppd = dd->pport + port;
	u32 per_sdma_credits;
	uint idle_cnt = sdma_idle_cnt;
	size_t num_engines = dd->chip_sdma_engines;

	if (!HFI1_CAP_IS_KSET(SDMA)) {
		HFI1_CAP_CLEAR(SDMA_AHG);
		return 0;
	}
	if (mod_num_sdma &&
	    /* can't exceed chip support */
	    mod_num_sdma <= dd->chip_sdma_engines &&
	    /* count must be >= vls */
	    mod_num_sdma >= num_vls)
		num_engines = mod_num_sdma;

	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
		    dd->chip_sdma_mem_size);

	per_sdma_credits =
		dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);

	/* set up freeze waitqueue */
	init_waitqueue_head(&dd->sdma_unfreeze_wq);
	atomic_set(&dd->sdma_unfreeze_count, 0);

	descq_cnt = sdma_get_descq_cnt();
	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
		    num_engines, descq_cnt);

	/* alloc memory for array of send engines */
	dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
	if (!dd->per_sdma)
		return -ENOMEM;

	idle_cnt = ns_to_cclock(dd, idle_cnt);
	if (!sdma_desct_intr)
		sdma_desct_intr = SDMA_DESC_INTR;

	/* Allocate memory for SendDMA descriptor FIFOs */
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		sde->dd = dd;
		sde->ppd = ppd;
		sde->this_idx = this_idx;
		sde->descq_cnt = descq_cnt;
		sde->desc_avail = sdma_descq_freecnt(sde);
		sde->sdma_shift = ilog2(descq_cnt);
		sde->sdma_mask = (1 << sde->sdma_shift) - 1;
		sde->descq_full_count = 0;

		/* Create a mask for all 3 chip interrupt sources */
		sde->imask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES + this_idx)
			| (u64)1 << (1 * TXE_NUM_SDMA_ENGINES + this_idx)
			| (u64)1 << (2 * TXE_NUM_SDMA_ENGINES + this_idx);
		/* Create a mask specifically for sdma_idle */
		sde->idle_mask =
			(u64)1 << (2 * TXE_NUM_SDMA_ENGINES + this_idx);
		/* Create a mask specifically for sdma_progress */
		sde->progress_mask =
			(u64)1 << (TXE_NUM_SDMA_ENGINES + this_idx);
		spin_lock_init(&sde->tail_lock);
		seqlock_init(&sde->head_lock);
		spin_lock_init(&sde->senddmactrl_lock);
		spin_lock_init(&sde->flushlist_lock);
		/* ensure there is always a zero bit */
		sde->ahg_bits = 0xfffffffe00000000ULL;

		sdma_set_state(sde, sdma_state_s00_hw_down);

		/* set up reference counting */
		kref_init(&sde->state.kref);
		init_completion(&sde->state.comp);

		INIT_LIST_HEAD(&sde->flushlist);
		INIT_LIST_HEAD(&sde->dmawait);

		sde->tail_csr =
			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));

		if (idle_cnt)
			dd->default_desc1 =
				SDMA_DESC1_HEAD_TO_HOST_FLAG;
		else
			dd->default_desc1 =
				SDMA_DESC1_INT_REQ_FLAG;

		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
			     (unsigned long)sde);

		tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
			     (unsigned long)sde);
		INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
		INIT_WORK(&sde->flush_worker, sdma_field_flush);

		sde->progress_check_head = 0;

		setup_timer(&sde->err_progress_check_timer,
			    sdma_err_progress_check, (unsigned long)sde);

		sde->descq = dma_zalloc_coherent(
			&dd->pcidev->dev,
			descq_cnt * sizeof(u64[2]),
			&sde->descq_phys,
			GFP_KERNEL);
		if (!sde->descq)
			goto bail;
		sde->tx_ring =
			kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
				GFP_KERNEL);
		if (!sde->tx_ring)
			sde->tx_ring =
				vzalloc(
					sizeof(struct sdma_txreq *) *
					descq_cnt);
		if (!sde->tx_ring)
			goto bail;
	}

	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
	/* Allocate memory for DMA of head registers to memory */
	dd->sdma_heads_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		dd->sdma_heads_size,
		&dd->sdma_heads_phys,
		GFP_KERNEL);
	if (!dd->sdma_heads_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
		goto bail;
	}

	/* Allocate memory for pad */
	dd->sdma_pad_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		4,
		&dd->sdma_pad_phys,
		GFP_KERNEL);
	if (!dd->sdma_pad_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
		goto bail;
	}

	/* assign each engine to different cacheline and init registers */
	curr_head = (void *)dd->sdma_heads_dma;
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		unsigned long phys_offset;

		sde = &dd->per_sdma[this_idx];

		sde->head_dma = curr_head;
		curr_head += L1_CACHE_BYTES;
		phys_offset = (unsigned long)sde->head_dma -
			      (unsigned long)dd->sdma_heads_dma;
		sde->head_phys = dd->sdma_heads_phys + phys_offset;
		init_sdma_regs(sde, per_sdma_credits, idle_cnt);
	}
	dd->flags |= HFI1_HAS_SEND_DMA;
	dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
	dd->num_sdma = num_engines;
	if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
		goto bail;
	dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
	return 0;

bail:
	sdma_clean(dd, num_engines);
	return -ENOMEM;
}

/**
 * sdma_all_running() - called when the link goes up
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the running state.
 */
void sdma_all_running(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* move all engines to running */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e30_go_running);
	}
}

/**
 * sdma_all_idle() - called when the link goes down
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the idle state.
 */
void sdma_all_idle(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* idle all engines */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e70_go_idle);
	}
}

/**
 * sdma_start() - called to kick off state processing for all engines
 * @dd: hfi1_devdata
 *
 * This routine is for kicking off the state processing for all required
 * sdma engines.  Interrupts need to be working at this point.
 *
 */
void sdma_start(struct hfi1_devdata *dd)
{
	unsigned int i;
	struct sdma_engine *sde;

	/* kick off the engines state processing */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e10_go_hw_start);
	}
}

/**
 * sdma_exit() - used when module is removed
 * @dd: hfi1_devdata
 */
void sdma_exit(struct hfi1_devdata *dd)
{
	unsigned this_idx;
	struct sdma_engine *sde;

	for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
	     ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		if (!list_empty(&sde->dmawait))
			dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
				   sde->this_idx);
		sdma_process_event(sde, sdma_event_e00_go_hw_down);

		del_timer_sync(&sde->err_progress_check_timer);

		/*
		 * This waits for the state machine to exit so it is not
		 * necessary to kill the sdma_sw_clean_up_task to make sure
		 * it is not running.
		 */
		sdma_finalput(&sde->state);
	}
	sdma_clean(dd, dd->num_sdma);
}

/*
 * unmap the indicated descriptor
 */
static inline void sdma_unmap_desc(
	struct hfi1_devdata *dd,
	struct sdma_desc *descp)
{
	switch (sdma_mapping_type(descp)) {
	case SDMA_MAP_SINGLE:
		dma_unmap_single(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	case SDMA_MAP_PAGE:
		dma_unmap_page(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	}
}

/*
 * return the mode as indicated by the first
 * descriptor in the tx.
 */
static inline u8 ahg_mode(struct sdma_txreq *tx)
{
	return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
		>> SDMA_DESC1_HEADER_MODE_SHIFT;
}

/**
 * sdma_txclean() - clean tx of mappings, descp *kmalloc's
 * @dd: hfi1_devdata for unmapping
 * @tx: tx request to clean
 *
 * This is used in the progress routine to clean the tx or
 * by the ULP to toss an in-process tx build.
 *
 * The code can be called multiple times without issue.
 *
 */
void sdma_txclean(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx)
{
	u16 i;

	if (tx->num_desc) {
		u8 skip = 0, mode = ahg_mode(tx);

		/* unmap first one */
		sdma_unmap_desc(dd, &tx->descp[0]);
		/* determine number of AHG descriptors to skip */
		if (mode > SDMA_AHG_APPLY_UPDATE1)
			skip = mode >> 1;
		for (i = 1 + skip; i < tx->num_desc; i++)
			sdma_unmap_desc(dd, &tx->descp[i]);
		tx->num_desc = 0;
	}
	kfree(tx->coalesce_buf);
	tx->coalesce_buf = NULL;
	/* kmalloc'ed descp */
	if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
		tx->desc_limit = ARRAY_SIZE(tx->descs);
		kfree(tx->descp);
	}
}

static inline u16 sdma_gethead(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	int use_dmahead;
	u16 hwhead;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

retry:
	use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
		      (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
	hwhead = use_dmahead ?
		(u16)le64_to_cpu(*sde->head_dma) :
		(u16)read_sde_csr(sde, SD(HEAD));

	if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
		u16 swhead;
		u16 swtail;
		u16 cnt;
		int sane;

		swhead = sde->descq_head & sde->sdma_mask;
		/* this code is really bad for cache line trading */
		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		cnt = sde->descq_cnt;

		if (swhead < swtail)
			/* not wrapped */
			sane = (hwhead >= swhead) & (hwhead <= swtail);
		else if (swhead > swtail)
			/* wrapped around */
			sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
				(hwhead <= swtail);
		else
			/* empty */
			sane = (hwhead == swhead);

		if (unlikely(!sane)) {
			dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
				   sde->this_idx,
				   use_dmahead ? "dma" : "kreg",
				   hwhead, swhead, swtail, cnt);
			if (use_dmahead) {
				/* try one more time, using csr */
				use_dmahead = 0;
				goto retry;
			}
			/* proceed as if no progress */
			hwhead = swhead;
		}
	}
	return hwhead;
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with head_lock held.
 */
static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
{
	struct iowait *wait, *nw;
	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
	unsigned i, n = 0, seq;
	struct sdma_txreq *stx;
	struct hfi1_ibdev *dev = &sde->dd->verbs_dev;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "avail: %u\n", avail);
#endif

	do {
		seq = read_seqbegin(&dev->iowait_lock);
		if (!list_empty(&sde->dmawait)) {
			/* at least one item */
			write_seqlock(&dev->iowait_lock);
			/* Harvest waiters wanting DMA descriptors */
			list_for_each_entry_safe(
					wait,
					nw,
					&sde->dmawait,
					list) {
				u16 num_desc = 0;

				if (!wait->wakeup)
					continue;
				if (n == ARRAY_SIZE(waits))
					break;
				if (!list_empty(&wait->tx_head)) {
					stx = list_first_entry(
						&wait->tx_head,
						struct sdma_txreq,
						list);
					num_desc = stx->num_desc;
				}
				if (num_desc > avail)
					break;
				avail -= num_desc;
				list_del_init(&wait->list);
				waits[n++] = wait;
			}
			write_sequnlock(&dev->iowait_lock);
			break;
		}
	} while (read_seqretry(&dev->iowait_lock, seq));

	for (i = 0; i < n; i++)
		waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}

/* head_lock must be held */
static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
	struct sdma_txreq *txp = NULL;
	int progress = 0;
	u16 hwhead, swhead, swtail;
	int idle_check_done = 0;

	hwhead = sdma_gethead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

retry:
	txp = get_txhead(sde);
	swhead = sde->descq_head & sde->sdma_mask;
	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
	while (swhead != hwhead) {
		/* advance head, wrap if needed */
		swhead = ++sde->descq_head & sde->sdma_mask;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == swhead) {
			int drained = 0;
			/* protect against complete modifying */
			struct iowait *wait = txp->wait;

			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			if (wait)
				drained = atomic_dec_and_test(&wait->sdma_busy);
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
			trace_hfi1_sdma_out_sn(sde, txp->sn);
			if (WARN_ON_ONCE(sde->head_sn != txp->sn))
				dd_dev_err(sde->dd, "expected %llu got %llu\n",
					   sde->head_sn, txp->sn);
			sde->head_sn++;
#endif
			sdma_txclean(sde->dd, txp);
			if (txp->complete)
				(*txp->complete)(
					txp,
					SDMA_TXREQ_S_OK,
					drained);
			if (wait && drained)
				iowait_drain_wakeup(wait);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
		progress++;
	}

	/*
	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory. The head
	 * value read might not be fully up to date. If there are pending
	 * descriptors and the SDMA idle interrupt fired then read from the
	 * CSR SDMA head instead to get the latest value from the hardware.
	 * The hardware SDMA head should be read at most once in this invocation
	 * of sdma_make_progress(..), which is ensured by the idle_check_done flag.
	 */
	if ((status & sde->idle_mask) && !idle_check_done) {
		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		if (swtail != hwhead) {
			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
			idle_check_done = 1;
			goto retry;
		}
	}

	sde->last_status = status;
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}

/**
 * sdma_engine_interrupt() - interrupt handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 *
 * Status is a mask of the 3 possible interrupts for this engine.  It will
 * contain bits _only_ for this SDMA engine.  It will contain at least one
 * bit, it may contain more.
 */
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
{
	trace_hfi1_sdma_engine_interrupt(sde, status);
	write_seqlock(&sde->head_lock);
	sdma_set_desc_cnt(sde, sdma_desct_intr);
	sdma_make_progress(sde, status);
	write_sequnlock(&sde->head_lock);
}

/**
 * sdma_engine_error() - error handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 */
void sdma_engine_error(struct sdma_engine *sde, u64 status)
{
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
		   sde->this_idx,
		   (unsigned long long)status,
		   sdma_state_names[sde->state.current_state]);
#endif
	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);
	if (status & ALL_SDMA_ENG_HALT_ERRS)
		__sdma_process_event(sde, sdma_event_e60_hw_halted);
	if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
		dd_dev_err(sde->dd,
			   "SDMA (%u) engine error: 0x%llx state %s\n",
			   sde->this_idx,
			   (unsigned long long)status,
			   sdma_state_names[sde->state.current_state]);
		dump_sdma_state(sde);
	}
	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
{
	u64 set_senddmactrl = 0;
	u64 clr_senddmactrl = 0;
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
		   sde->this_idx,
		   (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
#endif

	if (op & SDMA_SENDCTRL_OP_ENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_INTENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_HALT)
		set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);

	spin_lock_irqsave(&sde->senddmactrl_lock, flags);

	sde->p_senddmactrl |= set_senddmactrl;
	sde->p_senddmactrl &= ~clr_senddmactrl;

	if (op & SDMA_SENDCTRL_OP_CLEANUP)
		write_sde_csr(sde, SD(CTRL),
			      sde->p_senddmactrl |
			      SD(CTRL_SDMA_CLEANUP_SMASK));
	else
		write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);

	spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);

#ifdef CONFIG_SDMA_VERBOSITY
	sdma_dumpstate(sde);
#endif
}

static void sdma_setlengen(struct sdma_engine *sde)
{
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	/*
	 * Set SendDmaLenGen and clear-then-set the MSB of the generation
	 * count to enable generation checking and load the internal
	 * generation counter.
	 */
	write_sde_csr(sde, SD(LEN_GEN),
		      (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
	write_sde_csr(sde, SD(LEN_GEN),
		      ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT))
		      | (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
}

static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	smp_wmb(); /* see get_txhead() */
	writeq(tail, sde->tail_csr);
}
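
/*
 * Ordering note: the smp_wmb() above pairs with the
 * smp_read_barrier_depends() in get_txhead().  Descriptor writes must be
 * globally visible before the tail write makes them eligible for
 * hardware fetch.
 */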

/*
 * This is called when changing to state s10_hw_start_up_halt_wait as
 * a result of send buffer errors or send DMA descriptor errors.
 */
static void sdma_hw_start_up(struct sdma_engine *sde)
{
	u64 reg;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	*sde->head_dma = 0;

	reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
	      SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
	write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
}

#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
	(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

#define SET_STATIC_RATE_CONTROL_SMASK(r) \
	(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

/*
 * set_sdma_integrity
 *
 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
 */
static void set_sdma_integrity(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	u64 reg;

	if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
		return;

	reg = hfi1_pkt_base_sdma_integrity(dd);

	if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
		CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
	else
		SET_STATIC_RATE_CONTROL_SMASK(reg);

	write_sde_csr(sde, SD(CHECK_ENABLE), reg);
}

static void init_sdma_regs(
	struct sdma_engine *sde,
	u32 credits,
	uint idle_cnt)
{
	u8 opval, opmask;
#ifdef CONFIG_SDMA_VERBOSITY
	struct hfi1_devdata *dd = sde->dd;

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
	write_sde_csr(sde, SD(DESC_CNT), 0);
	write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
	write_sde_csr(sde, SD(MEMORY),
		      ((u64)credits <<
		       SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
		      ((u64)(credits * sde->this_idx) <<
		       SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
	write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
	set_sdma_integrity(sde);
	opmask = OPCODE_CHECK_MASK_DISABLED;
	opval = OPCODE_CHECK_VAL_DISABLED;
	write_sde_csr(sde, SD(CHECK_OPCODE),
		      (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
		      (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
}

#ifdef CONFIG_SDMA_VERBOSITY

#define sdma_dumpstate_helper0(reg) do { \
		csr = read_csr(sde->dd, reg); \
		dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
	} while (0)

#define sdma_dumpstate_helper(reg) do { \
		csr = read_sde_csr(sde, reg); \
		dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
			   #reg, sde->this_idx, csr); \
	} while (0)

#define sdma_dumpstate_helper2(reg) do { \
		csr = read_csr(sde->dd, reg + (8 * i)); \
		dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
			   #reg, i, csr); \
	} while (0)

void sdma_dumpstate(struct sdma_engine *sde)
{
	u64 csr;
	unsigned i;

	sdma_dumpstate_helper(SD(CTRL));
	sdma_dumpstate_helper(SD(STATUS));
	sdma_dumpstate_helper0(SD(ERR_STATUS));
	sdma_dumpstate_helper0(SD(ERR_MASK));
	sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
	sdma_dumpstate_helper(SD(ENG_ERR_MASK));

	for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
		sdma_dumpstate_helper2(CCE_INT_STATUS);
		sdma_dumpstate_helper2(CCE_INT_MASK);
		sdma_dumpstate_helper2(CCE_INT_BLOCKED);
	}

	sdma_dumpstate_helper(SD(TAIL));
	sdma_dumpstate_helper(SD(HEAD));
	sdma_dumpstate_helper(SD(PRIORITY_THLD));
	sdma_dumpstate_helper(SD(IDLE_CNT));
	sdma_dumpstate_helper(SD(RELOAD_CNT));
	sdma_dumpstate_helper(SD(DESC_CNT));
	sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
	sdma_dumpstate_helper(SD(MEMORY));
	sdma_dumpstate_helper0(SD(ENGINES));
	sdma_dumpstate_helper0(SD(MEM_SIZE));
	/* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
	sdma_dumpstate_helper(SD(BASE_ADDR));
	sdma_dumpstate_helper(SD(LEN_GEN));
	sdma_dumpstate_helper(SD(HEAD_ADDR));
	sdma_dumpstate_helper(SD(CHECK_ENABLE));
	sdma_dumpstate_helper(SD(CHECK_VL));
	sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
	sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
	sdma_dumpstate_helper(SD(CHECK_SLID));
	sdma_dumpstate_helper(SD(CHECK_OPCODE));
}
#endif

static void dump_sdma_state(struct sdma_engine *sde)
{
	struct hw_sdma_desc *descq;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;
	u16 head, tail, cnt;

	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	cnt = sdma_descq_freecnt(sde);
	descq = sde->descq;

	dd_dev_err(sde->dd,
		   "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
		   sde->this_idx,
		   head,
		   tail,
		   cnt,
		   !list_empty(&sde->flushlist));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
			   'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
			& SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
			& SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
			& SDMA_DESC0_BYTE_COUNT_MASK;
		dd_dev_err(sde->dd,
			   "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			   head, flags, addr, gen, len);
		dd_dev_err(sde->dd,
			   "\tdesc0:0x%016llx desc1 0x%016llx\n",
			   desc[0], desc[1]);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			dd_dev_err(sde->dd,
				   "\taidx: %u amode: %u alen: %u\n",
				   (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
					>> SDMA_DESC1_HEADER_INDEX_SHIFT),
				   (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
					>> SDMA_DESC1_HEADER_MODE_SHIFT),
				   (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
					>> SDMA_DESC1_HEADER_DWS_SHIFT));
		head++;
		head &= sde->sdma_mask;
	}
}

#define SDE_FMT \
	"SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"

/**
 * sdma_seqfile_dump_sde() - debugfs dump of sde
 * @s: seq file
 * @sde: send dma engine to dump
 *
 * This routine dumps the sde to the indicated seq file.
 */
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
{
	u16 head, tail;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;

	head = sde->descq_head & sde->sdma_mask;
	tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
	seq_printf(s, SDE_FMT, sde->this_idx,
		   sde->cpu,
		   sdma_state_name(sde->state.current_state),
		   (unsigned long long)read_sde_csr(sde, SD(CTRL)),
		   (unsigned long long)read_sde_csr(sde, SD(STATUS)),
		   (unsigned long long)read_sde_csr(sde,
						    SD(ENG_ERR_STATUS)),
		   (unsigned long long)read_sde_csr(sde, SD(TAIL)),
		   tail,
		   (unsigned long long)read_sde_csr(sde, SD(HEAD)),
		   head,
		   (unsigned long long)le64_to_cpu(*sde->head_dma),
		   (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
		   (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
		   (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
		   (unsigned long long)sde->last_status,
		   (unsigned long long)sde->ahg_bits,
		   sde->tx_tail,
		   sde->tx_head,
		   sde->descq_tail,
		   sde->descq_head,
		   !list_empty(&sde->flushlist),
		   sde->descq_full_count,
		   (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
			   'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
			& SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
			& SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
			& SDMA_DESC0_BYTE_COUNT_MASK;
		seq_printf(s,
			   "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			   head, flags, addr, gen, len);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
				   (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
					>> SDMA_DESC1_HEADER_INDEX_SHIFT),
				   (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
					>> SDMA_DESC1_HEADER_MODE_SHIFT));
		head = (head + 1) & sde->sdma_mask;
	}
}

/*
 * add the generation number into
 * the qw1 and return
 */
static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
{
	u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;

	qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
	qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
		<< SDMA_DESC1_GENERATION_SHIFT;
	return qw1;
}
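
/*
 * Worked example: with the default descq_cnt of 2048, sdma_shift is 11,
 * so descq_tail values 0..2047 carry generation 0, 2048..4095 carry
 * generation 1, and so on, wrapping modulo 4.  The 2-bit generation lets
 * the hardware's generation checking (see sdma_setlengen()) catch stale
 * descriptors after the ring wraps.
 */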

/*
 * This routine submits the indicated tx
 *
 * Space has already been guaranteed and
 * the tail side of ring is locked.
 *
 * The hardware tail update is done
 * in the caller and that is facilitated
 * by returning the new tail.
 *
 * There is special case logic for ahg
 * to not add the generation number for
 * up to 2 descriptors that follow the
 * first descriptor.
 *
 */
static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
{
	int i;
	u16 tail;
	struct sdma_desc *descp = tx->descp;
	u8 skip = 0, mode = ahg_mode(tx);

	tail = sde->descq_tail & sde->sdma_mask;
	sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
	sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
	trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
				   tail, &sde->descq[tail]);
	tail = ++sde->descq_tail & sde->sdma_mask;
	descp++;
	if (mode > SDMA_AHG_APPLY_UPDATE1)
		skip = mode >> 1;
	for (i = 1; i < tx->num_desc; i++, descp++) {
		u64 qw1;

		sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
		if (skip) {
			/* edits don't have generation */
			qw1 = descp->qw[1];
			skip--;
		} else {
			/* replace generation with real one for non-edits */
			qw1 = add_gen(sde, descp->qw[1]);
		}
		sde->descq[tail].qw[1] = cpu_to_le64(qw1);
		trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
					   tail, &sde->descq[tail]);
		tail = ++sde->descq_tail & sde->sdma_mask;
	}
	tx->next_descq_idx = tail;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
	WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
#endif
	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
	sde->desc_avail -= tx->num_desc;
	return tail;
}

/*
 * Check for progress
 */
static int sdma_check_progress(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *tx)
{
	int ret;

	sde->desc_avail = sdma_descq_freecnt(sde);
	if (tx->num_desc <= sde->desc_avail)
		return -EAGAIN;
	/* pulse the head_lock */
	if (wait && wait->sleep) {
		unsigned seq;

		seq = raw_seqcount_begin(
			(const seqcount_t *)&sde->head_lock.seqcount);
		ret = wait->sleep(sde, wait, tx, seq);
		if (ret == -EAGAIN)
			sde->desc_avail = sdma_descq_freecnt(sde);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

/**
 * sdma_send_txreq() - submit a tx req to ring
 * @sde: sdma engine to use
 * @wait: wait structure to use when full (may be NULL)
 * @tx: sdma_txreq to submit
 *
 * The call submits the tx into the ring.  If an iowait structure is non-NULL
 * the packet will be queued to the list in wait.
 *
 * Return:
 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
 * ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx)
{
	int ret = 0;
	u16 tail;
	unsigned long flags;

	/* user should have supplied entire packet */
	if (unlikely(tx->tlen))
		return -EINVAL;
	tx->wait = wait;
	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	if (unlikely(!__sdma_running(sde)))
		goto unlock_noconn;
	if (unlikely(tx->num_desc > sde->desc_avail))
		goto nodesc;
	tail = submit_tx(sde, tx);
	if (wait)
		atomic_inc(&wait->sdma_busy);
	sdma_update_tail(sde, tail);
unlock:
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret;
unlock_noconn:
	if (wait)
		atomic_inc(&wait->sdma_busy);
	tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
	spin_lock(&sde->flushlist_lock);
	list_add_tail(&tx->list, &sde->flushlist);
	spin_unlock(&sde->flushlist_lock);
	if (wait) {
		wait->tx_count++;
		wait->count += tx->num_desc;
	}
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto unlock;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto unlock;
}
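
/*
 * Minimal submission sketch (hypothetical caller), showing the documented
 * return handling: -EIOCBQUEUED means the tx now sits on the iowait list
 * and the wakeup callback will resubmit it when descriptors free up.
 */
static inline int example_submit_one(struct sdma_engine *sde,
				     struct iowait *wait,
				     struct sdma_txreq *tx)
{
	int ret = sdma_send_txreq(sde, wait, tx);

	if (ret == -EIOCBQUEUED)
		return 0;	/* queued; not an error for the caller */
	return ret;
}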

/**
 * sdma_send_txlist() - submit a list of tx req to ring
 * @sde: sdma engine to use
 * @wait: wait structure to use when full (may be NULL)
 * @tx_list: list of sdma_txreqs to submit
 *
 * The call submits the list into the ring.
 *
 * If the iowait structure is non-NULL and not equal to the iowait list
 * the unprocessed part of the list will be appended to the list in wait.
 *
 * In all cases, the tx_list will be updated so the head of the tx_list is
 * the list of descriptors that have yet to be transmitted.
 *
 * The intent of this call is to provide a more efficient
 * way of submitting multiple packets to SDMA while holding the tail
 * side locking.
 *
 * Return:
 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring
 * (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list)
{
	struct sdma_txreq *tx, *tx_next;
	int ret = 0;
	unsigned long flags;
	u16 tail = INVALID_TAIL;
	int count = 0;

	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		if (unlikely(!__sdma_running(sde)))
			goto unlock_noconn;
		if (unlikely(tx->num_desc > sde->desc_avail))
			goto nodesc;
		if (unlikely(tx->tlen)) {
			ret = -EINVAL;
			goto update_tail;
		}
		list_del_init(&tx->list);
		tail = submit_tx(sde, tx);
		count++;
		if (tail != INVALID_TAIL &&
		    (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
			sdma_update_tail(sde, tail);
			tail = INVALID_TAIL;
		}
	}
update_tail:
	if (wait)
		atomic_add(count, &wait->sdma_busy);
	if (tail != INVALID_TAIL)
		sdma_update_tail(sde, tail);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret;
unlock_noconn:
	spin_lock(&sde->flushlist_lock);
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		list_del_init(&tx->list);
		if (wait)
			atomic_inc(&wait->sdma_busy);
		tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
		tx->sn = sde->tail_sn++;
		trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
		list_add_tail(&tx->list, &sde->flushlist);
		if (wait) {
			wait->tx_count++;
			wait->count += tx->num_desc;
		}
	}
	spin_unlock(&sde->flushlist_lock);
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto update_tail;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto update_tail;
}

static void sdma_process_event(struct sdma_engine *sde,
			       enum sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	__sdma_process_event(sde, event);

	if (sde->state.current_state == sdma_state_s99_running)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
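
/*
 * Lock ordering note: everywhere both locks are taken (here, in
 * sdma_engine_error(), and in sdma_err_progress_check()), tail_lock is
 * acquired before head_lock and released after it.
 */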
2219 static void __sdma_process_event(struct sdma_engine *sde,
2220 enum sdma_events event)
2222 struct sdma_state *ss = &sde->state;
2223 int need_progress = 0;
2225 /* CONFIG SDMA temporary */
2226 #ifdef CONFIG_SDMA_VERBOSITY
2227 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2228 sdma_state_names[ss->current_state],
2229 sdma_event_names[event]);
2232 switch (ss->current_state) {
		switch (event) {
		case sdma_event_e00_go_hw_down:
			break;
		case sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually the
			 * result of a link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&sde->state);
			sdma_set_state(sde,
				       sdma_state_s10_hw_start_up_halt_wait);
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s10_hw_start_up_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde,
				       sdma_state_s15_hw_start_up_clean_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s15_hw_start_up_clean_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s20_idle:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			sdma_set_state(sde, sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e85_link_down:
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s50_hw_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s60_idle_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s80_hw_freeze:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s82_freeze_sw_clean:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			/* notify caller this engine is done cleaning */
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s99_running:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			need_progress = 1;
			sdma_err_progress_check_schedule(sde);
			/* fall through */
		case sdma_event_e90_sw_halted:
			/*
			 * SW initiated halt does not perform the engine
			 * progress check
			 */
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		}
		break;
	}

	ss->last_event = event;
	if (need_progress)
		sdma_make_progress(sde, 0);
}

/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors; there is little point
 * in an interim step.  The last descriptor is reserved
 * for the coalesce buffer in order to support cases
 * where the input packet has more than MAX_DESC iovecs.
 */
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	/* Handle last descriptor */
	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
		/* if tlen is 0, it is for padding, release last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
		} else if (!tx->coalesce_buf) {
			/* allocate coalesce buffer with space for padding */
			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
						   GFP_ATOMIC);
			if (!tx->coalesce_buf)
				return -ENOMEM;
			tx->coalesce_idx = 0;
		}
		return 0;
	}

	if (unlikely(tx->num_desc == MAX_DESC))
		return -ENOMEM;

	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		return -ENOMEM;

	/* reserve last descriptor for coalescing */
	tx->desc_limit = MAX_DESC - 1;
	/* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
}
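
/*
 * Illustrative sketch (assumption, not a quote of the call sites): the
 * helper above is reached from the descriptor-add paths roughly as
 * follows, once the nominal allocation is exhausted:
 *
 *	if (unlikely(tx->num_desc == tx->desc_limit)) {
 *		rval = _extend_sdma_tx_descs(dd, tx);
 *		if (rval) {
 *			sdma_txclean(dd, tx);
 *			return rval;
 *		}
 *	}
 */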

/*
 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
 *
 * This is called once the initial nominal allocation of descriptors
 * in the sdma_txreq is exhausted.
 *
 * This function calls _extend_sdma_tx_descs to extend or allocate
 * the coalesce buffer.  If there is an allocated coalesce buffer, it
 * copies the input packet data into it, and adds the coalesce buffer
 * descriptor once the whole packet is received.
 *
 * Return:
 * <0 - error
 * 0 - coalescing, don't populate descriptor
 * 1 - continue with populating descriptor
 */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len)
{
	int pad_len, rval;
	dma_addr_t addr;

	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		sdma_txclean(dd, tx);
		return rval;
	}

	/* If coalesce buffer is allocated, copy data into it */
	if (tx->coalesce_buf) {
		if (type == SDMA_MAP_NONE) {
			sdma_txclean(dd, tx);
			return -EINVAL;
		}

		if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
		} else if (WARN_ON(!kvaddr)) {
			sdma_txclean(dd, tx);
			return -EINVAL;
		}

		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
		tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);

		/* If there is more data, return */
		if (tx->tlen - tx->coalesce_idx)
			return 0;

		/* Whole packet is received; add any padding */
		pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len) {
			pad_len = sizeof(u32) - pad_len;
			memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
			/* padding is taken care of for coalescing case */
			tx->packet_len += pad_len;
			tx->tlen += pad_len;
		}

		/* dma map the coalesce buffer */
		addr = dma_map_single(&dd->pcidev->dev,
				      tx->coalesce_buf,
				      tx->tlen,
				      DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
			sdma_txclean(dd, tx);
			return -ENOSPC;
		}

		/* Add descriptor for coalesce buffer */
		tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen);
	}

	return 1;
}
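
/*
 * Example (hypothetical caller sketch): how the tri-state return above
 * is typically consumed by a descriptor-add path; everything other
 * than ext_coal_sdma_tx_descs() is a placeholder.
 *
 *	rval = ext_coal_sdma_tx_descs(dd, tx, type, kvaddr, page,
 *				      offset, len);
 *	if (rval <= 0)
 *		return rval;	// error, or data absorbed by coalescing
 *	// rval == 1: fall through and build the descriptor as usual
 */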

/* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	int i;
	u64 sreg;

	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
		(((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		SD(CHECK_SLID_VALUE_SHIFT));

	for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}
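
/*
 * Worked example (values chosen for illustration only): with lid =
 * 0x1234 and an lmc of 2, mask = ~0x3 = 0xfffc, so lid & mask = 0x1234.
 * The check then accepts any SLID in 0x1234-0x1237, since only the
 * masked upper bits are compared against the programmed value.
 */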

/* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	tx->num_desc++;
	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			sdma_txclean(dd, tx);
			return rval;
		}
	}
	/* finish the one just added */
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}
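
/*
 * Worked example (illustration): for packet_len = 61, 61 & 3 = 1, so
 * the pad descriptor above covers sizeof(u32) - 1 = 3 bytes, bringing
 * the transmitted total to 64, a dword multiple.
 */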

/*
 * Add ahg to the sdma_txreq
 *
 * The logic will consume up to 3
 * descriptors at the beginning of
 * the packet.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);

	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	tx->num_desc++;
	/* initialize the consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* FALLTHROUGH */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
		break;
	}
	ahg_hlen >>= 2;
	tx->descs[0].qw[1] |=
		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
	for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1])
				<< shift);
		shift = (shift + 32) & 63;
	}
}
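
/*
 * Illustration (derived from the mode selection above): num_ahg == 1
 * uses UPDATE1 and consumes only descriptor 0; 2-5 use UPDATE2 and
 * consume descriptors 0-1; 6-9 use UPDATE3 and consume descriptors
 * 0-2, which is the "up to 3 descriptors" noted in the header comment.
 */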

/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
 * -ENOSPC if an entry is not available
 */
int sdma_ahg_alloc(struct sdma_engine *sde)
{
	int nr;
	int oldbit;

	if (!sde) {
		trace_hfi1_ahg_allocate(sde, -EINVAL);
		return -EINVAL;
	}
	while (1) {
		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;
		}
		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
		if (!oldbit)
			break;
		cpu_relax();
	}
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}

/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}
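
/*
 * Example (hypothetical caller sketch): typical pairing of the AHG
 * allocator with its release path.
 *
 *	int ahg_index = sdma_ahg_alloc(sde);
 *
 *	if (ahg_index < 0)
 *		return ahg_index;	// no AHG entry; send full headers
 *	...
 *	sdma_ahg_free(sde, ahg_index);
 */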

/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is going into a freeze but before the freeze is fully
 * settled.  Generally an error interrupt.
 *
 * This event will pull the engine out of running so no more entries can be
 * added to the engine's queue.
 */
void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
{
	int i;
	enum sdma_events event = link_down ? sdma_event_e85_link_down :
					     sdma_event_e80_hw_freeze;

	/* set up the wait but do not wait here */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines to stop running and wait */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], event);

	/* sdma_freeze() will wait for all engines to have stopped */
}

/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is fully frozen.
 */
void sdma_freeze(struct hfi1_devdata *dd)
{
	int i;
	int ret;

	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <=
				       0);
	/* interrupted or count is negative, then unloading - just exit */
	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
		return;

	/* set up the count for the next wait */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines that the SPC is frozen, they can start cleaning */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);

	/*
	 * Wait for everyone to finish software clean before exiting.  The
	 * software clean will read engine CSRs, so it must be completed
	 * before the next step, which will clear the engine CSRs.
	 */
	(void)wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* no need to check results - done no matter what */
}

/*
 * SPC freeze handling for the SDMA engines.  Called after the SPC is unfrozen.
 *
 * The SPC freeze acts like an SDMA halt and a hardware clean combined.  All
 * that is left is a software clean.  We could do it after the SPC is fully
 * frozen, but then we'd have to add another state to wait for the unfreeze.
 * Instead, just defer the software clean until the unfreeze step.
 */
void sdma_unfreeze(struct hfi1_devdata *dd)
{
	int i;

	/* tell all engines to start freeze clean up */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i],
				   sdma_event_e82_hw_unfreeze);
}
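
/*
 * Sequence sketch (derived from the comments above; the surrounding
 * freeze detection lives in the caller): the three SPC freeze entry
 * points are driven in order.
 *
 *	sdma_freeze_notify(dd, link_down);	// freeze is starting
 *	// ...SPC settles into the freeze...
 *	sdma_freeze(dd);			// freeze fully settled
 *	// ...caller unfreezes the SPC...
 *	sdma_unfreeze(dd);			// deferred software clean
 */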

/**
 * _sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 */
void _sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
	/* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
		  sde->progress_mask);
}