2 * Copyright(c) 2015, 2016 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
 * hfi1 tracepoint header (Linux TRACE_EVENT machinery).
 * NOTE(review): this copy is extraction-damaged - each line carries its
 * original line number and many original lines are missing; treat the
 * upstream kernel source as authoritative.
 */
47 #undef TRACE_SYSTEM_VAR
48 #define TRACE_SYSTEM_VAR hfi1
/* Multi-read guard: define_trace.h re-reads this header several times. */
50 #if !defined(__HFI1_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
51 #define __HFI1_TRACE_H
53 #include <linux/tracepoint.h>
54 #include <linux/trace_seq.h>
/* Per-device "dev" string field: device name taken from the PCI device. */
60 #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
61 #define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
/* Map an RHF receive-type value to its symbolic name in trace output. */
63 #define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
64 #define show_packettype(etype) \
65 __print_symbolic(etype, \
66 packettype_name(EXPECTED), \
67 packettype_name(EAGER), \
68 packettype_name(IB), \
69 packettype_name(ERROR), \
70 packettype_name(BYPASS))
/* Receive-side tracepoints. */
73 #define TRACE_SYSTEM hfi1_rx
/*
 * Trace one received header: context, error flags, packet type, header
 * and total lengths, eager-update and eager-tail info.
 * NOTE(review): interior lines of this event are missing from this copy.
 */
75 TRACE_EVENT(hfi1_rcvhdr,
76 TP_PROTO(struct hfi1_devdata *dd,
85 TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
86 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
95 TP_fast_assign(DD_DEV_ASSIGN(dd);
96 __entry->eflags = eflags;
98 __entry->etype = etype;
100 __entry->tlen = tlen;
101 __entry->updegr = updegr;
102 __entry->etail = etail;
105 "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
109 __entry->etype, show_packettype(__entry->etype),
/*
 * Classify which receive-interrupt handler a context uses by comparing
 * its do_interrupt pointer: slow path, DMA-rtail fast path, or
 * no-DMA-rtail fast path (dma_rtail is 0xFF on the slow path, where it
 * is not meaningful).
 */
117 TRACE_EVENT(hfi1_receive_interrupt,
118 TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
120 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
122 __field(u8, slow_path)
123 __field(u8, dma_rtail)
125 TP_fast_assign(DD_DEV_ASSIGN(dd);
126 __entry->ctxt = ctxt;
127 if (dd->rcd[ctxt]->do_interrupt ==
128 &handle_receive_interrupt) {
129 __entry->slow_path = 1;
130 __entry->dma_rtail = 0xFF;
131 } else if (dd->rcd[ctxt]->do_interrupt ==
132 &handle_receive_interrupt_dma_rtail){
133 __entry->dma_rtail = 1;
134 __entry->slow_path = 0;
135 } else if (dd->rcd[ctxt]->do_interrupt ==
136 &handle_receive_interrupt_nodma_rtail) {
137 __entry->dma_rtail = 0;
138 __entry->slow_path = 0;
141 TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
/* Expected-TID RcvArray entry registration (entry index, pages, va/pa/dma). */
149 TRACE_EVENT(hfi1_exp_tid_reg,
150 TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr,
151 u32 npages, unsigned long va, unsigned long pa,
153 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
155 __field(unsigned, ctxt)
156 __field(u16, subctxt)
159 __field(unsigned long, va)
160 __field(unsigned long, pa)
161 __field(dma_addr_t, dma)
164 __entry->ctxt = ctxt;
165 __entry->subctxt = subctxt;
166 __entry->rarr = rarr;
167 __entry->npages = npages;
172 TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
/* Expected-TID RcvArray entry unregistration; mirrors hfi1_exp_tid_reg. */
183 TRACE_EVENT(hfi1_exp_tid_unreg,
184 TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr, u32 npages,
185 unsigned long va, unsigned long pa, dma_addr_t dma),
186 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
188 __field(unsigned, ctxt)
189 __field(u16, subctxt)
192 __field(unsigned long, va)
193 __field(unsigned long, pa)
194 __field(dma_addr_t, dma)
197 __entry->ctxt = ctxt;
198 __entry->subctxt = subctxt;
199 __entry->rarr = rarr;
200 __entry->npages = npages;
205 TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
/* Expected-TID entry invalidation (e.g. triggered by MMU notifier). */
216 TRACE_EVENT(hfi1_exp_tid_inval,
217 TP_PROTO(unsigned ctxt, u16 subctxt, unsigned long va, u32 rarr,
218 u32 npages, dma_addr_t dma),
219 TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
221 __field(unsigned, ctxt)
222 __field(u16, subctxt)
223 __field(unsigned long, va)
226 __field(dma_addr_t, dma)
229 __entry->ctxt = ctxt;
230 __entry->subctxt = subctxt;
232 __entry->rarr = rarr;
233 __entry->npages = npages;
236 TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
/* MMU-notifier invalidation over a [start, end) virtual range. */
246 TRACE_EVENT(hfi1_mmu_invalidate,
247 TP_PROTO(unsigned ctxt, u16 subctxt, const char *type,
248 unsigned long start, unsigned long end),
249 TP_ARGS(ctxt, subctxt, type, start, end),
251 __field(unsigned, ctxt)
252 __field(u16, subctxt)
254 __field(unsigned long, start)
255 __field(unsigned long, end)
258 __entry->ctxt = ctxt;
259 __entry->subctxt = subctxt;
260 __assign_str(type, type);
261 __entry->start = start;
264 TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
/* Transmit-side tracepoints. */
274 #define TRACE_SYSTEM hfi1_tx
/* PIO send-context buffer release; "extra" is caller-provided detail. */
276 TRACE_EVENT(hfi1_piofree,
277 TP_PROTO(struct send_context *sc, int extra),
279 TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
280 __field(u32, sw_index)
281 __field(u32, hw_context)
284 TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
285 __entry->sw_index = sc->sw_index;
286 __entry->hw_context = sc->hw_context;
287 __entry->extra = extra;
289 TP_printk("[%s] ctxt %u(%u) extra %d",
/* Request/record PIO credit-return interrupt state for a send context. */
297 TRACE_EVENT(hfi1_wantpiointr,
298 TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
299 TP_ARGS(sc, needint, credit_ctrl),
300 TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
301 __field(u32, sw_index)
302 __field(u32, hw_context)
303 __field(u32, needint)
304 __field(u64, credit_ctrl)
306 TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
307 __entry->sw_index = sc->sw_index;
308 __entry->hw_context = sc->hw_context;
309 __entry->needint = needint;
310 __entry->credit_ctrl = credit_ctrl;
312 TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
317 (unsigned long long)__entry->credit_ctrl
/* Shared template for QP sleep/wakeup events: qpn plus flag snapshots. */
321 DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
322 TP_PROTO(struct rvt_qp *qp, u32 flags),
325 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
328 __field(u32, s_flags)
331 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
332 __entry->flags = flags;
333 __entry->qpn = qp->ibqp.qp_num;
334 __entry->s_flags = qp->s_flags;
337 "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
345 DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
346 TP_PROTO(struct rvt_qp *qp, u32 flags),
349 DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
350 TP_PROTO(struct rvt_qp *qp, u32 flags),
/* InfiniBand header decode tracepoints. */
354 #define TRACE_SYSTEM hfi1_ibhdrs
/* Helpers implemented in trace.c; "p" is the implicit trace_seq in TP_printk. */
356 u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
357 const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
359 #define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
361 const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
363 #define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
/* Symbolic LNH (link next header) values - table tail elided in this copy. */
365 #define lrh_name(lrh) { HFI1_##lrh, #lrh }
366 #define show_lnh(lrh) \
367 __print_symbolic(lrh, \
/* Symbolic IB BTH opcode names (RC/UC/UD); closing entries elided here. */
371 #define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
372 #define show_ib_opcode(opcode) \
373 __print_symbolic(opcode, \
374 ib_opcode_name(RC_SEND_FIRST), \
375 ib_opcode_name(RC_SEND_MIDDLE), \
376 ib_opcode_name(RC_SEND_LAST), \
377 ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
378 ib_opcode_name(RC_SEND_ONLY), \
379 ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
380 ib_opcode_name(RC_RDMA_WRITE_FIRST), \
381 ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
382 ib_opcode_name(RC_RDMA_WRITE_LAST), \
383 ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
384 ib_opcode_name(RC_RDMA_WRITE_ONLY), \
385 ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
386 ib_opcode_name(RC_RDMA_READ_REQUEST), \
387 ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
388 ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
389 ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
390 ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
391 ib_opcode_name(RC_ACKNOWLEDGE), \
392 ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
393 ib_opcode_name(RC_COMPARE_SWAP), \
394 ib_opcode_name(RC_FETCH_ADD), \
395 ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \
396 ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \
397 ib_opcode_name(UC_SEND_FIRST), \
398 ib_opcode_name(UC_SEND_MIDDLE), \
399 ib_opcode_name(UC_SEND_LAST), \
400 ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
401 ib_opcode_name(UC_SEND_ONLY), \
402 ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
403 ib_opcode_name(UC_RDMA_WRITE_FIRST), \
404 ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
405 ib_opcode_name(UC_RDMA_WRITE_LAST), \
406 ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
407 ib_opcode_name(UC_RDMA_WRITE_ONLY), \
408 ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
409 ib_opcode_name(UD_SEND_ONLY), \
410 ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
/* printk format fragments for LRH / BTH / extended-header fields. */
413 #define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
415 "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
416 "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
417 #define EHDR_PRN "%s"
/*
 * Common IB header decoder: unpacks LRH, then (if LNH selects a local/GRH
 * form) the BTH fields and extended headers.
 * NOTE(review): field declarations and several assignment LHS lines are
 * elided in this copy.
 */
419 DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
420 TP_PROTO(struct hfi1_devdata *dd,
421 struct hfi1_ib_header *hdr),
445 /* extended headers */
446 __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
449 struct hfi1_other_headers *ohdr;
454 (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
456 (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
458 (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
460 (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
462 be16_to_cpu(hdr->lrh[1]);
463 /* allow for larger len */
465 be16_to_cpu(hdr->lrh[2]);
467 be16_to_cpu(hdr->lrh[3]);
469 if (__entry->lnh == HFI1_LRH_BTH)
472 ohdr = &hdr->u.l.oth;
474 (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
476 (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
478 (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
480 (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
482 (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
484 be32_to_cpu(ohdr->bth[0]) & 0xffff;
486 (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
489 (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
492 be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
494 (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
495 /* allow for larger PSN */
497 be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
498 /* extended headers */
499 memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
500 ibhdr_exhdr_len(hdr));
502 TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
508 __entry->lnh, show_lnh(__entry->lnh),
513 __entry->opcode, show_ib_opcode(__entry->opcode),
524 /* extended headers */
527 (void *)__get_dynamic_array(ehdrs))
/* Concrete events sharing the ibhdr decoder for each send/receive path. */
531 DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
532 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
535 DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
536 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
539 DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
540 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
543 DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
544 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
/* Format fragment for the snoop capture printk (define line elided here). */
548 "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
549 "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
/* Snoop/capture tracepoint: records raw header and payload bytes. */
552 #define TRACE_SYSTEM hfi1_snoop
/*
 * Capture a full packet: decoded LRH/BTH summary fields plus verbatim
 * copies of the header (raw_hdr) and payload (raw_pkt).
 * NOTE(review): several field declarations are elided in this copy.
 */
554 TRACE_EVENT(snoop_capture,
555 TP_PROTO(struct hfi1_devdata *dd,
557 struct hfi1_ib_header *hdr,
560 TP_ARGS(dd, hdr_len, hdr, data_len, data),
569 __field(u32, hdr_len)
570 __field(u32, data_len)
572 __dynamic_array(u8, raw_hdr, hdr_len)
573 __dynamic_array(u8, raw_pkt, data_len)
576 struct hfi1_other_headers *ohdr;
578 __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
579 if (__entry->lnh == HFI1_LRH_BTH)
582 ohdr = &hdr->u.l.oth;
584 __entry->slid = be16_to_cpu(hdr->lrh[3]);
585 __entry->dlid = be16_to_cpu(hdr->lrh[1]);
586 __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
587 __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
588 __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
589 __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
590 __entry->hdr_len = hdr_len;
591 __entry->data_len = data_len;
592 memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
593 memcpy(__get_dynamic_array(raw_pkt), data, data_len);
602 show_ib_opcode(__entry->opcode),
/* Context setup/teardown tracepoints. */
611 #define TRACE_SYSTEM hfi1_ctxts
/* Format fragment for hfi1_uctxtdata (define line elided in this copy). */
614 "cred:%u, credaddr:0x%llx, piobase:0x%llx, rcvhdr_cnt:%u, " \
615 "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
/* Snapshot of a user context's send/receive resources at creation. */
616 TRACE_EVENT(hfi1_uctxtdata,
617 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
619 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
620 __field(unsigned, ctxt)
621 __field(u32, credits)
622 __field(u64, hw_free)
623 __field(u64, piobase)
624 __field(u16, rcvhdrq_cnt)
625 __field(u64, rcvhdrq_phys)
626 __field(u32, eager_cnt)
627 __field(u64, rcvegr_phys)
629 TP_fast_assign(DD_DEV_ASSIGN(dd);
630 __entry->ctxt = uctxt->ctxt;
631 __entry->credits = uctxt->sc->credits;
632 __entry->hw_free = (u64)uctxt->sc->hw_free;
633 __entry->piobase = (u64)uctxt->sc->base_addr;
634 __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
635 __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
636 __entry->eager_cnt = uctxt->egrbufs.alloced;
637 __entry->rcvegr_phys =
638 uctxt->egrbufs.rcvtids[0].phys;
640 TP_printk("[%s] ctxt %u " UCTXT_FMT,
646 __entry->rcvhdrq_cnt,
647 __entry->rcvhdrq_phys,
/* Format fragment for hfi1_ctxt_info (define line elided in this copy). */
654 "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
/* Context info as reported to user space (note: cinfo passed by value). */
655 TRACE_EVENT(hfi1_ctxt_info,
656 TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
657 struct hfi1_ctxt_info cinfo),
658 TP_ARGS(dd, ctxt, subctxt, cinfo),
659 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
660 __field(unsigned, ctxt)
661 __field(unsigned, subctxt)
662 __field(u16, egrtids)
663 __field(u16, rcvhdrq_cnt)
664 __field(u16, rcvhdrq_size)
665 __field(u16, sdma_ring_size)
666 __field(u32, rcvegr_size)
668 TP_fast_assign(DD_DEV_ASSIGN(dd);
669 __entry->ctxt = ctxt;
670 __entry->subctxt = subctxt;
671 __entry->egrtids = cinfo.egrtids;
672 __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
673 __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
674 __entry->sdma_ring_size = cinfo.sdma_ring_size;
675 __entry->rcvegr_size = cinfo.rcvegr_size;
677 TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
682 __entry->rcvegr_size,
683 __entry->rcvhdrq_cnt,
684 __entry->rcvhdrq_size,
685 __entry->sdma_ring_size
/* Subnet-management (buffer control table) tracepoints. */
690 #define TRACE_SYSTEM hfi1_sma
/* Format for the full per-VL shared/dedicated credit table. */
693 "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
/* BCT(field): read a field back out of the byte-copied buffer_control. */
697 ((struct buffer_control *)__get_dynamic_array(bct))->field \
/*
 * Template: snapshot the entire buffer_control struct into the ring,
 * then pretty-print per-VL limits.
 * NOTE(review): the shared-limit BCT() lines between the dedicated
 * entries are elided in this copy.
 */
700 DECLARE_EVENT_CLASS(hfi1_bct_template,
701 TP_PROTO(struct hfi1_devdata *dd,
702 struct buffer_control *bc),
704 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
705 __dynamic_array(u8, bct, sizeof(*bc))
707 TP_fast_assign(DD_DEV_ASSIGN(dd);
708 memcpy(__get_dynamic_array(bct), bc,
711 TP_printk(BCT_FORMAT,
712 BCT(overall_shared_limit),
714 BCT(vl[0].dedicated),
717 BCT(vl[1].dedicated),
720 BCT(vl[2].dedicated),
723 BCT(vl[3].dedicated),
726 BCT(vl[4].dedicated),
729 BCT(vl[5].dedicated),
732 BCT(vl[6].dedicated),
735 BCT(vl[7].dedicated),
738 BCT(vl[15].dedicated),
743 DEFINE_EVENT(hfi1_bct_template, bct_set,
744 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
747 DEFINE_EVENT(hfi1_bct_template, bct_get,
748 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
/* SDMA engine tracepoints. */
752 #define TRACE_SYSTEM hfi1_sdma
/* Decode one hardware SDMA descriptor (flags, address, generation, length). */
754 TRACE_EVENT(hfi1_sdma_descriptor,
755 TP_PROTO(struct sdma_engine *sde,
760 TP_ARGS(sde, desc0, desc1, e, descp),
761 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
762 __field(void *, descp)
768 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
769 __entry->desc0 = desc0;
770 __entry->desc1 = desc1;
771 __entry->idx = sde->this_idx;
772 __entry->descp = descp;
776 "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
779 __parse_sdma_flags(__entry->desc0, __entry->desc1),
780 (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
781 SDMA_DESC0_PHY_ADDR_MASK,
782 (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
783 SDMA_DESC1_GENERATION_MASK),
784 (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
785 SDMA_DESC0_BYTE_COUNT_MASK),
/* Which SDMA engine was chosen for a given selector/VL. */
793 TRACE_EVENT(hfi1_sdma_engine_select,
794 TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
795 TP_ARGS(dd, sel, vl, idx),
796 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
801 TP_fast_assign(DD_DEV_ASSIGN(dd);
806 TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
/* Template for engine-status events (interrupt / progress). */
814 DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
815 TP_PROTO(struct sdma_engine *sde, u64 status),
816 TP_ARGS(sde, status),
817 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
821 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
822 __entry->status = status;
823 __entry->idx = sde->this_idx;
825 TP_printk("[%s] SDE(%u) status %llx",
828 (unsigned long long)__entry->status
832 DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
833 TP_PROTO(struct sdma_engine *sde, u64 status),
837 DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
838 TP_PROTO(struct sdma_engine *sde, u64 status),
/* Template for AHG (automatic header generation) index alloc/dealloc. */
842 DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
843 TP_PROTO(struct sdma_engine *sde, int aidx),
845 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
849 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
850 __entry->idx = sde->this_idx;
851 __entry->aidx = aidx;
853 TP_printk("[%s] SDE(%u) aidx %d",
860 DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
861 TP_PROTO(struct sdma_engine *sde, int aidx),
864 DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
865 TP_PROTO(struct sdma_engine *sde, int aidx),
/*
 * Two variants of hfi1_sdma_progress: the CONFIG_HFI1_DEBUG_SDMA_ORDER
 * build additionally records the txreq sequence number (sn); ~0 marks
 * "no txreq" for both sn and next_descq_idx.
 */
868 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
869 TRACE_EVENT(hfi1_sdma_progress,
870 TP_PROTO(struct sdma_engine *sde,
873 struct sdma_txreq *txp
875 TP_ARGS(sde, hwhead, swhead, txp),
876 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
881 __field(u16, tx_tail)
882 __field(u16, tx_head)
885 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
886 __entry->hwhead = hwhead;
887 __entry->swhead = swhead;
888 __entry->tx_tail = sde->tx_tail;
889 __entry->tx_head = sde->tx_head;
890 __entry->txnext = txp ? txp->next_descq_idx : ~0;
891 __entry->idx = sde->this_idx;
892 __entry->sn = txp ? txp->sn : ~0;
895 "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
907 TRACE_EVENT(hfi1_sdma_progress,
908 TP_PROTO(struct sdma_engine *sde,
909 u16 hwhead, u16 swhead,
910 struct sdma_txreq *txp
912 TP_ARGS(sde, hwhead, swhead, txp),
913 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
917 __field(u16, tx_tail)
918 __field(u16, tx_head)
921 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
922 __entry->hwhead = hwhead;
923 __entry->swhead = swhead;
924 __entry->tx_tail = sde->tx_tail;
925 __entry->tx_head = sde->tx_head;
926 __entry->txnext = txp ? txp->next_descq_idx : ~0;
927 __entry->idx = sde->this_idx;
930 "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
/* Template for in/out sequence-number events. */
942 DECLARE_EVENT_CLASS(hfi1_sdma_sn,
943 TP_PROTO(struct sdma_engine *sde, u64 sn),
945 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
949 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
951 __entry->idx = sde->this_idx;
953 TP_printk("[%s] SDE(%u) sn %llu",
960 DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
962 struct sdma_engine *sde,
968 DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
969 TP_PROTO(struct sdma_engine *sde, u64 sn),
/* User SDMA request tracepoints. */
973 #define USDMA_HDR_FORMAT \
974 "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
/*
 * Dump a user-supplied packet header: PBC (LE), LRH/BTH (BE, converted),
 * and all nine KDETH words (stored raw).
 */
976 TRACE_EVENT(hfi1_sdma_user_header,
977 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
978 struct hfi1_pkt_header *hdr, u32 tidval),
979 TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
985 __field(__le32, pbc0)
986 __field(__le32, pbc1)
987 __field(__be32, lrh0)
988 __field(__be32, lrh1)
989 __field(__be32, bth0)
990 __field(__be32, bth1)
991 __field(__be32, bth2)
992 __field(__le32, kdeth0)
993 __field(__le32, kdeth1)
994 __field(__le32, kdeth2)
995 __field(__le32, kdeth3)
996 __field(__le32, kdeth4)
997 __field(__le32, kdeth5)
998 __field(__le32, kdeth6)
999 __field(__le32, kdeth7)
1000 __field(__le32, kdeth8)
1001 __field(u32, tidval)
1004 __le32 *pbc = (__le32 *)hdr->pbc;
1005 __be32 *lrh = (__be32 *)hdr->lrh;
1006 __be32 *bth = (__be32 *)hdr->bth;
1007 __le32 *kdeth = (__le32 *)&hdr->kdeth;
1010 __entry->ctxt = ctxt;
1011 __entry->subctxt = subctxt;
1013 __entry->pbc0 = pbc[0];
1014 __entry->pbc1 = pbc[1];
1015 __entry->lrh0 = be32_to_cpu(lrh[0]);
1016 __entry->lrh1 = be32_to_cpu(lrh[1]);
1017 __entry->bth0 = be32_to_cpu(bth[0]);
1018 __entry->bth1 = be32_to_cpu(bth[1]);
1019 __entry->bth2 = be32_to_cpu(bth[2]);
1020 __entry->kdeth0 = kdeth[0];
1021 __entry->kdeth1 = kdeth[1];
1022 __entry->kdeth2 = kdeth[2];
1023 __entry->kdeth3 = kdeth[3];
1024 __entry->kdeth4 = kdeth[4];
1025 __entry->kdeth5 = kdeth[5];
1026 __entry->kdeth6 = kdeth[6];
1027 __entry->kdeth7 = kdeth[7];
1028 __entry->kdeth8 = kdeth[8];
1029 __entry->tidval = tidval;
1031 TP_printk(USDMA_HDR_FORMAT,
/* User SDMA request-info words: "i" points at the 16-bit request header. */
1056 #define SDMA_UREQ_FMT \
1057 "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
1058 TRACE_EVENT(hfi1_sdma_user_reqinfo,
1059 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
1060 TP_ARGS(dd, ctxt, subctxt, i),
1064 __field(u8, subctxt)
1065 __field(u8, ver_opcode)
1068 __field(u16, fragsize)
1069 __field(u16, comp_idx)
1073 __entry->ctxt = ctxt;
1074 __entry->subctxt = subctxt;
1075 __entry->ver_opcode = i[0] & 0xff;
1076 __entry->iovcnt = (i[0] >> 8) & 0xff;
1077 __entry->npkts = i[1];
1078 __entry->fragsize = i[2];
1079 __entry->comp_idx = i[3];
1081 TP_printk(SDMA_UREQ_FMT,
1085 __entry->ver_opcode,
/* Symbolic names for user SDMA completion states. */
1093 #define usdma_complete_name(st) { st, #st }
1094 #define show_usdma_complete_state(st) \
1095 __print_symbolic(st, \
1096 usdma_complete_name(FREE), \
1097 usdma_complete_name(QUEUED), \
1098 usdma_complete_name(COMPLETE), \
1099 usdma_complete_name(ERROR))
/* Completion-queue entry state transition for a user SDMA request. */
1101 TRACE_EVENT(hfi1_sdma_user_completion,
1102 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
1103 u8 state, int code),
1104 TP_ARGS(dd, ctxt, subctxt, idx, state, code),
1108 __field(u8, subctxt)
1115 __entry->ctxt = ctxt;
1116 __entry->subctxt = subctxt;
1118 __entry->state = state;
1119 __entry->code = code;
1121 TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
1122 __get_str(dev), __entry->ctxt, __entry->subctxt,
1123 __entry->idx, show_usdma_complete_state(__entry->state),
/* Helper (trace.c) to hex-print a u32 array into the trace_seq. */
1127 const char *print_u32_array(struct trace_seq *, u32 *, int);
1128 #define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
/* AHG header update words submitted for a user SDMA request (max 10). */
1130 TRACE_EVENT(hfi1_sdma_user_header_ahg,
1131 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
1132 u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
1133 TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
1137 __field(u8, subctxt)
1142 __field(u32, tidval)
1143 __array(u32, ahg, 10)
1147 __entry->ctxt = ctxt;
1148 __entry->subctxt = subctxt;
1151 __entry->idx = ahgidx;
1153 __entry->tidval = tidval;
1154 memcpy(__entry->ahg, ahg, len * sizeof(u32));
1156 TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
1164 __print_u32_hex(__entry->ahg, __entry->len),
/* SDMA engine state-machine transition (state names as strings). */
1169 TRACE_EVENT(hfi1_sdma_state,
1170 TP_PROTO(struct sdma_engine *sde,
1174 TP_ARGS(sde, cstate, nstate),
1175 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
1176 __string(curstate, cstate)
1177 __string(newstate, nstate)
1179 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
1180 __assign_str(curstate, cstate);
1181 __assign_str(newstate, nstate);
1183 TP_printk("[%s] current state %s new state %s",
1185 __get_str(curstate),
/* RC protocol tracepoints: PSN bookkeeping around sends/acks/timeouts. */
1191 #define TRACE_SYSTEM hfi1_rc
/*
 * Template capturing a QP's send/receive PSN state at an RC event.
 * NOTE(review): some field declarations are elided in this copy.
 */
1193 DECLARE_EVENT_CLASS(hfi1_rc_template,
1194 TP_PROTO(struct rvt_qp *qp, u32 psn),
1197 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1199 __field(u32, s_flags)
1202 __field(u32, s_next_psn)
1203 __field(u32, s_sending_psn)
1204 __field(u32, s_sending_hpsn)
1208 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
1209 __entry->qpn = qp->ibqp.qp_num;
1210 __entry->s_flags = qp->s_flags;
1212 __entry->s_psn = qp->s_psn;
1213 __entry->s_next_psn = qp->s_next_psn;
1214 __entry->s_sending_psn = qp->s_sending_psn;
1215 __entry->s_sending_hpsn = qp->s_sending_hpsn;
1216 __entry->r_psn = qp->r_psn;
1219 "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
1225 __entry->s_next_psn,
1226 __entry->s_sending_psn,
1227 __entry->s_sending_hpsn,
1232 DEFINE_EVENT(hfi1_rc_template, hfi1_rc_sendcomplete,
1233 TP_PROTO(struct rvt_qp *qp, u32 psn),
1237 DEFINE_EVENT(hfi1_rc_template, hfi1_rc_ack,
1238 TP_PROTO(struct rvt_qp *qp, u32 psn),
1242 DEFINE_EVENT(hfi1_rc_template, hfi1_rc_timeout,
1243 TP_PROTO(struct rvt_qp *qp, u32 psn),
1247 DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error,
1248 TP_PROTO(struct rvt_qp *qp, u32 psn),
/* Miscellaneous tracepoints. */
1253 #define TRACE_SYSTEM hfi1_misc
/*
 * Interrupt source: formats the source name via the is_table callback
 * into a fixed 64-byte buffer, using the offset within the entry's range.
 */
1255 TRACE_EVENT(hfi1_interrupt,
1256 TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
1258 TP_ARGS(dd, is_entry, src),
1259 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
1260 __array(char, buf, 64)
1263 TP_fast_assign(DD_DEV_ASSIGN(dd)
1264 is_entry->is_name(__entry->buf, 64,
1265 src - is_entry->start);
1268 TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
/* printf-style driver debug messages routed through tracepoints. */
1274 * This produces a REALLY ugly trace in the console output when the string is
1279 #define TRACE_SYSTEM hfi1_trace
1281 #define MAX_MSG_LEN 512
/* Template: records the calling function name and the formatted message. */
1283 DECLARE_EVENT_CLASS(hfi1_trace_template,
1284 TP_PROTO(const char *function, struct va_format *vaf),
1285 TP_ARGS(function, vaf),
1286 TP_STRUCT__entry(__string(function, function)
1287 __dynamic_array(char, msg, MAX_MSG_LEN)
1289 TP_fast_assign(__assign_str(function, function);
/* WARN if the message would have been truncated at MAX_MSG_LEN. */
1290 WARN_ON_ONCE(vsnprintf
1291 (__get_dynamic_array(msg),
1292 MAX_MSG_LEN, vaf->fmt,
1296 TP_printk("(%s) %s",
1297 __get_str(function),
1302 * It may be nice to macroize the __hfi1_trace but the va_* stuff requires an
1303 * actual function to work and can not be in a macro.
/* Declare the per-level trace function and its DEFINE_EVENT hook. */
1305 #define __hfi1_trace_def(lvl) \
1306 void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
1308 DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
1309 TP_PROTO(const char *function, struct va_format *vaf), \
1310 TP_ARGS(function, vaf))
/* Body generator used in trace.c: wrap varargs in a va_format and fire. */
1312 #define __hfi1_trace_fn(lvl) \
1313 void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
1315 struct va_format vaf = { \
1320 va_start(args, fmt); \
1322 trace_hfi1_ ##lvl(func, &vaf); \
1328 * To create a new trace level simply define it below and as a __hfi1_trace_fn
1329 * in trace.c. This will create all the hooks for calling
1330 * hfi1_cdbg(LVL, fmt, ...); as well as take care of all
1331 * the debugfs stuff.
1333 __hfi1_trace_def(PKT);
1334 __hfi1_trace_def(PROC);
1335 __hfi1_trace_def(SDMA);
1336 __hfi1_trace_def(LINKVERB);
1337 __hfi1_trace_def(DEBUG);
1338 __hfi1_trace_def(SNOOP);
1339 __hfi1_trace_def(CNTR);
1340 __hfi1_trace_def(PIO);
1341 __hfi1_trace_def(DC8051);
1342 __hfi1_trace_def(FIRMWARE);
1343 __hfi1_trace_def(RCVCTRL);
1344 __hfi1_trace_def(TID);
1345 __hfi1_trace_def(MMU);
1346 __hfi1_trace_def(IOCTL);
/* Public entry points: hfi1_cdbg(LEVEL, fmt, ...) / hfi1_dbg(fmt, ...). */
1348 #define hfi1_cdbg(which, fmt, ...) \
1349 __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
1351 #define hfi1_dbg(fmt, ...) \
1352 hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
1355 * Define HFI1_EARLY_DBG at compile time or here to enable early trace
1356 * messages. Do not check in an enablement for this.
1359 #ifdef HFI1_EARLY_DBG
1360 #define hfi1_dbg_early(fmt, ...) \
1361 trace_printk(fmt, ##__VA_ARGS__)
1363 #define hfi1_dbg_early(fmt, ...)
1366 #endif /* __HFI1_TRACE_H */
/* Boilerplate telling define_trace.h where to find this header. */
1368 #undef TRACE_INCLUDE_PATH
1369 #undef TRACE_INCLUDE_FILE
1370 #define TRACE_INCLUDE_PATH .
1371 #define TRACE_INCLUDE_FILE trace
1372 #include <trace/define_trace.h>