/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"
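
/*
 * Convert the passed-in SMP to its GetResp form, setting the direction
 * bit for directed-route packets; the return value tells the MAD layer
 * to post the modified MAD back to the sender.
 */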
static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int reply_failure(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
}
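
/*
 * qib_send_trap - post a Trap(Notice) SMP toward the subnet manager
 * @ibp: the port to send the trap on
 * @data: the notice attribute payload
 * @len: length of @data in bytes
 *
 * Traps are dropped while the link is not ACTIVE and are rate-limited
 * per o14-2; the address handle for the SM is created once and cached.
 */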
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;

	agent = ibp->rvp.send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	/* o14-2 */
	if (ibp->rvp.trap_timeout &&
	    time_before(jiffies, ibp->rvp.trap_timeout))
		return;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
				      IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->rvp.tid++;
	smp->tid = cpu_to_be64(ibp->rvp.tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->data, data, len);

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	if (!ibp->rvp.sm_ah) {
		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;

			ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
			if (IS_ERR(ah))
				ret = PTR_ERR(ah);
			else {
				send_buf->ah = ah;
				ibp->rvp.sm_ah = ibah_to_rvtah(ah);
				ret = 0;
			}
		} else
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->rvp.sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
		timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
		ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->rvp.trap_timeout = 0;
	}
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
	struct ib_mad_notice_attr data;

	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
		ibp->rvp.pkey_violations++;
	else
		ibp->rvp.qkey_violations++;
	ibp->rvp.n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_257_258.lid1 = lid1;
	data.details.ntc_257_258.lid2 = lid2;
	data.details.ntc_257_258.key = cpu_to_be32(key);
	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
	struct ib_mad_notice_attr data;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_256.lid = data.issuer_lid;
	data.details.ntc_256.method = smp->method;
	data.details.ntc_256.attr_id = smp->attr_id;
	data.details.ntc_256.attr_mod = smp->attr_mod;
	data.details.ntc_256.mkey = smp->mkey;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		u8 hop_cnt;

		data.details.ntc_256.dr_slid = smp->dr_slid;
		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		hop_cnt = smp->hop_cnt;
		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
			data.details.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
		}
		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
		       hop_cnt);
	}

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = dd_from_dev(ibdev);
	struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.new_cap_mask =
		cpu_to_be32(ibp->rvp.port_cap_flags);
	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_145.lid = data.issuer_lid;
	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.local_changes = 1;
	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

	qib_send_trap(ibp, &data, sizeof(data));
}

static int subn_get_nodedescription(struct ib_smp *smp,
				    struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 vendor, majrev, minrev;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = dd->pport[pidx].guid;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;     /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_qib_sys_image_guid;
	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->deviceid);
	majrev = dd->majrev;
	minrev = dd->minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->vendorid;
	nip->vendor_id[0] = QIB_SRC_OUI_1;
	nip->vendor_id[1] = QIB_SRC_OUI_2;
	nip->vendor_id[2] = QIB_SRC_OUI_3;

	return reply(smp);
}

static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		__be64 g = ppd->guid;
		unsigned i;

		/* GUID 0 is illegal */
		if (g == 0)
			smp->status |= IB_SMP_INVALID_FIELD;
		else {
			/* The first is a copy of the read-only HW GUID. */
			p[0] = g;
			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
				p[i] = ibp->guids[i - 1];
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
				     (u32)n);
	return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
				     (u32)n);
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
		IB_LINKINITCMD_SLEEP;
}
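
/*
 * check_mkey - validate the M_Key of an incoming SMP
 *
 * Returns 0 if the MAD may be processed, 1 if it must be discarded.
 * A failed check on a Set or TrapRepress (or a Get at mkeyprot >= 2)
 * counts a violation, arms the lease timer, and sends a bad M_Key trap.
 */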
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->rvp.mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->rvp.mkey_lease_timeout = 0;
		ibp->rvp.mkeyprot = 0;
	}

	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
	    ibp->rvp.mkey == smp->mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
	    (smp->method == IB_MGMT_METHOD_GET ||
	     smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->rvp.mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (smp->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->rvp.mkeyprot < 2)
				break;
			/* FALLTHROUGH */
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->rvp.mkey_violations != 0xFFFF)
				++ibp->rvp.mkey_violations;
			if (!ibp->rvp.mkey_lease_timeout &&
			    ibp->rvp.mkey_lease_period)
				ibp->rvp.mkey_lease_timeout = jiffies +
					ibp->rvp.mkey_lease_period * HZ;
			/* Generate a trap notice. */
			qib_bad_mkey(ibp, smp);
			ret = 1;
		}
	}

	return ret;
}
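
/**
 * subn_get_portinfo - get port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 */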
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret) {
				ret = IB_MAD_RESULT_FAILURE;
				goto bail;
			}
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->rvp.mkey != smp->mkey &&
	      ibp->rvp.mkeyprot == 1))
		pip->mkey = ibp->rvp.mkey;
	pip->gid_prefix = ibp->rvp.gid_prefix;
	pip->lid = cpu_to_be16(ppd->lid);
	pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
	pip->vl_high_limit = ibp->rvp.vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd = dd->pport + port - 1;
	/*
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

	return 0;
}
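
/*
 * Return one 32-entry block of the P_Key table in network byte order;
 * only block 0 (attr_mod 0) is supported, anything else is flagged as
 * an invalid field.
 */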
static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct qib_devdata *dd = dd_from_ibdev(ibdev);
		unsigned i, n = qib_get_npkeys(dd);

		get_pkeys(dd, port, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		unsigned i;

		/* The first entry is read-only. */
		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
			ibp->guids[i - 1] = p[i];
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	/* The only GUID we support is the first read-only entry. */
	return subn_get_guidinfo(smp, ibdev, port);
}

/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
	unsigned long flags;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u8 vls;
	u8 msl;
	u16 lstate;
	int ret, ore, mtu;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt)
			goto err;
		/* Port attributes can only be set on the receiving port */
		if (port_num != port)
			goto get_only;
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ibp->rvp.mkey = pip->mkey;
	ibp->rvp.gid_prefix = pip->gid_prefix;
	ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	/* Must be a valid unicast LID address. */
	if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
		if (ppd->lid != lid)
			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	msl = pip->neighbormtu_mastersmsl & 0xF;
	/* Must be a valid unicast LID address. */
	if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		if (ibp->rvp.sm_ah) {
			if (smlid != ibp->rvp.sm_lid)
				rdma_ah_set_dlid(&ibp->rvp.sm_ah->attr,
						 smlid);
			if (msl != ibp->rvp.sm_sl)
				rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
		}
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		if (smlid != ibp->rvp.sm_lid)
			ibp->rvp.sm_lid = smlid;
		if (msl != ibp->rvp.sm_sl)
			ibp->rvp.sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lwe != ppd->link_width_enabled)
			set_link_width_enabled(ppd, lwe);
	}

	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		/*
		 * The IB 1.2 spec. only allows link speed values
		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
		 * speeds.
		 */
		if (lse == 15)
			set_link_speed_enabled(ppd,
					       ppd->link_speed_supported);
		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lse != ppd->link_speed_enabled)
			set_link_speed_enabled(ppd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_SLEEP);
		break;
	case 2: /* POLL */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_POLL);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	ibp->rvp.vl_high_limit = pip->vl_high_limit;
	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
				ibp->rvp.vl_high_limit);

	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
	if (mtu == -1)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		qib_set_mtu(ppd, mtu);

	/* Set operational VLs */
	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
	if (vls) {
		if (vls > ppd->vls_supported)
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
	}

	if (pip->mkey_violations == 0)
		ibp->rvp.mkey_violations = 0;

	if (pip->pkey_violations == 0)
		ibp->rvp.pkey_violations = 0;

	if (pip->qkey_violations == 0)
		ibp->rvp.qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
		smp->status |= IB_SMP_INVALID_FIELD;

	if (set_overrunthreshold(ppd, (ore & 0xF)))
		smp->status |= IB_SMP_INVALID_FIELD;

	ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		smp->status |= IB_SMP_INVALID_FIELD;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = QIB_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = QIB_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = QIB_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = QIB_IB_LINKDOWN_DISABLE;
		else {
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		qib_set_linkstate(ppd, lstate);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
		break;
	case IB_PORT_ARMED:
		qib_set_linkstate(ppd, QIB_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	ret = subn_get_portinfo(smp, ibdev, port);

	/* restore re-reg bit per o14-12.2.1 */
	pip->clientrereg_resv_subnetto |= clientrereg;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
get_only:
	ret = subn_get_portinfo(smp, ibdev, port);
done:
	return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the physical port data
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			ppd->pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		ret = 0;
		goto bail;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.rdi.ibdev;
		event.element.port_num = port;
		ib_dispatch_event(&event);
	}
	return 0;
}
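
/*
 * Byte-swap the incoming P_Key block in place, push it to the hardware
 * via set_pkeys(), and reply with the resulting table.
 */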
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned i, n = qib_get_npkeys(dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}

static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
		smp->status |= IB_SMP_UNSUP_METHOD;
	else
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

	return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}
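
/*
 * The upper 16 bits of attr_mod select the VL arbitration block; only
 * the low-priority and high-priority 0-31 blocks are implemented.
 */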
static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	memset(smp->data, 0, sizeof(smp->data));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o13-6, o13-7,
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
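
/*
 * Return the PMA ClassPortInfo, advertising extended-width counters
 * and, when the hardware supports PortXmitWait sampling, congestion
 * statistics via the CM2 capability bits.
 */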
static int pma_get_classportinfo(struct ib_pma_mad *pmp,
				 struct ib_device *ibdev)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	ib_set_cpi_capmask2(p,
			    dd->psxmitwait_supported <<
			    (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	ib_set_cpi_resp_time(p, 18);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4;   /* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	p->counter_select[0] = ibp->rvp.pma_counter_select[0];
	p->counter_select[1] = ibp->rvp.pma_counter_select[1];
	p->counter_select[2] = ibp->rvp.pma_counter_select[2];
	p->counter_select[3] = ibp->rvp.pma_counter_select[3];
	p->counter_select[4] = ibp->rvp.pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&ibp->rvp.lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->rvp.pma_tag = be16_to_cpu(p->tag);
		ibp->rvp.pma_counter_select[0] = p->counter_select[0];
		ibp->rvp.pma_counter_select[1] = p->counter_select[1];
		ibp->rvp.pma_counter_select[2] = p->counter_select[2];
		ibp->rvp.pma_counter_select[3] = p->counter_select[3];
		ibp->rvp.pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
				      ibp->rvp.pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}
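
/*
 * Read one PS* hardware sampling counter, selected by a
 * PortSamplesControl CounterSelect value; unknown selectors read as 0.
 */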
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
	u32 delta;

	delta = get_counter(&ppd->ibport_data, ppd,
			    IB_PMA_PORT_XMIT_WAIT);
	return ppd->cong_stats.counter + delta;
}

static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;

	ppd->cong_stats.counter_cache.psxmitdata =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
	ppd->cong_stats.counter_cache.psrcvdata =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
	ppd->cong_stats.counter_cache.psxmitpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
	ppd->cong_stats.counter_cache.psrcvpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
	ppd->cong_stats.counter_cache.psxmitwait =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}

static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
					__be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->cong_stats.counter_cache.psxmitdata;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->cong_stats.counter_cache.psrcvdata;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->cong_stats.counter_cache.psxmitpkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->cong_stats.counter_cache.psrcvpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->cong_stats.counter_cache.psxmitwait;
		break;
	default:
		ret = 0;
	}

	return ret;
}
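
/*
 * Once a sample completes, the PS* counters are cached and the
 * hardware sampler is handed back to the congestion timer, so repeated
 * reads of the result are served from the cache.
 */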
static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
		p->counter[i] = cpu_to_be32(
			get_cache_hw_sample_counters(
				ppd, ibp->rvp.pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	/* Port Sampling code owns the PS* HW counters */
	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		/* 64 bits */
		p->extended_width = cpu_to_be32(0x80000000);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
		p->counter[i] = cpu_to_be64(
			get_cache_hw_sample_counters(
				ppd, ibp->rvp.pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return reply((struct ib_smp *) pmp);
}
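
/*
 * PortCounters are narrow saturating values: each one is clamped to
 * its all-ones maximum after subtracting the z_* baselines recorded by
 * the last counter clear.
 */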
static int pma_get_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;
	u8 port_select = p->port_select;

	qib_get_counters(ppd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	/* Congestion PMA packets start at offset 24 not 64 */
	struct ib_pma_portcounters_cong *p =
		(struct ib_pma_portcounters_cong *)pmp->reserved;
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
	u64 xmit_wait_counter;
	unsigned long flags;

	/*
	 * This check is performed only in the GET method because the
	 * SET method ends up calling this anyway.
	 */
	if (!dd->psxmitwait_supported)
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
	if (port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	qib_get_counters(ppd, &cntrs);
	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -=
		ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
	       sizeof(pmp->data));

	/*
	 * Set top 3 bits to indicate interval in picoseconds in
	 * remaining bits.
	 */
	p->port_check_rate =
		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
			    (dd->psxmitwait_check_rate &
			     ~(QIB_XMIT_RATE_PICO << 13)));
	p->port_adr_events = cpu_to_be64(0);
	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
	p->port_xmit_packets =
		cpu_to_be64(cntrs.port_xmit_packets);
	p->port_rcv_packets =
		cpu_to_be64(cntrs.port_rcv_packets);
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter =
			(u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}

static void qib_snapshot_pmacounters(
	struct qib_ibport *ibp,
	struct qib_pma_counters *pmacounters)
{
	struct qib_pma_counters *p;
	int cpu;

	memset(pmacounters, 0, sizeof(*pmacounters));
	for_each_possible_cpu(cpu) {
		p = per_cpu_ptr(ibp->pmastats, cpu);
		pmacounters->n_unicast_xmit += p->n_unicast_xmit;
		pmacounters->n_unicast_rcv += p->n_unicast_rcv;
		pmacounters->n_multicast_xmit += p->n_multicast_xmit;
		pmacounters->n_multicast_rcv += p->n_multicast_rcv;
	}
}

static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	struct qib_pma_counters pma;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= ibp->z_port_xmit_data;
	rwords -= ibp->z_port_rcv_data;
	spkts -= ibp->z_port_xmit_packets;
	rpkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);

	qib_snapshot_pmacounters(ibp, &pma);

	p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
		- ibp->z_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
		- ibp->z_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
		- ibp->z_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
		- ibp->z_multicast_rcv);

bail:
	return reply((struct ib_smp *) pmp);
}
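
/*
 * A set bit in CounterSelect "clears" the corresponding counter by
 * recording its current value as the new z_* baseline.
 */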
static int pma_set_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		ibp->rvp.n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	return pma_get_portcounters(pmp, ibdev, port);
}

static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	struct qib_verbs_counters cntrs;
	u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
	int ret = 0;
	unsigned long flags;

	qib_get_counters(ppd, &cntrs);
	/* Get counter values before we save them */
	ret = pma_get_portcounters_cong(pmp, ibdev, port);

	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
		spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
		ppd->cong_stats.counter = 0;
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
				      0x0);
		spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
	}
	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
		ibp->z_port_xmit_data = cntrs.port_xmit_data;
		ibp->z_port_rcv_data = cntrs.port_rcv_data;
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	if (counter_select & IB_PMA_SEL_CONG_ALL) {
		ibp->z_symbol_error_counter =
			cntrs.symbol_error_counter;
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		ibp->z_link_downed_counter =
			cntrs.link_downed_counter;
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		ibp->z_port_xmit_discards =
			cntrs.port_xmit_discards;
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;
		ibp->rvp.n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	return ret;
}

static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	struct qib_pma_counters pma;

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		ibp->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = rpkts;

	qib_snapshot_pmacounters(ibp, &pma);

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		ibp->z_unicast_xmit = pma.n_unicast_xmit;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		ibp->z_unicast_rcv = pma.n_unicast_rcv;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		ibp->z_multicast_xmit = pma.n_multicast_xmit;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		ibp->z_multicast_rcv = pma.n_multicast_rcv;

	return pma_get_portcounters_ext(pmp, ibdev, port);
}
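
/*
 * process_subn - dispatch an incoming subnet-management MAD
 *
 * out_mad starts as a copy of in_mad and is edited in place into the
 * response by the attribute handlers above, after M_Key validation.
 */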
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	ret = check_mkey(ibp, smp, mad_flags);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * If this is a get/set portinfo, we already check the
		 * M_Key if the MAD is for another port and the M_Key
		 * is OK on the receiving port. This check is needed
		 * to increment the error counters when the M_Key
		 * fails to match on *both* ports.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
		ret = IB_MAD_RESULT_FAILURE;
		goto bail;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_get_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_get_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_get_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_get_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_get_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_set_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_set_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_set_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_set_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_set_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (smp->attr_id == IB_SMP_ATTR_NOTICE)
			ret = subn_trap_repress(smp, ibdev, port);
		else {
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
		}
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_SEND:
		if (ib_get_smp_direction(smp) &&
		    smp->attr_id == QIB_VENDOR_IPG) {
			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
					      smp->data[0]);
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		} else
			ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}
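
/*
 * process_perf - dispatch an incoming performance-management MAD to
 * the PMA attribute handlers above.
 */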
static int process_perf(struct ib_device *ibdev, u8 port,
			const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_classportinfo(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = pma_get_portsamplesresult(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_get_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_set_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_set_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_set_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}
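
/*
 * Congestion Control Agent (CCA) attribute handlers follow. Get
 * requests are served from the shadow copies protected by
 * ppd->cc_shadow_lock, so a reader never sees a table that a
 * concurrent Set has only partially updated.
 */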
static int cc_get_classportinfo(struct ib_cc_mad *ccp,
				struct ib_device *ibdev)
{
	struct ib_cc_classportinfo_attr *p =
		(struct ib_cc_classportinfo_attr *)ccp->mgmt_data;

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	p->base_version = 1;
	p->class_version = 1;
	p->cap_mask = 0;

	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) ccp);
}
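
/*
 * cc_get_congestion_info - report the CongestionInfo attribute,
 * i.e. how many congestion control table entries the port supports.
 */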
static int cc_get_congestion_info(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_info_attr *p =
		(struct ib_cc_info_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	p->congestion_info = 0;
	p->control_table_cap = ppd->cc_max_table_entries;

	return reply((struct ib_smp *) ccp);
}
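
/*
 * cc_get_congestion_setting - return the per-SL congestion control
 * settings from the port's shadow copy.
 */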
static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	int i;
	struct ib_cc_congestion_setting_attr *p =
		(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_cc_congestion_entry_shadow *entries;

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	spin_lock(&ppd->cc_shadow_lock);

	entries = ppd->congestion_entries_shadow->entries;
	p->port_control = cpu_to_be16(
		ppd->congestion_entries_shadow->port_control);
	p->control_map = cpu_to_be16(
		ppd->congestion_entries_shadow->control_map);
	for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
		p->entries[i].ccti_increase = entries[i].ccti_increase;
		p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
		p->entries[i].trigger_threshold = entries[i].trigger_threshold;
		p->entries[i].ccti_min = entries[i].ccti_min;
	}

	spin_unlock(&ppd->cc_shadow_lock);

	return reply((struct ib_smp *) ccp);
}
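
/*
 * cc_get_congestion_control_table - return one IB_CCT_ENTRIES-sized
 * block of the congestion control table; attr_mod selects the block.
 */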
static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_table_attr *p =
		(struct ib_cc_table_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
	u32 max_cct_block;
	u32 cct_entry;
	struct ib_cc_table_entry_shadow *entries;
	int i;

	/* Is the table index more than what is supported? */
	if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
		goto bail;

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	spin_lock(&ppd->cc_shadow_lock);

	max_cct_block =
		(ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
	max_cct_block = max_cct_block ? max_cct_block - 1 : 0;

	if (cct_block_index > max_cct_block) {
		spin_unlock(&ppd->cc_shadow_lock);
		goto bail;
	}

	ccp->attr_mod = cpu_to_be32(cct_block_index);

	cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);

	cct_entry--;

	p->ccti_limit = cpu_to_be16(cct_entry);

	entries = &ppd->ccti_entries_shadow->
			entries[IB_CCT_ENTRIES * cct_block_index];
	cct_entry %= IB_CCT_ENTRIES;

	for (i = 0; i <= cct_entry; i++)
		p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);

	spin_unlock(&ppd->cc_shadow_lock);

	return reply((struct ib_smp *) ccp);

bail:
	return reply_failure((struct ib_smp *) ccp);
}
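
/*
 * cc_set_congestion_setting - apply the SM's per-SL congestion control
 * parameters to the port's working copy.
 */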
static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_congestion_setting_attr *p =
		(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int i;

	ppd->cc_sl_control_map = be16_to_cpu(p->control_map);

	for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
		ppd->congestion_entries[i].ccti_increase =
			p->entries[i].ccti_increase;

		ppd->congestion_entries[i].ccti_timer =
			be16_to_cpu(p->entries[i].ccti_timer);

		ppd->congestion_entries[i].trigger_threshold =
			p->entries[i].trigger_threshold;

		ppd->congestion_entries[i].ccti_min =
			p->entries[i].ccti_min;
	}

	return reply((struct ib_smp *) ccp);
}
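
/*
 * cc_set_congestion_control_table - absorb one block of the congestion
 * control table, then publish the validated table to the shadow copies
 * under cc_shadow_lock.
 */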
static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_table_attr *p =
		(struct ib_cc_table_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
	u32 cct_entry;
	struct ib_cc_table_entry_shadow *entries;
	int i;

	/* Is the table index more than what is supported? */
	if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
		goto bail;

	/* If this packet is the first in the sequence then
	 * zero the total table entry count.
	 */
	if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
		ppd->total_cct_entry = 0;

	cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;

	/* ccti_limit is 0 to 63 */
	ppd->total_cct_entry += (cct_entry + 1);

	if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
		goto bail;

	ppd->ccti_limit = be16_to_cpu(p->ccti_limit);

	entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);

	for (i = 0; i <= cct_entry; i++)
		entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);

	spin_lock(&ppd->cc_shadow_lock);

	ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
	memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
		(ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));

	ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
	ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
	memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
		IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));

	spin_unlock(&ppd->cc_shadow_lock);

	return reply((struct ib_smp *) ccp);

bail:
	return reply_failure((struct ib_smp *) ccp);
}
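
/*
 * check_cc_key - CC_Key validation stub; qib does not enforce a
 * CC_Key, so every congestion control MAD is accepted.
 */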
static int check_cc_key(struct qib_ibport *ibp,
			struct ib_cc_mad *ccp, int mad_flags)
{
	return 0;
}
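
/*
 * process_cc - dispatch an incoming Congestion Control class MAD to
 * the cc_* attribute handlers above.
 */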
static int process_cc(struct ib_device *ibdev, int mad_flags,
			u8 port, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	int ret;

	*out_mad = *in_mad;

	if (ccp->class_version != 2) {
		ccp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *)ccp);
		goto bail;
	}

	ret = check_cc_key(ibp, ccp, mad_flags);
	if (ret)
		goto bail;

	switch (ccp->method) {
	case IB_MGMT_METHOD_GET:
		switch (ccp->attr_id) {
		case IB_CC_ATTR_CLASSPORTINFO:
			ret = cc_get_classportinfo(ccp, ibdev);
			goto bail;

		case IB_CC_ATTR_CONGESTION_INFO:
			ret = cc_get_congestion_info(ccp, ibdev, port);
			goto bail;

		case IB_CC_ATTR_CA_CONGESTION_SETTING:
			ret = cc_get_congestion_setting(ccp, ibdev, port);
			goto bail;

		case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
			ret = cc_get_congestion_control_table(ccp, ibdev, port);
			goto bail;

		default:
			ccp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) ccp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (ccp->attr_id) {
		case IB_CC_ATTR_CA_CONGESTION_SETTING:
			ret = cc_set_congestion_setting(ccp, ibdev, port);
			goto bail;

		case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
			ret = cc_set_congestion_control_table(ccp, ibdev, port);
			goto bail;

		default:
			ccp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) ccp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	default:
		ccp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) ccp);
	}

bail:
	return ret;
}

/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		    const struct ib_mad_hdr *in, size_t in_mad_size,
		    struct ib_mad_hdr *out, size_t *out_mad_size,
		    u16 *out_mad_pkey_index)
{
	int ret;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_CONG_MGMT:
		/* Ignore CC MADs if congestion control is not enabled. */
		if (!ppd->congestion_entries_shadow ||
			 !qib_cc_table_size) {
			ret = IB_MAD_RESULT_SUCCESS;
			goto bail;
		}
		ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}
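
/*
 * xmit_wait_timer_func - per-second timer used while congestion
 * counters are active: harvest the hardware transmit-wait sample
 * once it completes, then re-arm the sampling hardware and the timer.
 */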
static void xmit_wait_timer_func(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = dd_from_ppd(ppd);
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* save counter cache */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		} else
			goto done;
	}
	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}
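
/*
 * qib_notify_create_mad_agent - called by rdmavt when the MAD agent
 * for a port is created; sets up the transmit-wait sampling timer.
 */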
void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(ibdev,
					      struct qib_devdata, verbs_dev);

	/* Initialize xmit_wait structure */
	dd->pport[port_idx].cong_stats.counter = 0;
	init_timer(&dd->pport[port_idx].cong_stats.timer);
	dd->pport[port_idx].cong_stats.timer.function = xmit_wait_timer_func;
	dd->pport[port_idx].cong_stats.timer.data =
		(unsigned long)(&dd->pport[port_idx]);
	dd->pport[port_idx].cong_stats.timer.expires = 0;
	add_timer(&dd->pport[port_idx].cong_stats.timer);
}
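
/*
 * qib_notify_free_mad_agent - called by rdmavt when the MAD agent is
 * freed; stops the sampling timer and destroys the cached SMI address
 * handle, if any.
 */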
void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(ibdev,
					      struct qib_devdata, verbs_dev);

	if (dd->pport[port_idx].cong_stats.timer.data)
		del_timer_sync(&dd->pport[port_idx].cong_stats.timer);

	if (dd->pport[port_idx].ibport_data.smi_ah)
		rdma_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah);
}