/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

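/*
 * reply() and reply_failure() turn an incoming SMP into a GetResp()
 * in place; for directed-route SMPs the direction bit must also be
 * set in the status field so the reply travels the return path.
 */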
static int reply(struct ib_smp *smp)
{
        /*
         * The verbs framework will handle the directed/LID route
         * packet changes.
         */
        smp->method = IB_MGMT_METHOD_GET_RESP;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                smp->status |= IB_SMP_DIRECTION;
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int reply_failure(struct ib_smp *smp)
{
        /*
         * The verbs framework will handle the directed/LID route
         * packet changes.
         */
        smp->method = IB_MGMT_METHOD_GET_RESP;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                smp->status |= IB_SMP_DIRECTION;
        return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
}

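/*
 * Build a SubnTrap() SMP carrying @data and post it to the subnet
 * manager through the port's send agent, creating a QP0 address handle
 * for the SM on first use.  Traps are suppressed while the link is not
 * ACTIVE (o14-3.2.1) and are rate limited via trap_timeout (o14-2).
 */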
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
        struct ib_mad_send_buf *send_buf;
        struct ib_mad_agent *agent;
        struct ib_smp *smp;
        int ret;
        unsigned long flags;
        unsigned long timeout;

        agent = ibp->rvp.send_agent;
        if (!agent)
                return;

        /* o14-3.2.1 */
        if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
                return;

        /* o14-2 */
        if (ibp->rvp.trap_timeout &&
            time_before(jiffies, ibp->rvp.trap_timeout))
                return;

        send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
                                      IB_MGMT_MAD_DATA, GFP_ATOMIC,
                                      IB_MGMT_BASE_VERSION);
        if (IS_ERR(send_buf))
                return;

        smp = send_buf->mad;
        smp->base_version = IB_MGMT_BASE_VERSION;
        smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        smp->class_version = 1;
        smp->method = IB_MGMT_METHOD_TRAP;
        ibp->rvp.tid++;
        smp->tid = cpu_to_be64(ibp->rvp.tid);
        smp->attr_id = IB_SMP_ATTR_NOTICE;
        /* o14-1: smp->mkey = 0; */
        memcpy(smp->data, data, len);

        spin_lock_irqsave(&ibp->rvp.lock, flags);
        if (!ibp->rvp.sm_ah) {
                if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
                        struct ib_ah *ah;

                        ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
                        if (IS_ERR(ah))
                                ret = PTR_ERR(ah);
                        else {
                                send_buf->ah = ah;
                                ibp->rvp.sm_ah = ibah_to_rvtah(ah);
                                ret = 0;
                        }
                } else
                        ret = -EINVAL;
        } else {
                send_buf->ah = &ibp->rvp.sm_ah->ibah;
                ret = 0;
        }
        spin_unlock_irqrestore(&ibp->rvp.lock, flags);

        if (!ret)
                ret = ib_post_send_mad(send_buf, NULL);
        if (!ret) {
                /* 4.096 usec. */
                timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
                ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
        } else {
                ib_free_send_mad(send_buf);
                ibp->rvp.trap_timeout = 0;
        }
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
        struct ib_mad_notice_attr data;

        if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
                ibp->rvp.pkey_violations++;
        else
                ibp->rvp.qkey_violations++;
        ibp->rvp.n_pkt_drops++;

        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = trap_num;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_257_258.lid1 = lid1;
        data.details.ntc_257_258.lid2 = lid2;
        data.details.ntc_257_258.key = cpu_to_be32(key);
        data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
        data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

        qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
        struct ib_mad_notice_attr data;

        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_256.lid = data.issuer_lid;
        data.details.ntc_256.method = smp->method;
        data.details.ntc_256.attr_id = smp->attr_id;
        data.details.ntc_256.attr_mod = smp->attr_mod;
        data.details.ntc_256.mkey = smp->mkey;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                u8 hop_cnt;

                data.details.ntc_256.dr_slid = smp->dr_slid;
                data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
                hop_cnt = smp->hop_cnt;
                if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
                        data.details.ntc_256.dr_trunc_hop |=
                                IB_NOTICE_TRAP_DR_TRUNC;
                        hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
                }
                data.details.ntc_256.dr_trunc_hop |= hop_cnt;
                memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
                       hop_cnt);
        }

        qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
        struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = dd_from_dev(ibdev);
        struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
        struct ib_mad_notice_attr data;

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.new_cap_mask =
                                        cpu_to_be32(ibp->rvp.port_cap_flags);
        qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
        struct ib_mad_notice_attr data;

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_145.lid = data.issuer_lid;
        data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

        qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
        struct ib_mad_notice_attr data;

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.local_changes = 1;
        data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

        qib_send_trap(ibp, &data, sizeof(data));
}

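/*
 * Subn{Get}(NodeDescription): copy the node description string out of
 * the ib_device; a non-zero attribute modifier is an invalid field.
 */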
static int subn_get_nodedescription(struct ib_smp *smp,
                                    struct ib_device *ibdev)
{
        if (smp->attr_mod)
                smp->status |= IB_SMP_INVALID_FIELD;

        memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

        return reply(smp);
}

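/*
 * Subn{Get}(NodeInfo): fill in the NodeInfo attribute from the device
 * and per-port data.  A zero port GUID is reported as an invalid
 * field since GUID 0 is illegal.
 */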
static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        u32 vendor, majrev, minrev;
        unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

        /* GUID 0 is illegal */
        if (smp->attr_mod || pidx >= dd->num_pports ||
            dd->pport[pidx].guid == 0)
                smp->status |= IB_SMP_INVALID_FIELD;
        else
                nip->port_guid = dd->pport[pidx].guid;

        nip->base_version = 1;
        nip->class_version = 1;
        nip->node_type = 1;     /* channel adapter */
        nip->num_ports = ibdev->phys_port_cnt;
        /* This is already in network order */
        nip->sys_guid = ib_qib_sys_image_guid;
        nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
        nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
        nip->device_id = cpu_to_be16(dd->deviceid);
        majrev = dd->majrev;
        minrev = dd->minrev;
        nip->revision = cpu_to_be32((majrev << 16) | minrev);
        nip->local_port_num = port;
        vendor = dd->vendorid;
        nip->vendor_id[0] = QIB_SRC_OUI_1;
        nip->vendor_id[1] = QIB_SRC_OUI_2;
        nip->vendor_id[2] = QIB_SRC_OUI_3;

        return reply(smp);
}

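/*
 * Subn{Get}(GUIDInfo): return GUID block 0 for the port.  Entry 0 is
 * the read-only hardware GUID; the remaining entries come from the
 * per-port guids[] table.
 */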
static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
        __be64 *p = (__be64 *) smp->data;
        unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

        /* 32 blocks of 8 64-bit GUIDs per block */

        memset(smp->data, 0, sizeof(smp->data));

        if (startgx == 0 && pidx < dd->num_pports) {
                struct qib_pportdata *ppd = dd->pport + pidx;
                struct qib_ibport *ibp = &ppd->ibport_data;
                __be64 g = ppd->guid;
                unsigned i;

                /* GUID 0 is illegal */
                if (g == 0)
                        smp->status |= IB_SMP_INVALID_FIELD;
                else {
                        /* The first is a copy of the read-only HW GUID. */
                        p[0] = g;
                        for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
                                p[i] = ibp->guids[i - 1];
                }
        } else
                smp->status |= IB_SMP_INVALID_FIELD;

        return reply(smp);
}

static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
        return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
                                         (u32)n);
        return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
        return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
                                         (u32)n);
        return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
        return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
                IB_LINKINITCMD_SLEEP;
}

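/*
 * Validate the M_Key in an incoming SMP against the port's M_Key,
 * honoring the lease timeout and protection level.  Returns 0 if
 * processing may continue and 1 if the MAD must be rejected, in which
 * case the violation counter is bumped and a bad M_Key trap is sent.
 */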
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
        int valid_mkey = 0;
        int ret = 0;

        /* Is the mkey in the process of expiring? */
        if (ibp->rvp.mkey_lease_timeout &&
            time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
                /* Clear timeout and mkey protection field. */
                ibp->rvp.mkey_lease_timeout = 0;
                ibp->rvp.mkeyprot = 0;
        }

        if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
            ibp->rvp.mkey == smp->mkey)
                valid_mkey = 1;

        /* Unset lease timeout on any valid Get/Set/TrapRepress */
        if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
            (smp->method == IB_MGMT_METHOD_GET ||
             smp->method == IB_MGMT_METHOD_SET ||
             smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
                ibp->rvp.mkey_lease_timeout = 0;

        if (!valid_mkey) {
                switch (smp->method) {
                case IB_MGMT_METHOD_GET:
                        /* Bad mkey not a violation below level 2 */
                        if (ibp->rvp.mkeyprot < 2)
                                break;
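                        /* fall through */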
                case IB_MGMT_METHOD_SET:
                case IB_MGMT_METHOD_TRAP_REPRESS:
                        if (ibp->rvp.mkey_violations != 0xFFFF)
                                ++ibp->rvp.mkey_violations;
                        if (!ibp->rvp.mkey_lease_timeout &&
                            ibp->rvp.mkey_lease_period)
                                ibp->rvp.mkey_lease_timeout = jiffies +
                                        ibp->rvp.mkey_lease_period * HZ;
                        /* Generate a trap notice. */
                        qib_bad_mkey(ibp, smp);
                        ret = 1;
                }
        }

        return ret;
}

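/**
 * subn_get_portinfo - get port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Get PortInfo (see ch. 14.2.5.6).
 */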
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_devdata *dd;
        struct qib_pportdata *ppd;
        struct qib_ibport *ibp;
        struct ib_port_info *pip = (struct ib_port_info *)smp->data;
        u8 mtu;
        int ret;
        u32 state;
        u32 port_num = be32_to_cpu(smp->attr_mod);

        if (port_num == 0)
                port_num = port;
        else {
                if (port_num > ibdev->phys_port_cnt) {
                        smp->status |= IB_SMP_INVALID_FIELD;
                        ret = reply(smp);
                        goto bail;
                }
                if (port_num != port) {
                        ibp = to_iport(ibdev, port_num);
                        ret = check_mkey(ibp, smp, 0);
                        if (ret) {
                                ret = IB_MAD_RESULT_FAILURE;
                                goto bail;
                        }
                }
        }

        dd = dd_from_ibdev(ibdev);
        /* IB numbers ports from 1, hdw from 0 */
        ppd = dd->pport + (port_num - 1);
        ibp = &ppd->ibport_data;

        /* Clear all fields.  Only set the non-zero fields. */
        memset(smp->data, 0, sizeof(smp->data));

        /* Only return the mkey if the protection field allows it. */
        if (!(smp->method == IB_MGMT_METHOD_GET &&
              ibp->rvp.mkey != smp->mkey &&
              ibp->rvp.mkeyprot == 1))
                pip->mkey = ibp->rvp.mkey;
        pip->gid_prefix = ibp->rvp.gid_prefix;
        pip->lid = cpu_to_be16(ppd->lid);
        pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
        pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
        /* pip->diag_code; */
        pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
        pip->local_port_num = port;
        pip->link_width_enabled = ppd->link_width_enabled;
        pip->link_width_supported = ppd->link_width_supported;
        pip->link_width_active = ppd->link_width_active;
        state = dd->f_iblink_state(ppd->lastibcstat);
        pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

        pip->portphysstate_linkdown =
                (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
                (get_linkdowndefaultstate(ppd) ? 1 : 2);
        pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
        pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
                ppd->link_speed_enabled;
        switch (ppd->ibmtu) {
        default: /* something is wrong; fall through */
        case 4096:
                mtu = IB_MTU_4096;
                break;
        case 2048:
                mtu = IB_MTU_2048;
                break;
        case 1024:
                mtu = IB_MTU_1024;
                break;
        case 512:
                mtu = IB_MTU_512;
                break;
        case 256:
                mtu = IB_MTU_256;
                break;
        }
        pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
        pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
        pip->vl_high_limit = ibp->rvp.vl_high_limit;
        pip->vl_arb_high_cap =
                dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
        pip->vl_arb_low_cap =
                dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
        /* InitTypeReply = 0 */
        pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
        /* HCAs ignore VLStallCount and HOQLife */
        /* pip->vlstallcnt_hoqlife; */
        pip->operationalvl_pei_peo_fpi_fpo =
                dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
        pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
        /* P_KeyViolations are counted by hardware. */
        pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
        pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
        /* Only the hardware GUID is supported for now */
        pip->guid_cap = QIB_GUIDS_PER_PORT;
        pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
        /* 32.768 usec. response time (guessing) */
        pip->resv_resptimevalue = 3;
        pip->localphyerrors_overrunerrors =
                (get_phyerrthreshold(ppd) << 4) |
                get_overrunthreshold(ppd);
        /* pip->max_credit_hint; */
        if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
                u32 v;

                v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
                pip->link_roundtrip_latency[0] = v >> 16;
                pip->link_roundtrip_latency[1] = v >> 8;
                pip->link_roundtrip_latency[2] = v;
        }

        ret = reply(smp);

bail:
        return ret;
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
        struct qib_pportdata *ppd = dd->pport + port - 1;
        /*
         * always a kernel context, no locking needed.
         * If we get here with ppd setup, no need to check
         * that rcd is valid.
         */
        struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

        memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

        return 0;
}

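/*
 * Subn{Get}(P_KeyTable): only block 0 is implemented; the P_Keys are
 * read from the port's kernel context and byte-swapped into the reply.
 */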
static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
                              u8 port)
{
        u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
        u16 *p = (u16 *) smp->data;
        __be16 *q = (__be16 *) smp->data;

        /* 64 blocks of 32 16-bit P_Key entries */

        memset(smp->data, 0, sizeof(smp->data));
        if (startpx == 0) {
                struct qib_devdata *dd = dd_from_ibdev(ibdev);
                unsigned i, n = qib_get_npkeys(dd);

                get_pkeys(dd, port, p);

                for (i = 0; i < n; i++)
                        q[i] = cpu_to_be16(p[i]);
        } else
                smp->status |= IB_SMP_INVALID_FIELD;

        return reply(smp);
}

static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
        __be64 *p = (__be64 *) smp->data;
        unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

        /* 32 blocks of 8 64-bit GUIDs per block */

        if (startgx == 0 && pidx < dd->num_pports) {
                struct qib_pportdata *ppd = dd->pport + pidx;
                struct qib_ibport *ibp = &ppd->ibport_data;
                unsigned i;

                /* The first entry is read-only. */
                for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
                        ibp->guids[i - 1] = p[i];
        } else
                smp->status |= IB_SMP_INVALID_FIELD;

        /* The only GUID we support is the first read-only entry. */
        return subn_get_guidinfo(smp, ibdev, port);
}

/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct ib_port_info *pip = (struct ib_port_info *)smp->data;
        struct ib_event event;
        struct qib_devdata *dd;
        struct qib_pportdata *ppd;
        struct qib_ibport *ibp;
        u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
        unsigned long flags;
        u16 lid, smlid;
        u8 lwe;
        u8 lse;
        u8 state;
        u8 vls;
        u8 msl;
        u16 lstate;
        int ret, ore, mtu;
        u32 port_num = be32_to_cpu(smp->attr_mod);

        if (port_num == 0)
                port_num = port;
        else {
                if (port_num > ibdev->phys_port_cnt)
                        goto err;
                /* Port attributes can only be set on the receiving port */
                if (port_num != port)
                        goto get_only;
        }

        dd = dd_from_ibdev(ibdev);
        /* IB numbers ports from 1, hdw from 0 */
        ppd = dd->pport + (port_num - 1);
        ibp = &ppd->ibport_data;
        event.device = ibdev;
        event.element.port_num = port;

        ibp->rvp.mkey = pip->mkey;
        ibp->rvp.gid_prefix = pip->gid_prefix;
        ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

        lid = be16_to_cpu(pip->lid);
        /* Must be a valid unicast LID address. */
        if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
                smp->status |= IB_SMP_INVALID_FIELD;
        else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
                if (ppd->lid != lid)
                        qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
                if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
                        qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
                qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
                event.event = IB_EVENT_LID_CHANGE;
                ib_dispatch_event(&event);
        }

        smlid = be16_to_cpu(pip->sm_lid);
        msl = pip->neighbormtu_mastersmsl & 0xF;
        /* Must be a valid unicast LID address. */
        if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
                smp->status |= IB_SMP_INVALID_FIELD;
        else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
                spin_lock_irqsave(&ibp->rvp.lock, flags);
                if (ibp->rvp.sm_ah) {
                        if (smlid != ibp->rvp.sm_lid)
                                rdma_ah_set_dlid(&ibp->rvp.sm_ah->attr,
                                                 smlid);
                        if (msl != ibp->rvp.sm_sl)
                                rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
                }
                spin_unlock_irqrestore(&ibp->rvp.lock, flags);
                if (smlid != ibp->rvp.sm_lid)
                        ibp->rvp.sm_lid = smlid;
                if (msl != ibp->rvp.sm_sl)
                        ibp->rvp.sm_sl = msl;
                event.event = IB_EVENT_SM_CHANGE;
                ib_dispatch_event(&event);
        }

        /* Allow 1x or 4x to be set (see 14.2.6.6). */
        lwe = pip->link_width_enabled;
        if (lwe) {
                if (lwe == 0xFF)
                        set_link_width_enabled(ppd, ppd->link_width_supported);
                else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
                        smp->status |= IB_SMP_INVALID_FIELD;
                else if (lwe != ppd->link_width_enabled)
                        set_link_width_enabled(ppd, lwe);
        }

        lse = pip->linkspeedactive_enabled & 0xF;
        if (lse) {
                /*
                 * The IB 1.2 spec. only allows link speed values
                 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
                 * speeds.
                 */
                if (lse == 15)
                        set_link_speed_enabled(ppd,
                                               ppd->link_speed_supported);
                else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
                        smp->status |= IB_SMP_INVALID_FIELD;
                else if (lse != ppd->link_speed_enabled)
                        set_link_speed_enabled(ppd, lse);
        }

        /* Set link down default state. */
        switch (pip->portphysstate_linkdown & 0xF) {
        case 0: /* NOP */
                break;
        case 1: /* SLEEP */
                (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
                                        IB_LINKINITCMD_SLEEP);
                break;
        case 2: /* POLL */
                (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
                                        IB_LINKINITCMD_POLL);
                break;
        default:
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
        ibp->rvp.vl_high_limit = pip->vl_high_limit;
        (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
                                    ibp->rvp.vl_high_limit);

        mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
        if (mtu == -1)
                smp->status |= IB_SMP_INVALID_FIELD;
        else
                qib_set_mtu(ppd, mtu);

        /* Set operational VLs */
        vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
        if (vls) {
                if (vls > ppd->vls_supported)
                        smp->status |= IB_SMP_INVALID_FIELD;
                else
                        (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
        }

        if (pip->mkey_violations == 0)
                ibp->rvp.mkey_violations = 0;

        if (pip->pkey_violations == 0)
                ibp->rvp.pkey_violations = 0;

        if (pip->qkey_violations == 0)
                ibp->rvp.qkey_violations = 0;

        ore = pip->localphyerrors_overrunerrors;
        if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
                smp->status |= IB_SMP_INVALID_FIELD;

        if (set_overrunthreshold(ppd, (ore & 0xF)))
                smp->status |= IB_SMP_INVALID_FIELD;

        ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

        /*
         * Do the port state change now that the other link parameters
         * have been set.
         * Changing the port physical state only makes sense if the link
         * is down or is being set to down.
         */
        state = pip->linkspeed_portstate & 0xF;
        lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
        if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
                smp->status |= IB_SMP_INVALID_FIELD;

        /*
         * Only state changes of DOWN, ARM, and ACTIVE are valid
         * and must be in the correct state to take effect (see 7.2.6).
         */
        switch (state) {
        case IB_PORT_NOP:
                if (lstate == 0)
                        break;
                /* FALLTHROUGH */
        case IB_PORT_DOWN:
                if (lstate == 0)
                        lstate = QIB_IB_LINKDOWN_ONLY;
                else if (lstate == 1)
                        lstate = QIB_IB_LINKDOWN_SLEEP;
                else if (lstate == 2)
                        lstate = QIB_IB_LINKDOWN;
                else if (lstate == 3)
                        lstate = QIB_IB_LINKDOWN_DISABLE;
                else {
                        smp->status |= IB_SMP_INVALID_FIELD;
                        break;
                }
                spin_lock_irqsave(&ppd->lflags_lock, flags);
                ppd->lflags &= ~QIBL_LINKV;
                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
                qib_set_linkstate(ppd, lstate);
                /*
                 * Don't send a reply if the response would be sent
                 * through the disabled port.
                 */
                if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
                        ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
                        goto done;
                }
                qib_wait_linkstate(ppd, QIBL_LINKV, 10);
                break;
        case IB_PORT_ARMED:
                qib_set_linkstate(ppd, QIB_IB_LINKARM);
                break;
        case IB_PORT_ACTIVE:
                qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
                break;
        default:
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        if (clientrereg) {
                event.event = IB_EVENT_CLIENT_REREGISTER;
                ib_dispatch_event(&event);
        }

        ret = subn_get_portinfo(smp, ibdev, port);

        /* restore re-reg bit per o14-12.2.1 */
        pip->clientrereg_resv_subnetto |= clientrereg;

        goto get_only;

err:
        smp->status |= IB_SMP_INVALID_FIELD;
get_only:
        ret = subn_get_portinfo(smp, ibdev, port);
done:
        return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
        int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (ppd->pkeys[i] != key)
                        continue;
                if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
                        ppd->pkeys[i] = 0;
                        ret = 1;
                        goto bail;
                }
                break;
        }

        ret = 0;

bail:
        return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
        int i;
        u16 lkey = key & 0x7FFF;
        int any = 0;
        int ret;

        if (lkey == 0x7FFF) {
                ret = 0;
                goto bail;
        }

        /* Look for an empty slot or a matching PKEY. */
        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i]) {
                        any++;
                        continue;
                }
                /* If it matches exactly, try to increment the ref count */
                if (ppd->pkeys[i] == key) {
                        if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
                                ret = 0;
                                goto bail;
                        }
                        /* Lost the race. Look for an empty slot below. */
                        atomic_dec(&ppd->pkeyrefs[i]);
                        any++;
                }
                /*
                 * It makes no sense to have both the limited and unlimited
                 * PKEY set at the same time since the unlimited one will
                 * disable the limited one.
                 */
                if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (!any) {
                ret = -EBUSY;
                goto bail;
        }
        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i] &&
                    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
                        /* for qibstats, etc. */
                        ppd->pkeys[i] = key;
                        ret = 1;
                        goto bail;
                }
        }
        ret = -EBUSY;

bail:
        return ret;
}

/**
 * set_pkeys - set the PKEY table for the given port
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
        struct qib_pportdata *ppd;
        struct qib_ctxtdata *rcd;
        int i;
        int changed = 0;

        /*
         * IB port one/two always maps to context zero/one,
         * always a kernel context, no locking needed
         * If we get here with ppd setup, no need to check
         * that rcd is valid.
         */
        ppd = dd->pport + (port - 1);
        rcd = dd->rcd[ppd->hw_pidx];

        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
                u16 key = pkeys[i];
                u16 okey = rcd->pkeys[i];

                if (key == okey)
                        continue;
                /*
                 * The value of this PKEY table entry is changing.
                 * Remove the old entry in the hardware's array of PKEYs.
                 */
                if (okey & 0x7FFF)
                        changed |= rm_pkey(ppd, okey);
                if (key & 0x7FFF) {
                        int ret = add_pkey(ppd, key);

                        if (ret < 0)
                                key = 0;
                        else
                                changed |= ret;
                }
                rcd->pkeys[i] = key;
        }
        if (changed) {
                struct ib_event event;

                (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

                event.event = IB_EVENT_PKEY_CHANGE;
                event.device = &dd->verbs_dev.rdi.ibdev;
                event.element.port_num = port;
                ib_dispatch_event(&event);
        }
        return 0;
}

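/*
 * Subn{Set}(P_KeyTable): byte-swap the new table in place, apply it
 * with set_pkeys(), then return the resulting table via
 * subn_get_pkeytable().
 */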
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
                              u8 port)
{
        u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
        __be16 *p = (__be16 *) smp->data;
        u16 *q = (u16 *) smp->data;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        unsigned i, n = qib_get_npkeys(dd);

        for (i = 0; i < n; i++)
                q[i] = be16_to_cpu(p[i]);

        if (startpx != 0 || set_pkeys(dd, port, q) != 0)
                smp->status |= IB_SMP_INVALID_FIELD;

        return subn_get_pkeytable(smp, ibdev, port);
}

static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_ibport *ibp = to_iport(ibdev, port);
        u8 *p = (u8 *) smp->data;
        unsigned i;

        memset(smp->data, 0, sizeof(smp->data));

        if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
                smp->status |= IB_SMP_UNSUP_METHOD;
        else
                for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
                        *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

        return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_ibport *ibp = to_iport(ibdev, port);
        u8 *p = (u8 *) smp->data;
        unsigned i;

        if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
                smp->status |= IB_SMP_UNSUP_METHOD;
                return reply(smp);
        }

        for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
                ibp->sl_to_vl[i] = *p >> 4;
                ibp->sl_to_vl[i + 1] = *p & 0xF;
        }
        qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
                            _QIB_EVENT_SL2VL_CHANGE_BIT);

        return subn_get_sl_to_vl(smp, ibdev, port);
}

static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
                           u8 port)
{
        unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
        struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

        memset(smp->data, 0, sizeof(smp->data));

        if (ppd->vls_supported == IB_VL_VL0)
                smp->status |= IB_SMP_UNSUP_METHOD;
        else if (which == IB_VLARB_LOWPRI_0_31)
                (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
                                                   smp->data);
        else if (which == IB_VLARB_HIGHPRI_0_31)
                (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
                                                   smp->data);
        else
                smp->status |= IB_SMP_INVALID_FIELD;

        return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
                           u8 port)
{
        unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
        struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

        if (ppd->vls_supported == IB_VL_VL0)
                smp->status |= IB_SMP_UNSUP_METHOD;
        else if (which == IB_VLARB_LOWPRI_0_31)
                (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
                                                   smp->data);
        else if (which == IB_VLARB_HIGHPRI_0_31)
                (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
                                                   smp->data);
        else
                smp->status |= IB_SMP_INVALID_FIELD;

        return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        /*
         * For now, we only send the trap once so no need to process this.
         * o13-6, o13-7,
         * o14-3.a4 The SMA shall not send any message in response to a valid
         * SubnTrapRepress() message.
         */
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}

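/*
 * PerfMgt ClassPortInfo: advertise extended-width counters and, when
 * the hardware supports PSXmitWait sampling, congestion statistics
 * via the most significant bit of CapabilityMask2.
 */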
static int pma_get_classportinfo(struct ib_pma_mad *pmp,
                                 struct ib_device *ibdev)
{
        struct ib_class_port_info *p =
                (struct ib_class_port_info *)pmp->data;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);

        memset(pmp->data, 0, sizeof(pmp->data));

        if (pmp->mad_hdr.attr_mod != 0)
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

        /* Note that AllPortSelect is not valid */
        p->base_version = 1;
        p->class_version = 1;
        p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
        /*
         * Set the most significant bit of CM2 to indicate support for
         * congestion statistics
         */
        ib_set_cpi_capmask2(p,
                            dd->psxmitwait_supported <<
                            (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
        /*
         * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
         */
        ib_set_cpi_resp_time(p, 18);

        return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplescontrol *p =
                (struct ib_pma_portsamplescontrol *)pmp->data;
        struct qib_ibdev *dev = to_idev(ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        unsigned long flags;
        u8 port_select = p->port_select;

        memset(pmp->data, 0, sizeof(pmp->data));

        p->port_select = port_select;
        if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                goto bail;
        }
        spin_lock_irqsave(&ibp->rvp.lock, flags);
        p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
        p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
        p->counter_width = 4;   /* 32 bit counters */
        p->counter_mask0_9 = COUNTER_MASK0_9;
        p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
        p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
        p->tag = cpu_to_be16(ibp->rvp.pma_tag);
        p->counter_select[0] = ibp->rvp.pma_counter_select[0];
        p->counter_select[1] = ibp->rvp.pma_counter_select[1];
        p->counter_select[2] = ibp->rvp.pma_counter_select[2];
        p->counter_select[3] = ibp->rvp.pma_counter_select[3];
        p->counter_select[4] = ibp->rvp.pma_counter_select[4];
        spin_unlock_irqrestore(&ibp->rvp.lock, flags);

bail:
        return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplescontrol *p =
                (struct ib_pma_portsamplescontrol *)pmp->data;
        struct qib_ibdev *dev = to_idev(ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        unsigned long flags;
        u8 status, xmit_flags;
        int ret;

        if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                ret = reply((struct ib_smp *) pmp);
                goto bail;
        }

        spin_lock_irqsave(&ibp->rvp.lock, flags);

        /* Port Sampling code owns the PS* HW counters */
        xmit_flags = ppd->cong_stats.flags;
        ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
        status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
        if (status == IB_PMA_SAMPLE_STATUS_DONE ||
            (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
             xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
                ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
                ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
                ibp->rvp.pma_tag = be16_to_cpu(p->tag);
                ibp->rvp.pma_counter_select[0] = p->counter_select[0];
                ibp->rvp.pma_counter_select[1] = p->counter_select[1];
                ibp->rvp.pma_counter_select[2] = p->counter_select[2];
                ibp->rvp.pma_counter_select[3] = p->counter_select[3];
                ibp->rvp.pma_counter_select[4] = p->counter_select[4];
                dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
                                      ibp->rvp.pma_sample_start);
        }
        spin_unlock_irqrestore(&ibp->rvp.lock, flags);

        ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
        return ret;
}

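/*
 * Read the current value of the hardware PortSamples counter selected
 * by @sel; unknown selectors read as zero.
 */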
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
                       __be16 sel)
{
        u64 ret;

        switch (sel) {
        case IB_PMA_PORT_XMIT_DATA:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
                break;
        case IB_PMA_PORT_RCV_DATA:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
                break;
        case IB_PMA_PORT_XMIT_PKTS:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
                break;
        case IB_PMA_PORT_RCV_PKTS:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
                break;
        case IB_PMA_PORT_XMIT_WAIT:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
                break;
        default:
                ret = 0;
        }

        return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
        u32 delta;

        delta = get_counter(&ppd->ibport_data, ppd,
                            IB_PMA_PORT_XMIT_WAIT);
        return ppd->cong_stats.counter + delta;
}

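/*
 * Snapshot the PS* hardware sample counters into counter_cache so a
 * completed sample can still be reported after the sampling hardware
 * is re-armed for the next interval.
 */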
1310 static void cache_hw_sample_counters(struct qib_pportdata *ppd)
1311 {
1312         struct qib_ibport *ibp = &ppd->ibport_data;
1313
1314         ppd->cong_stats.counter_cache.psxmitdata =
1315                 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
1316         ppd->cong_stats.counter_cache.psrcvdata =
1317                 get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
1318         ppd->cong_stats.counter_cache.psxmitpkts =
1319                 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
1320         ppd->cong_stats.counter_cache.psrcvpkts =
1321                 get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
1322         ppd->cong_stats.counter_cache.psxmitwait =
1323                 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
1324 }
1325
1326 static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
1327                                         __be16 sel)
1328 {
1329         u64 ret;
1330
1331         switch (sel) {
1332         case IB_PMA_PORT_XMIT_DATA:
1333                 ret = ppd->cong_stats.counter_cache.psxmitdata;
1334                 break;
1335         case IB_PMA_PORT_RCV_DATA:
1336                 ret = ppd->cong_stats.counter_cache.psrcvdata;
1337                 break;
1338         case IB_PMA_PORT_XMIT_PKTS:
1339                 ret = ppd->cong_stats.counter_cache.psxmitpkts;
1340                 break;
1341         case IB_PMA_PORT_RCV_PKTS:
1342                 ret = ppd->cong_stats.counter_cache.psrcvpkts;
1343                 break;
1344         case IB_PMA_PORT_XMIT_WAIT:
1345                 ret = ppd->cong_stats.counter_cache.psxmitwait;
1346                 break;
1347         default:
1348                 ret = 0;
1349         }
1350
1351         return ret;
1352 }
1353
1354 static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
1355                                      struct ib_device *ibdev, u8 port)
1356 {
1357         struct ib_pma_portsamplesresult *p =
1358                 (struct ib_pma_portsamplesresult *)pmp->data;
1359         struct qib_ibdev *dev = to_idev(ibdev);
1360         struct qib_devdata *dd = dd_from_dev(dev);
1361         struct qib_ibport *ibp = to_iport(ibdev, port);
1362         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1363         unsigned long flags;
1364         u8 status;
1365         int i;
1366
1367         memset(pmp->data, 0, sizeof(pmp->data));
1368         spin_lock_irqsave(&ibp->rvp.lock, flags);
1369         p->tag = cpu_to_be16(ibp->rvp.pma_tag);
1370         if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1371                 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
1372         else {
1373                 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1374                 p->sample_status = cpu_to_be16(status);
1375                 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1376                         cache_hw_sample_counters(ppd);
1377                         ppd->cong_stats.counter =
1378                                 xmit_wait_get_value_delta(ppd);
1379                         dd->f_set_cntr_sample(ppd,
1380                                               QIB_CONG_TIMER_PSINTERVAL, 0);
1381                         ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1382                 }
1383         }
1384         for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
1385                 p->counter[i] = cpu_to_be32(
1386                         get_cache_hw_sample_counters(
1387                                 ppd, ibp->rvp.pma_counter_select[i]));
1388         spin_unlock_irqrestore(&ibp->rvp.lock, flags);
1389
1390         return reply((struct ib_smp *) pmp);
1391 }
1392
1393 static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
1394                                          struct ib_device *ibdev, u8 port)
1395 {
1396         struct ib_pma_portsamplesresult_ext *p =
1397                 (struct ib_pma_portsamplesresult_ext *)pmp->data;
1398         struct qib_ibdev *dev = to_idev(ibdev);
1399         struct qib_devdata *dd = dd_from_dev(dev);
1400         struct qib_ibport *ibp = to_iport(ibdev, port);
1401         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1402         unsigned long flags;
1403         u8 status;
1404         int i;
1405
1406         /* Port Sampling code owns the PS* HW counters */
1407         memset(pmp->data, 0, sizeof(pmp->data));
1408         spin_lock_irqsave(&ibp->rvp.lock, flags);
1409         p->tag = cpu_to_be16(ibp->rvp.pma_tag);
1410         if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1411                 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; /* 0, no swap needed */
1412         else {
1413                 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1414                 p->sample_status = cpu_to_be16(status);
1415                 /* ExtendedWidth: the sampled counters are 64 bits wide */
1416                 p->extended_width = cpu_to_be32(0x80000000);
1417                 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1418                         cache_hw_sample_counters(ppd);
1419                         ppd->cong_stats.counter =
1420                                 xmit_wait_get_value_delta(ppd);
1421                         dd->f_set_cntr_sample(ppd,
1422                                               QIB_CONG_TIMER_PSINTERVAL, 0);
1423                         ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1424                 }
1425         }
1426         for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
1427                 p->counter[i] = cpu_to_be64(
1428                         get_cache_hw_sample_counters(
1429                                 ppd, ibp->rvp.pma_counter_select[i]));
1430         spin_unlock_irqrestore(&ibp->rvp.lock, flags);
1431
1432         return reply((struct ib_smp *) pmp);
1433 }
1434
1435 static int pma_get_portcounters(struct ib_pma_mad *pmp,
1436                                 struct ib_device *ibdev, u8 port)
1437 {
1438         struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1439                 pmp->data;
1440         struct qib_ibport *ibp = to_iport(ibdev, port);
1441         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1442         struct qib_verbs_counters cntrs;
1443         u8 port_select = p->port_select;
1444
1445         qib_get_counters(ppd, &cntrs);
1446
1447         /* Adjust counters for any resets done. */
1448         cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1449         cntrs.link_error_recovery_counter -=
1450                 ibp->z_link_error_recovery_counter;
1451         cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1452         cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1453         cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
1454         cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1455         cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1456         cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1457         cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1458         cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1459         cntrs.local_link_integrity_errors -=
1460                 ibp->z_local_link_integrity_errors;
1461         cntrs.excessive_buffer_overrun_errors -=
1462                 ibp->z_excessive_buffer_overrun_errors;
1463         cntrs.vl15_dropped -= ibp->z_vl15_dropped;
1464         cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
1465
1466         memset(pmp->data, 0, sizeof(pmp->data));
1467
1468         p->port_select = port_select;
1469         if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
1470                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
1471
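        /*
         * The 64-bit hardware counts must saturate rather than wrap when
         * squeezed into the narrower PortCounters fields.  Each clamp
         * below could equally be written with min_t(), e.g.
         * (illustrative only):
         *
         *      p->symbol_error_counter = cpu_to_be16(
         *              (u16)min_t(u64, cntrs.symbol_error_counter, 0xFFFF));
         */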
1472         if (cntrs.symbol_error_counter > 0xFFFFUL)
1473                 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1474         else
1475                 p->symbol_error_counter =
1476                         cpu_to_be16((u16)cntrs.symbol_error_counter);
1477         if (cntrs.link_error_recovery_counter > 0xFFUL)
1478                 p->link_error_recovery_counter = 0xFF;
1479         else
1480                 p->link_error_recovery_counter =
1481                         (u8)cntrs.link_error_recovery_counter;
1482         if (cntrs.link_downed_counter > 0xFFUL)
1483                 p->link_downed_counter = 0xFF;
1484         else
1485                 p->link_downed_counter = (u8)cntrs.link_downed_counter;
1486         if (cntrs.port_rcv_errors > 0xFFFFUL)
1487                 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1488         else
1489                 p->port_rcv_errors =
1490                         cpu_to_be16((u16) cntrs.port_rcv_errors);
1491         if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1492                 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1493         else
1494                 p->port_rcv_remphys_errors =
1495                         cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
1496         if (cntrs.port_xmit_discards > 0xFFFFUL)
1497                 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1498         else
1499                 p->port_xmit_discards =
1500                         cpu_to_be16((u16)cntrs.port_xmit_discards);
1501         if (cntrs.local_link_integrity_errors > 0xFUL)
1502                 cntrs.local_link_integrity_errors = 0xFUL;
1503         if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1504                 cntrs.excessive_buffer_overrun_errors = 0xFUL;
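        /* LocalLinkIntegrityErrors in the high nibble,
         * ExcessiveBufferOverrunErrors in the low nibble.
         */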
1505         p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
1506                 cntrs.excessive_buffer_overrun_errors;
1507         if (cntrs.vl15_dropped > 0xFFFFUL)
1508                 p->vl15_dropped = cpu_to_be16(0xFFFF);
1509         else
1510                 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1511         if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
1512                 p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
1513         else
1514                 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
1515         if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
1516                 p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
1517         else
1518                 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
1519         if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
1520                 p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
1521         else
1522                 p->port_xmit_packets =
1523                         cpu_to_be32((u32)cntrs.port_xmit_packets);
1524         if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
1525                 p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
1526         else
1527                 p->port_rcv_packets =
1528                         cpu_to_be32((u32) cntrs.port_rcv_packets);
1529
1530         return reply((struct ib_smp *) pmp);
1531 }
1532
1533 static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
1534                                      struct ib_device *ibdev, u8 port)
1535 {
1536         /* Congestion PMA attribute data starts at offset 24, not 64 */
1537         struct ib_pma_portcounters_cong *p =
1538                 (struct ib_pma_portcounters_cong *)pmp->reserved;
1539         struct qib_verbs_counters cntrs;
1540         struct qib_ibport *ibp = to_iport(ibdev, port);
1541         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1542         struct qib_devdata *dd = dd_from_ppd(ppd);
1543         u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
1544         u64 xmit_wait_counter;
1545         unsigned long flags;
1546
1547         /*
1548          * These checks are done only in the GET handler because the
1549          * SET handler ends up calling it anyway.
1550          */
1551         if (!dd->psxmitwait_supported)
1552                 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
1553         if (port_select != port)
1554                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
1555
1556         qib_get_counters(ppd, &cntrs);
1557         spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
1558         xmit_wait_counter = xmit_wait_get_value_delta(ppd);
1559         spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
1560
1561         /* Adjust counters for any resets done. */
1562         cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1563         cntrs.link_error_recovery_counter -=
1564                 ibp->z_link_error_recovery_counter;
1565         cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1566         cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1567         cntrs.port_rcv_remphys_errors -=
1568                 ibp->z_port_rcv_remphys_errors;
1569         cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1570         cntrs.local_link_integrity_errors -=
1571                 ibp->z_local_link_integrity_errors;
1572         cntrs.excessive_buffer_overrun_errors -=
1573                 ibp->z_excessive_buffer_overrun_errors;
1574         cntrs.vl15_dropped -= ibp->z_vl15_dropped;
1575         cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
1576         cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1577         cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1578         cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1579         cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1580
1581         memset(pmp->reserved, 0, sizeof(pmp->reserved) +
1582                sizeof(pmp->data));
1583
1584         /*
1585          * The top 3 bits encode the rate units (picoseconds here);
1586          * the remaining bits hold the check rate itself.
1587          */
1588         p->port_check_rate =
1589                 cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
1590                             (dd->psxmitwait_check_rate &
1591                              ~(QIB_XMIT_RATE_PICO << 13)));
1592         p->port_adr_events = cpu_to_be64(0);
1593         p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
1594         p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
1595         p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
1596         p->port_xmit_packets =
1597                 cpu_to_be64(cntrs.port_xmit_packets);
1598         p->port_rcv_packets =
1599                 cpu_to_be64(cntrs.port_rcv_packets);
1600         if (cntrs.symbol_error_counter > 0xFFFFUL)
1601                 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1602         else
1603                 p->symbol_error_counter =
1604                         cpu_to_be16(
1605                                 (u16)cntrs.symbol_error_counter);
1606         if (cntrs.link_error_recovery_counter > 0xFFUL)
1607                 p->link_error_recovery_counter = 0xFF;
1608         else
1609                 p->link_error_recovery_counter =
1610                         (u8)cntrs.link_error_recovery_counter;
1611         if (cntrs.link_downed_counter > 0xFFUL)
1612                 p->link_downed_counter = 0xFF;
1613         else
1614                 p->link_downed_counter =
1615                         (u8)cntrs.link_downed_counter;
1616         if (cntrs.port_rcv_errors > 0xFFFFUL)
1617                 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1618         else
1619                 p->port_rcv_errors =
1620                         cpu_to_be16((u16) cntrs.port_rcv_errors);
1621         if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1622                 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1623         else
1624                 p->port_rcv_remphys_errors =
1625                         cpu_to_be16(
1626                                 (u16)cntrs.port_rcv_remphys_errors);
1627         if (cntrs.port_xmit_discards > 0xFFFFUL)
1628                 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1629         else
1630                 p->port_xmit_discards =
1631                         cpu_to_be16((u16)cntrs.port_xmit_discards);
1632         if (cntrs.local_link_integrity_errors > 0xFUL)
1633                 cntrs.local_link_integrity_errors = 0xFUL;
1634         if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1635                 cntrs.excessive_buffer_overrun_errors = 0xFUL;
1636         p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
1637                 cntrs.excessive_buffer_overrun_errors;
1638         if (cntrs.vl15_dropped > 0xFFFFUL)
1639                 p->vl15_dropped = cpu_to_be16(0xFFFF);
1640         else
1641                 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1642
1643         return reply((struct ib_smp *)pmp);
1644 }
1645
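/*
 * Sum the per-CPU PMA counter sets into a single snapshot for
 * reporting.
 */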
1646 static void qib_snapshot_pmacounters(
1647         struct qib_ibport *ibp,
1648         struct qib_pma_counters *pmacounters)
1649 {
1650         struct qib_pma_counters *p;
1651         int cpu;
1652
1653         memset(pmacounters, 0, sizeof(*pmacounters));
1654         for_each_possible_cpu(cpu) {
1655                 p = per_cpu_ptr(ibp->pmastats, cpu);
1656                 pmacounters->n_unicast_xmit += p->n_unicast_xmit;
1657                 pmacounters->n_unicast_rcv += p->n_unicast_rcv;
1658                 pmacounters->n_multicast_xmit += p->n_multicast_xmit;
1659                 pmacounters->n_multicast_rcv += p->n_multicast_rcv;
1660         }
1661 }
1662
1663 static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
1664                                     struct ib_device *ibdev, u8 port)
1665 {
1666         struct ib_pma_portcounters_ext *p =
1667                 (struct ib_pma_portcounters_ext *)pmp->data;
1668         struct qib_ibport *ibp = to_iport(ibdev, port);
1669         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1670         u64 swords, rwords, spkts, rpkts, xwait;
1671         struct qib_pma_counters pma;
1672         u8 port_select = p->port_select;
1673
1674         memset(pmp->data, 0, sizeof(pmp->data));
1675
1676         p->port_select = port_select;
1677         if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
1678                 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
1679                 goto bail;
1680         }
1681
1682         qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
1683
1684         /* Adjust counters for any resets done. */
1685         swords -= ibp->z_port_xmit_data;
1686         rwords -= ibp->z_port_rcv_data;
1687         spkts -= ibp->z_port_xmit_packets;
1688         rpkts -= ibp->z_port_rcv_packets;
1689
1690         p->port_xmit_data = cpu_to_be64(swords);
1691         p->port_rcv_data = cpu_to_be64(rwords);
1692         p->port_xmit_packets = cpu_to_be64(spkts);
1693         p->port_rcv_packets = cpu_to_be64(rpkts);
1694
1695         qib_snapshot_pmacounters(ibp, &pma);
1696
1697         p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
1698                 - ibp->z_unicast_xmit);
1699         p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
1700                 - ibp->z_unicast_rcv);
1701         p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
1702                 - ibp->z_multicast_xmit);
1703         p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
1704                 - ibp->z_multicast_rcv);
1705
1706 bail:
1707         return reply((struct ib_smp *) pmp);
1708 }
1709
1710 static int pma_set_portcounters(struct ib_pma_mad *pmp,
1711                                 struct ib_device *ibdev, u8 port)
1712 {
1713         struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1714                 pmp->data;
1715         struct qib_ibport *ibp = to_iport(ibdev, port);
1716         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1717         struct qib_verbs_counters cntrs;
1718
1719         /*
1720          * Since the HW doesn't support clearing counters, we save the
1721          * current count and subtract it from future responses.
1722          */
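        /* E.g. a counter that read 7 when it was "cleared" and reads 10
         * on a later query is reported as 10 - 7 = 3.
         */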
1723         qib_get_counters(ppd, &cntrs);
1724
1725         if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
1726                 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1727
1728         if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
1729                 ibp->z_link_error_recovery_counter =
1730                         cntrs.link_error_recovery_counter;
1731
1732         if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
1733                 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1734
1735         if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
1736                 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1737
1738         if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
1739                 ibp->z_port_rcv_remphys_errors =
1740                         cntrs.port_rcv_remphys_errors;
1741
1742         if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
1743                 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1744
1745         if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
1746                 ibp->z_local_link_integrity_errors =
1747                         cntrs.local_link_integrity_errors;
1748
1749         if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
1750                 ibp->z_excessive_buffer_overrun_errors =
1751                         cntrs.excessive_buffer_overrun_errors;
1752
1753         if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
1754                 ibp->rvp.n_vl15_dropped = 0;
1755                 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1756         }
1757
1758         if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
1759                 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1760
1761         if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
1762                 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1763
1764         if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
1765                 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1766
1767         if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
1768                 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1769
1770         return pma_get_portcounters(pmp, ibdev, port);
1771 }
1772
1773 static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
1774                                      struct ib_device *ibdev, u8 port)
1775 {
1776         struct qib_ibport *ibp = to_iport(ibdev, port);
1777         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1778         struct qib_devdata *dd = dd_from_ppd(ppd);
1779         struct qib_verbs_counters cntrs;
1780         u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
1781         int ret = 0;
1782         unsigned long flags;
1783
1784         qib_get_counters(ppd, &cntrs);
1785         /* Build the GET response before the counter baselines are reset below */
1786         ret = pma_get_portcounters_cong(pmp, ibdev, port);
1787
1788         if (counter_select & IB_PMA_SEL_CONG_XMIT) {
1789                 spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
1790                 ppd->cong_stats.counter = 0;
1791                 dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
1792                                       0x0);
1793                 spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
1794         }
1795         if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
1796                 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1797                 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1798                 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1799                 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1800         }
1801         if (counter_select & IB_PMA_SEL_CONG_ALL) {
1802                 ibp->z_symbol_error_counter =
1803                         cntrs.symbol_error_counter;
1804                 ibp->z_link_error_recovery_counter =
1805                         cntrs.link_error_recovery_counter;
1806                 ibp->z_link_downed_counter =
1807                         cntrs.link_downed_counter;
1808                 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1809                 ibp->z_port_rcv_remphys_errors =
1810                         cntrs.port_rcv_remphys_errors;
1811                 ibp->z_port_xmit_discards =
1812                         cntrs.port_xmit_discards;
1813                 ibp->z_local_link_integrity_errors =
1814                         cntrs.local_link_integrity_errors;
1815                 ibp->z_excessive_buffer_overrun_errors =
1816                         cntrs.excessive_buffer_overrun_errors;
1817                 ibp->rvp.n_vl15_dropped = 0;
1818                 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1819         }
1820
1821         return ret;
1822 }
1823
1824 static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
1825                                     struct ib_device *ibdev, u8 port)
1826 {
1827         struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1828                 pmp->data;
1829         struct qib_ibport *ibp = to_iport(ibdev, port);
1830         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1831         u64 swords, rwords, spkts, rpkts, xwait;
1832         struct qib_pma_counters pma;
1833
1834         qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
1835
1836         if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
1837                 ibp->z_port_xmit_data = swords;
1838
1839         if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
1840                 ibp->z_port_rcv_data = rwords;
1841
1842         if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
1843                 ibp->z_port_xmit_packets = spkts;
1844
1845         if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
1846                 ibp->z_port_rcv_packets = rpkts;
1847
1848         qib_snapshot_pmacounters(ibp, &pma);
1849
1850         if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
1851                 ibp->z_unicast_xmit = pma.n_unicast_xmit;
1852
1853         if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
1854                 ibp->z_unicast_rcv = pma.n_unicast_rcv;
1855
1856         if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
1857                 ibp->z_multicast_xmit = pma.n_multicast_xmit;
1858
1859         if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
1860                 ibp->z_multicast_rcv = pma.n_multicast_rcv;
1861
1862         return pma_get_portcounters_ext(pmp, ibdev, port);
1863 }
1864
1865 static int process_subn(struct ib_device *ibdev, int mad_flags,
1866                         u8 port, const struct ib_mad *in_mad,
1867                         struct ib_mad *out_mad)
1868 {
1869         struct ib_smp *smp = (struct ib_smp *)out_mad;
1870         struct qib_ibport *ibp = to_iport(ibdev, port);
1871         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1872         int ret;
1873
1874         *out_mad = *in_mad;
1875         if (smp->class_version != 1) {
1876                 smp->status |= IB_SMP_UNSUP_VERSION;
1877                 ret = reply(smp);
1878                 goto bail;
1879         }
1880
1881         ret = check_mkey(ibp, smp, mad_flags);
1882         if (ret) {
1883                 u32 port_num = be32_to_cpu(smp->attr_mod);
1884
1885                 /*
1886                  * A Get/Set PortInfo for another port already has the
1887                  * M_Key checked there when the receiving port's M_Key
1888                  * is OK.  Repeat the check here on the failure path so
1889                  * that the error counters are incremented when the
1890                  * M_Key fails to match on *both* ports.
1891                  */
1892                 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
1893                     (smp->method == IB_MGMT_METHOD_GET ||
1894                      smp->method == IB_MGMT_METHOD_SET) &&
1895                     port_num && port_num <= ibdev->phys_port_cnt &&
1896                     port != port_num)
1897                         (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
1898                 ret = IB_MAD_RESULT_FAILURE;
1899                 goto bail;
1900         }
1901
1902         switch (smp->method) {
1903         case IB_MGMT_METHOD_GET:
1904                 switch (smp->attr_id) {
1905                 case IB_SMP_ATTR_NODE_DESC:
1906                         ret = subn_get_nodedescription(smp, ibdev);
1907                         goto bail;
1908                 case IB_SMP_ATTR_NODE_INFO:
1909                         ret = subn_get_nodeinfo(smp, ibdev, port);
1910                         goto bail;
1911                 case IB_SMP_ATTR_GUID_INFO:
1912                         ret = subn_get_guidinfo(smp, ibdev, port);
1913                         goto bail;
1914                 case IB_SMP_ATTR_PORT_INFO:
1915                         ret = subn_get_portinfo(smp, ibdev, port);
1916                         goto bail;
1917                 case IB_SMP_ATTR_PKEY_TABLE:
1918                         ret = subn_get_pkeytable(smp, ibdev, port);
1919                         goto bail;
1920                 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1921                         ret = subn_get_sl_to_vl(smp, ibdev, port);
1922                         goto bail;
1923                 case IB_SMP_ATTR_VL_ARB_TABLE:
1924                         ret = subn_get_vl_arb(smp, ibdev, port);
1925                         goto bail;
1926                 case IB_SMP_ATTR_SM_INFO:
1927                         if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
1928                                 ret = IB_MAD_RESULT_SUCCESS |
1929                                         IB_MAD_RESULT_CONSUMED;
1930                                 goto bail;
1931                         }
1932                         if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
1933                                 ret = IB_MAD_RESULT_SUCCESS;
1934                                 goto bail;
1935                         }
1936                         /* FALLTHROUGH */
1937                 default:
1938                         smp->status |= IB_SMP_UNSUP_METH_ATTR;
1939                         ret = reply(smp);
1940                         goto bail;
1941                 }
1942
1943         case IB_MGMT_METHOD_SET:
1944                 switch (smp->attr_id) {
1945                 case IB_SMP_ATTR_GUID_INFO:
1946                         ret = subn_set_guidinfo(smp, ibdev, port);
1947                         goto bail;
1948                 case IB_SMP_ATTR_PORT_INFO:
1949                         ret = subn_set_portinfo(smp, ibdev, port);
1950                         goto bail;
1951                 case IB_SMP_ATTR_PKEY_TABLE:
1952                         ret = subn_set_pkeytable(smp, ibdev, port);
1953                         goto bail;
1954                 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1955                         ret = subn_set_sl_to_vl(smp, ibdev, port);
1956                         goto bail;
1957                 case IB_SMP_ATTR_VL_ARB_TABLE:
1958                         ret = subn_set_vl_arb(smp, ibdev, port);
1959                         goto bail;
1960                 case IB_SMP_ATTR_SM_INFO:
1961                         if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
1962                                 ret = IB_MAD_RESULT_SUCCESS |
1963                                         IB_MAD_RESULT_CONSUMED;
1964                                 goto bail;
1965                         }
1966                         if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
1967                                 ret = IB_MAD_RESULT_SUCCESS;
1968                                 goto bail;
1969                         }
1970                         /* FALLTHROUGH */
1971                 default:
1972                         smp->status |= IB_SMP_UNSUP_METH_ATTR;
1973                         ret = reply(smp);
1974                         goto bail;
1975                 }
1976
1977         case IB_MGMT_METHOD_TRAP_REPRESS:
1978                 if (smp->attr_id == IB_SMP_ATTR_NOTICE)
1979                         ret = subn_trap_repress(smp, ibdev, port);
1980                 else {
1981                         smp->status |= IB_SMP_UNSUP_METH_ATTR;
1982                         ret = reply(smp);
1983                 }
1984                 goto bail;
1985
1986         case IB_MGMT_METHOD_TRAP:
1987         case IB_MGMT_METHOD_REPORT:
1988         case IB_MGMT_METHOD_REPORT_RESP:
1989         case IB_MGMT_METHOD_GET_RESP:
1990                 /*
1991                  * The ib_mad module will call us to process responses
1992                  * before checking for other consumers.
1993                  * Just tell the caller to process it normally.
1994                  */
1995                 ret = IB_MAD_RESULT_SUCCESS;
1996                 goto bail;
1997
1998         case IB_MGMT_METHOD_SEND:
1999                 if (ib_get_smp_direction(smp) &&
2000                     smp->attr_id == QIB_VENDOR_IPG) {
2001                         ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
2002                                               smp->data[0]);
2003                         ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
2004                 } else
2005                         ret = IB_MAD_RESULT_SUCCESS;
2006                 goto bail;
2007
2008         default:
2009                 smp->status |= IB_SMP_UNSUP_METHOD;
2010                 ret = reply(smp);
2011         }
2012
2013 bail:
2014         return ret;
2015 }
2016
2017 static int process_perf(struct ib_device *ibdev, u8 port,
2018                         const struct ib_mad *in_mad,
2019                         struct ib_mad *out_mad)
2020 {
2021         struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
2022         int ret;
2023
2024         *out_mad = *in_mad;
2025         if (pmp->mad_hdr.class_version != 1) {
2026                 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
2027                 ret = reply((struct ib_smp *) pmp);
2028                 goto bail;
2029         }
2030
2031         switch (pmp->mad_hdr.method) {
2032         case IB_MGMT_METHOD_GET:
2033                 switch (pmp->mad_hdr.attr_id) {
2034                 case IB_PMA_CLASS_PORT_INFO:
2035                         ret = pma_get_classportinfo(pmp, ibdev);
2036                         goto bail;
2037                 case IB_PMA_PORT_SAMPLES_CONTROL:
2038                         ret = pma_get_portsamplescontrol(pmp, ibdev, port);
2039                         goto bail;
2040                 case IB_PMA_PORT_SAMPLES_RESULT:
2041                         ret = pma_get_portsamplesresult(pmp, ibdev, port);
2042                         goto bail;
2043                 case IB_PMA_PORT_SAMPLES_RESULT_EXT:
2044                         ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
2045                         goto bail;
2046                 case IB_PMA_PORT_COUNTERS:
2047                         ret = pma_get_portcounters(pmp, ibdev, port);
2048                         goto bail;
2049                 case IB_PMA_PORT_COUNTERS_EXT:
2050                         ret = pma_get_portcounters_ext(pmp, ibdev, port);
2051                         goto bail;
2052                 case IB_PMA_PORT_COUNTERS_CONG:
2053                         ret = pma_get_portcounters_cong(pmp, ibdev, port);
2054                         goto bail;
2055                 default:
2056                         pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
2057                         ret = reply((struct ib_smp *) pmp);
2058                         goto bail;
2059                 }
2060
2061         case IB_MGMT_METHOD_SET:
2062                 switch (pmp->mad_hdr.attr_id) {
2063                 case IB_PMA_PORT_SAMPLES_CONTROL:
2064                         ret = pma_set_portsamplescontrol(pmp, ibdev, port);
2065                         goto bail;
2066                 case IB_PMA_PORT_COUNTERS:
2067                         ret = pma_set_portcounters(pmp, ibdev, port);
2068                         goto bail;
2069                 case IB_PMA_PORT_COUNTERS_EXT:
2070                         ret = pma_set_portcounters_ext(pmp, ibdev, port);
2071                         goto bail;
2072                 case IB_PMA_PORT_COUNTERS_CONG:
2073                         ret = pma_set_portcounters_cong(pmp, ibdev, port);
2074                         goto bail;
2075                 default:
2076                         pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
2077                         ret = reply((struct ib_smp *) pmp);
2078                         goto bail;
2079                 }
2080
2081         case IB_MGMT_METHOD_TRAP:
2082         case IB_MGMT_METHOD_GET_RESP:
2083                 /*
2084                  * The ib_mad module will call us to process responses
2085                  * before checking for other consumers.
2086                  * Just tell the caller to process it normally.
2087                  */
2088                 ret = IB_MAD_RESULT_SUCCESS;
2089                 goto bail;
2090
2091         default:
2092                 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
2093                 ret = reply((struct ib_smp *) pmp);
2094         }
2095
2096 bail:
2097         return ret;
2098 }
2099
2100 static int cc_get_classportinfo(struct ib_cc_mad *ccp,
2101                                 struct ib_device *ibdev)
2102 {
2103         struct ib_cc_classportinfo_attr *p =
2104                 (struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
2105
2106         memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2107
2108         p->base_version = 1;
2109         p->class_version = 1;
2110         p->cap_mask = 0;
2111
2112         /*
2113          * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
2114          */
2115         p->resp_time_value = 18;
2116
2117         return reply((struct ib_smp *) ccp);
2118 }
2119
2120 static int cc_get_congestion_info(struct ib_cc_mad *ccp,
2121                                 struct ib_device *ibdev, u8 port)
2122 {
2123         struct ib_cc_info_attr *p =
2124                 (struct ib_cc_info_attr *)ccp->mgmt_data;
2125         struct qib_ibport *ibp = to_iport(ibdev, port);
2126         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2127
2128         memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2129
2130         p->congestion_info = 0;
2131         p->control_table_cap = ppd->cc_max_table_entries;
2132
2133         return reply((struct ib_smp *) ccp);
2134 }
2135
2136 static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
2137                                 struct ib_device *ibdev, u8 port)
2138 {
2139         int i;
2140         struct ib_cc_congestion_setting_attr *p =
2141                 (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
2142         struct qib_ibport *ibp = to_iport(ibdev, port);
2143         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2144         struct ib_cc_congestion_entry_shadow *entries;
2145
2146         memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2147
2148         spin_lock(&ppd->cc_shadow_lock);
2149
2150         entries = ppd->congestion_entries_shadow->entries;
2151         p->port_control = cpu_to_be16(
2152                 ppd->congestion_entries_shadow->port_control);
2153         p->control_map = cpu_to_be16(
2154                 ppd->congestion_entries_shadow->control_map);
2155         for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
2156                 p->entries[i].ccti_increase = entries[i].ccti_increase;
2157                 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
2158                 p->entries[i].trigger_threshold = entries[i].trigger_threshold;
2159                 p->entries[i].ccti_min = entries[i].ccti_min;
2160         }
2161
2162         spin_unlock(&ppd->cc_shadow_lock);
2163
2164         return reply((struct ib_smp *) ccp);
2165 }
2166
2167 static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
2168                                 struct ib_device *ibdev, u8 port)
2169 {
2170         struct ib_cc_table_attr *p =
2171                 (struct ib_cc_table_attr *)ccp->mgmt_data;
2172         struct qib_ibport *ibp = to_iport(ibdev, port);
2173         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2174         u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
2175         u32 max_cct_block;
2176         u32 cct_entry;
2177         struct ib_cc_table_entry_shadow *entries;
2178         int i;
2179
2180         /* Is the table index more than what is supported? */
2181         if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
2182                 goto bail;
2183
2184         memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
2185
2186         spin_lock(&ppd->cc_shadow_lock);
2187
2188         max_cct_block =
2189                 (ppd->ccti_entries_shadow->ccti_last_entry + 1) / IB_CCT_ENTRIES;
2190         max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
2191
2192         if (cct_block_index > max_cct_block) {
2193                 spin_unlock(&ppd->cc_shadow_lock);
2194                 goto bail;
2195         }
2196
2197         ccp->attr_mod = cpu_to_be32(cct_block_index);
2198
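        /*
         * Index of the last entry in the requested block: block N covers
         * table entries N * IB_CCT_ENTRIES .. (N + 1) * IB_CCT_ENTRIES - 1.
         */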
2199         cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);
2200
2201         cct_entry--;
2202
2203         p->ccti_limit = cpu_to_be16(cct_entry);
2204
2205         entries = &ppd->ccti_entries_shadow->
2206                         entries[IB_CCT_ENTRIES * cct_block_index];
2207         cct_entry %= IB_CCT_ENTRIES;
2208
2209         for (i = 0; i <= cct_entry; i++)
2210                 p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
2211
2212         spin_unlock(&ppd->cc_shadow_lock);
2213
2214         return reply((struct ib_smp *) ccp);
2215
2216 bail:
2217         return reply_failure((struct ib_smp *) ccp);
2218 }
2219
2220 static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
2221                                 struct ib_device *ibdev, u8 port)
2222 {
2223         struct ib_cc_congestion_setting_attr *p =
2224                 (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
2225         struct qib_ibport *ibp = to_iport(ibdev, port);
2226         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2227         int i;
2228
2229         ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
2230
2231         for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
2232                 ppd->congestion_entries[i].ccti_increase =
2233                         p->entries[i].ccti_increase;
2234
2235                 ppd->congestion_entries[i].ccti_timer =
2236                         be16_to_cpu(p->entries[i].ccti_timer);
2237
2238                 ppd->congestion_entries[i].trigger_threshold =
2239                         p->entries[i].trigger_threshold;
2240
2241                 ppd->congestion_entries[i].ccti_min =
2242                         p->entries[i].ccti_min;
2243         }
2244
2245         return reply((struct ib_smp *) ccp);
2246 }
2247
2248 static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
2249                                 struct ib_device *ibdev, u8 port)
2250 {
2251         struct ib_cc_table_attr *p =
2252                 (struct ib_cc_table_attr *)ccp->mgmt_data;
2253         struct qib_ibport *ibp = to_iport(ibdev, port);
2254         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2255         u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
2256         u32 cct_entry;
2257         struct ib_cc_table_entry_shadow *entries;
2258         int i;
2259
2260         /* Is the table index more than what is supported? */
2261         if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
2262                 goto bail;
2263
2264         /* If this packet is the first in the sequence then
2265          * zero the total table entry count.
2266          */
2267         if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
2268                 ppd->total_cct_entry = 0;
2269
2270         cct_entry = be16_to_cpu(p->ccti_limit) % IB_CCT_ENTRIES;
2271
2272         /* ccti_limit is 0 to 63 */
2273         ppd->total_cct_entry += (cct_entry + 1);
2274
2275         if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
2276                 goto bail;
2277
2278         ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
2279
2280         entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
2281
2282         for (i = 0; i <= cct_entry; i++)
2283                 entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
2284
2285         spin_lock(&ppd->cc_shadow_lock);
2286
2287         ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
2288         memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
2289                 (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));
2290
2291         ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
2292         ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
2293         memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
2294                 IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));
2295
2296         spin_unlock(&ppd->cc_shadow_lock);
2297
2298         return reply((struct ib_smp *) ccp);
2299
2300 bail:
2301         return reply_failure((struct ib_smp *) ccp);
2302 }
2303
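/* CC_Key checking is not implemented; accept every CC MAD. */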
2304 static int check_cc_key(struct qib_ibport *ibp,
2305                         struct ib_cc_mad *ccp, int mad_flags)
2306 {
2307         return 0;
2308 }
2309
2310 static int process_cc(struct ib_device *ibdev, int mad_flags,
2311                         u8 port, const struct ib_mad *in_mad,
2312                         struct ib_mad *out_mad)
2313 {
2314         struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
2315         struct qib_ibport *ibp = to_iport(ibdev, port);
2316         int ret;
2317
2318         *out_mad = *in_mad;
2319
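        /* Only the CongestionControl class version 2 is supported */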
2320         if (ccp->class_version != 2) {
2321                 ccp->status |= IB_SMP_UNSUP_VERSION;
2322                 ret = reply((struct ib_smp *)ccp);
2323                 goto bail;
2324         }
2325
2326         ret = check_cc_key(ibp, ccp, mad_flags);
2327         if (ret)
2328                 goto bail;
2329
2330         switch (ccp->method) {
2331         case IB_MGMT_METHOD_GET:
2332                 switch (ccp->attr_id) {
2333                 case IB_CC_ATTR_CLASSPORTINFO:
2334                         ret = cc_get_classportinfo(ccp, ibdev);
2335                         goto bail;
2336
2337                 case IB_CC_ATTR_CONGESTION_INFO:
2338                         ret = cc_get_congestion_info(ccp, ibdev, port);
2339                         goto bail;
2340
2341                 case IB_CC_ATTR_CA_CONGESTION_SETTING:
2342                         ret = cc_get_congestion_setting(ccp, ibdev, port);
2343                         goto bail;
2344
2345                 case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
2346                         ret = cc_get_congestion_control_table(ccp, ibdev, port);
2347                         goto bail;
2348
2350                 default:
2351                         ccp->status |= IB_SMP_UNSUP_METH_ATTR;
2352                         ret = reply((struct ib_smp *) ccp);
2353                         goto bail;
2354                 }
2355
2356         case IB_MGMT_METHOD_SET:
2357                 switch (ccp->attr_id) {
2358                 case IB_CC_ATTR_CA_CONGESTION_SETTING:
2359                         ret = cc_set_congestion_setting(ccp, ibdev, port);
2360                         goto bail;
2361
2362                 case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
2363                         ret = cc_set_congestion_control_table(ccp, ibdev, port);
2364                         goto bail;
2365
2367                 default:
2368                         ccp->status |= IB_SMP_UNSUP_METH_ATTR;
2369                         ret = reply((struct ib_smp *) ccp);
2370                         goto bail;
2371                 }
2372
2373         case IB_MGMT_METHOD_GET_RESP:
2374                 /*
2375                  * The ib_mad module will call us to process responses
2376                  * before checking for other consumers.
2377                  * Just tell the caller to process it normally.
2378                  */
2379                 ret = IB_MAD_RESULT_SUCCESS;
2380                 goto bail;
2381
2382         case IB_MGMT_METHOD_TRAP:
2383         default:
2384                 ccp->status |= IB_SMP_UNSUP_METHOD;
2385                 ret = reply((struct ib_smp *) ccp);
2386         }
2387
2388 bail:
2389         return ret;
2390 }
2391
2392 /**
2393  * qib_process_mad - process an incoming MAD packet
2394  * @ibdev: the infiniband device this packet came in on
2395  * @mad_flags: MAD flags
2396  * @port: the port number this packet came in on
2397  * @in_wc: the work completion entry for this packet
2398  * @in_grh: the global route header for this packet
2399  * @in: the incoming MAD header
2400  * @out: the outgoing MAD reply header
2401  *
2402  * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
2403  * interested in processing.
2404  *
2405  * Note that the verbs framework has already done the MAD sanity checks,
2406  * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
2407  * MADs.
2408  *
2409  * This is called by the ib_mad module.
2410  */
2411 int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
2412                     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
2413                     const struct ib_mad_hdr *in, size_t in_mad_size,
2414                     struct ib_mad_hdr *out, size_t *out_mad_size,
2415                     u16 *out_mad_pkey_index)
2416 {
2417         int ret;
2418         struct qib_ibport *ibp = to_iport(ibdev, port);
2419         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2420         const struct ib_mad *in_mad = (const struct ib_mad *)in;
2421         struct ib_mad *out_mad = (struct ib_mad *)out;
2422
2423         if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
2424                          *out_mad_size != sizeof(*out_mad)))
2425                 return IB_MAD_RESULT_FAILURE;
2426
2427         switch (in_mad->mad_hdr.mgmt_class) {
2428         case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
2429         case IB_MGMT_CLASS_SUBN_LID_ROUTED:
2430                 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
2431                 goto bail;
2432
2433         case IB_MGMT_CLASS_PERF_MGMT:
2434                 ret = process_perf(ibdev, port, in_mad, out_mad);
2435                 goto bail;
2436
2437         case IB_MGMT_CLASS_CONG_MGMT:
2438                 if (!ppd->congestion_entries_shadow ||
2439                     !qib_cc_table_size) {
2440                         ret = IB_MAD_RESULT_SUCCESS;
2441                         goto bail;
2442                 }
2443                 ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
2444                 goto bail;
2445
2446         default:
2447                 ret = IB_MAD_RESULT_SUCCESS;
2448         }
2449
2450 bail:
2451         return ret;
2452 }
2453
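/*
 * Once-a-second timer: when the current hardware sample interval has
 * completed, cache the PS* sample counters, fold the xmit-wait delta
 * into the running total, and start a new sample interval.
 */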
2454 static void xmit_wait_timer_func(unsigned long opaque)
2455 {
2456         struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
2457         struct qib_devdata *dd = dd_from_ppd(ppd);
2458         unsigned long flags;
2459         u8 status;
2460
2461         spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
2462         if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
2463                 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
2464                 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
2465                         /* save counter cache */
2466                         cache_hw_sample_counters(ppd);
2467                         ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
2468                 } else
2469                         goto done;
2470         }
2471         ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
2472         dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
2473 done:
2474         spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
2475         mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
2476 }
2477
2478 void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
2479 {
2480         struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
2481         struct qib_devdata *dd = container_of(ibdev,
2482                                               struct qib_devdata, verbs_dev);
2483
2484         /* Initialize xmit_wait structure */
2485         dd->pport[port_idx].cong_stats.counter = 0;
2486         init_timer(&dd->pport[port_idx].cong_stats.timer);
2487         dd->pport[port_idx].cong_stats.timer.function = xmit_wait_timer_func;
2488         dd->pport[port_idx].cong_stats.timer.data =
2489                 (unsigned long)(&dd->pport[port_idx]);
2490         dd->pport[port_idx].cong_stats.timer.expires = 0;
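        /* An expiry of 0 is already in the past, so the timer fires on
         * the next tick and then re-arms itself once a second.
         */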
2491         add_timer(&dd->pport[port_idx].cong_stats.timer);
2492 }
2493
2494 void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
2495 {
2496         struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
2497         struct qib_devdata *dd = container_of(ibdev,
2498                                               struct qib_devdata, verbs_dev);
2499
2500         if (dd->pport[port_idx].cong_stats.timer.data)
2501                 del_timer_sync(&dd->pport[port_idx].cong_stats.timer);
2502
2503         if (dd->pport[port_idx].ibport_data.smi_ah)
2504                 rdma_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah);
2505 }