/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ	 = 10,
	MTHCA_FLIGHT_LIMIT	 = 9,
	MTHCA_UD_HEADER_SIZE	 = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE	 = 16  /* inline data segment chunk */
};
enum {
	MTHCA_QP_STATE_RST	= 0,
	MTHCA_QP_STATE_INIT	= 1,
	MTHCA_QP_STATE_RTR	= 2,
	MTHCA_QP_STATE_RTS	= 3,
	MTHCA_QP_STATE_SQE	= 4,
	MTHCA_QP_STATE_SQD	= 5,
	MTHCA_QP_STATE_ERR	= 6,
	MTHCA_QP_STATE_DRAINING	= 7
};
enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};
enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,

	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,

	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
	MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue; /* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;	/* Reserved on Tavor */
	u8     sq_size_stride;	/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));
struct mthca_qp_param {
	__be32 opt_param_mask;
	__be32 reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
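/*
 * Example (illustrative sketch, not part of the driver): each optional-
 * parameter bit tells MODIFY_QP which optional context fields to apply,
 * so a caller always sets the field and its opt_param_mask bit together.
 * This mirrors how min_rnr_timer is programmed in __mthca_modify_qp()
 * below; the helper name here is hypothetical.
 */
#if 0
static void example_set_min_rnr_timer(struct mthca_qp_param *qp_param,
				      u8 min_rnr_timer)
{
	/* the RNR NAK timer lives in bits 28:24 of rnr_nextrecvpsn */
	qp_param->context.rnr_nextrecvpsn |=
		cpu_to_be32((u32) min_rnr_timer << 24);
	qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
}
#endif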
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}
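/*
 * Worked example: with sq.wqe_shift == 6 (64-byte WQEs) and
 * send_wqe_offset == 0x1000, send WQE 70 lives at queue offset
 * 0x1000 + (70 << 6) = 0x2180.  In the indirect case with 4 KB pages
 * that is page_list[0x2180 >> PAGE_SHIFT].buf + (0x2180 & 0xfff),
 * i.e. page 2, offset 0x180.
 */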
static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event %d for bogus QP %08x\n",
			   event_type, qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}
static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
}
static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
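/*
 * Example: a QP with max_dest_rd_atomic == 0 has no responder
 * resources, so remote reads and atomics cannot be honored.  Passing
 * qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE
 * with dest_rd_atomic == 0 therefore yields only MTHCA_QP_BIT_RWE;
 * RRE/RAE stay clear until responder resources are granted.
 */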
static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}
static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_rdma_ah_attr(struct mthca_dev *dev,
			    struct rdma_ah_attr *ib_ah_attr,
			    struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0xf,
						     ib_ah_attr->port_num);
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err = 0;
	struct mthca_mailbox *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
	if (err) {
		mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
		goto out_mailbox;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp->state                = to_ib_qp_state(mthca_state);
	qp_attr->qp_state        = qp->state;
	qp_attr->path_mtu        = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state  =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey            = be32_to_cpu(context->qkey);
	qp_attr->rq_psn          = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn          = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num     = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->transport == RC || qp->transport == UC) {
		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->port_num   =
		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);

	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout       = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt     = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry     = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_timeout   = context->alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state        = qp_attr->qp_state;
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap         = qp_attr->cap;
	qp_init_attr->sq_sig_type = qp->sq_policy;

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
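/*
 * Example caller (illustrative sketch, not part of the driver):
 * consumers reach mthca_query_qp() through the ib_query_qp() verb.
 * The mask and fields below are just one possible query; the helper
 * name is hypothetical.
 */
#if 0
static void example_query(struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PATH_MTU, &init_attr))
		pr_info("QP 0x%x: state %d, path MTU enum %d\n",
			qp->qp_num, attr.qp_state, attr.path_mtu);
}
#endif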
static int mthca_path_set(struct mthca_dev *dev, const struct rdma_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  ah->grh.sgid_index, dev->limits.gid_table_len - 1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

	return 0;
}
static int __mthca_modify_qp(struct ib_qp *ibqp,
			     const struct ib_qp_attr *attr, int attr_mask,
			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	int err = -EINVAL;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags  = cpu_to_be32((to_mthca_state(new_state) << 28) |
					 (to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (ibqp->qp_type == IB_QPT_RC &&
	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

		if (mthca_is_memfree(dev))
			qp_context->rlkey_arbel_sched_queue |= sched_queue;
		else
			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

		qp_param->opt_param_mask |=
			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len - 1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event);
	if (err) {
		mthca_warn(dev, "modify QP %d->%d returned %d.\n",
			   cur_state, new_state, err);
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

		mthca_wq_reset(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_reset(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);
out:
	return err;
}
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);
	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_UNSPECIFIED)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len - 1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
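/*
 * Example caller (illustrative sketch, not part of the driver): the
 * canonical RESET->INIT step through the ib_modify_qp() verb.  For an
 * RC QP this transition requires exactly the four attributes below;
 * ib_modify_qp_is_ok() above enforces that.  The helper name is
 * hypothetical.
 */
#if 0
static int example_to_init(struct ib_qp *qp, u8 port)
{
	struct ib_qp_attr attr = {
		.qp_state        = IB_QPS_INIT,
		.pkey_index      = 0,
		.port_num        = port,
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}
#endif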
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}
static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}
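/*
 * Worked example (assuming 128-byte send WQEs on an RC QP): the data
 * area is 128 - 16 (next segment) - 16 (remote address segment) = 96
 * bytes, so sq.max_gs = min(dev->limits.max_sg, 96 / 16) = at most 6
 * gather entries per WQE.
 */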
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}
static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}
static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}
static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}
static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;
	struct mthca_next_seg *next;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	mutex_init(&qp->mutex);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_reset(&qp->sq);
	mthca_wq_reset(&qp->rq);

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	} else {
		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) % qp->rq.max) <<
						    qp->rq.wqe_shift) | 1);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra send gather entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}
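/*
 * Worked example: a request for max_inline_data == 60 reserves
 * ALIGN(60 + MTHCA_INLINE_HEADER_SIZE, MTHCA_INLINE_CHUNK_SIZE) ==
 * ALIGN(64, 16) == 64 bytes of WQE space, which is 64 / 16 == 4
 * data-segment slots, so sq.max_gs is raised to at least 4.
 */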
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
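/*
 * Example of why the CQN ordering above matters: if one CPU calls
 * mthca_lock_cqs(cqA, cqB) while another calls mthca_lock_cqs(cqB, cqA),
 * both take the lower-numbered CQ's lock first, so one of them simply
 * waits instead of the two deadlocking AB-BA style.
 */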
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err = 0;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}
static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int c;

	spin_lock_irq(&dev->qp_table.lock);
	c = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return c;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, recv_cq, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			mthca_cq_clean(dev, send_cq, qp->qpn, NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	}

	mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_ud_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
			  mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->wr.opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
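/*
 * Worked example: head and tail are free-running counters, so unsigned
 * subtraction handles wraparound.  With max == 64, head == 100 and
 * tail == 40, cur == 60 and posting nreq == 4 overflows (60 + 4 >= 64);
 * the locked re-read picks up completions that raced in via the CQ.
 */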
static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
					   struct ib_atomic_wr *wr)
{
	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->swap);
		aseg->compare  = cpu_to_be64(wr->compare_add);
	} else {
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare  = 0;
	}
}
static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
			     struct ib_ud_wr *wr)
{
	useg->lkey    = cpu_to_be32(to_mah(wr->ah)->key);
	useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
	useg->dqpn    = cpu_to_be32(wr->remote_qpn);
	useg->qkey    = cpu_to_be32(wr->remote_qkey);
}
static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
			     struct ib_ud_wr *wr)
{
	memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
	useg->dqpn = cpu_to_be32(wr->remote_qpn);
	useg->qkey = cpu_to_be32(wr->remote_qkey);
}
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, atomic_wr(wr));
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_tavor_ud_seg(wqe, ud_wr(wr));
			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
			       qp->send_wqe_offset) | f0 | op0,
			      (qp->qpn << 8) | size0,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * size0 is only used if nreq != 0, and it will always be
	 * initialized the first time through the main loop before
	 * nreq is incremented.  So nreq cannot become non-zero
	 * without initializing size0, and it is in fact never used
	 * uninitialized.
	 */
	int uninitialized_var(size0);
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!nreq)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			wmb();

			mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
				      qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
		}
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
			      qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	/*
	 * Make sure doorbells don't leak out of RQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	u32 dbhi;
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
				((qp->sq.head & 0xffff) << 8) | f0 | op0;

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();

			mthca_write64(dbhi, (qp->qpn << 8) | size0,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, atomic_wr(wr));
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_arbel_ud_seg(wqe, ud_wr(wr));
			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();

		mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
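/*
 * Note the Tavor/Arbel difference visible in the two send paths above:
 * Tavor has no doorbell record, so mthca_tavor_post_send() only rings
 * the MMIO doorbell, while the mem-free (Arbel) path first updates the
 * doorbell record (*qp->sq.db) and needs the two wmb()s to order
 * descriptors -> doorbell record -> MMIO doorbell.
 */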
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs)
			mthca_set_data_seg_inval(wqe);

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all receive WQEs generate a CQE, so we're always
	 * at the end of the doorbell chain.
	 */
	if (qp->ibqp.srq && !is_send) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}
int mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2);
		if (err) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "%d, aborting.\n", err);
			goto err_out;
		}
	}
	return 0;

err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}