Merge branches 'cma', 'cxgb3', 'cxgb4', 'ipoib', 'misc', 'mlx4', 'mlx5', 'nes', ...
author Roland Dreier <roland@purestorage.com>
Wed, 31 Jul 2013 21:24:06 +0000 (14:24 -0700)
committer Roland Dreier <roland@purestorage.com>
Wed, 31 Jul 2013 21:24:06 +0000 (14:24 -0700)
20 files changed:
drivers/infiniband/core/cma.c
drivers/infiniband/core/mad.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_sdma.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
include/linux/mlx5/device.h
include/linux/mlx5/driver.h

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f1c279fabe646f80d3962f3354475087f9c37bf8..7c0f9535fb7d443bf2c4b1647b3494816292020a 100644
@@ -423,7 +423,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
        struct sockaddr_ib *addr;
        union ib_gid gid, sgid, *dgid;
        u16 pkey, index;
-       u8 port, p;
+       u8 p;
        int i;
 
        cma_dev = NULL;
@@ -443,7 +443,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
                                if (!memcmp(&gid, dgid, sizeof(gid))) {
                                        cma_dev = cur_dev;
                                        sgid = gid;
-                                       port = p;
+                                       id_priv->id.port_num = p;
                                        goto found;
                                }
 
@@ -451,7 +451,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
                                                 dgid->global.subnet_prefix)) {
                                        cma_dev = cur_dev;
                                        sgid = gid;
-                                       port = p;
+                                       id_priv->id.port_num = p;
                                }
                        }
                }
@@ -462,7 +462,6 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 
 found:
        cma_attach_to_dev(id_priv, cma_dev);
-       id_priv->id.port_num = port;
        addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
        memcpy(&addr->sib_addr, &sgid, sizeof sgid);
        cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
@@ -880,7 +879,8 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
 {
        struct cma_hdr *hdr;
 
-       if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
+       if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
+           (ib_event->event == IB_CM_REQ_RECEIVED)) {
                cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
                return 0;
        }
@@ -2677,29 +2677,32 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 {
        struct ib_cm_sidr_req_param req;
        struct ib_cm_id *id;
+       void *private_data;
        int offset, ret;
 
+       memset(&req, 0, sizeof req);
        offset = cma_user_data_offset(id_priv);
        req.private_data_len = offset + conn_param->private_data_len;
        if (req.private_data_len < conn_param->private_data_len)
                return -EINVAL;
 
        if (req.private_data_len) {
-               req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
-               if (!req.private_data)
+               private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+               if (!private_data)
                        return -ENOMEM;
        } else {
-               req.private_data = NULL;
+               private_data = NULL;
        }
 
        if (conn_param->private_data && conn_param->private_data_len)
-               memcpy((void *) req.private_data + offset,
-                      conn_param->private_data, conn_param->private_data_len);
+               memcpy(private_data + offset, conn_param->private_data,
+                      conn_param->private_data_len);
 
-       if (req.private_data) {
-               ret = cma_format_hdr((void *) req.private_data, id_priv);
+       if (private_data) {
+               ret = cma_format_hdr(private_data, id_priv);
                if (ret)
                        goto out;
+               req.private_data = private_data;
        }
 
        id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
@@ -2721,7 +2724,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
                id_priv->cm_id.ib = NULL;
        }
 out:
-       kfree(req.private_data);
+       kfree(private_data);
        return ret;
 }
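
The cma_resolve_ib_udp() change above stops writing through req.private_data (which the old (void *) casts suggest is const-qualified) and instead formats a writable local buffer, assigning it to the request only once the header has been written. A minimal user-space sketch of that pattern, with made-up names and no kernel APIs:

#include <stdlib.h>
#include <string.h>

struct sidr_req {
	const void *private_data;	/* consumers treat this as read-only */
	size_t      private_data_len;
};

static int build_req(struct sidr_req *req, const void *data, size_t len,
		     size_t hdr_len)
{
	char *buf = calloc(1, hdr_len + len);	/* writable while being formatted */

	if (!buf)
		return -1;
	/* ... write the header into buf[0..hdr_len) ... */
	memcpy(buf + hdr_len, data, len);
	req->private_data = buf;		/* publish; no const cast needed */
	req->private_data_len = hdr_len + len;
	return 0;
}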
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index dc3fd1e8af07f0f8f4c6da455c38e315074a8e1e..4c837e66516b894f88f689d93c9a84c501fa6b47 100644
@@ -2663,6 +2663,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
        int ret, i;
        struct ib_qp_attr *attr;
        struct ib_qp *qp;
+       u16 pkey_index;
 
        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
@@ -2670,6 +2671,11 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
                return -ENOMEM;
        }
 
+       ret = ib_find_pkey(port_priv->device, port_priv->port_num,
+                          IB_DEFAULT_PKEY_FULL, &pkey_index);
+       if (ret)
+               pkey_index = 0;
+
        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                qp = port_priv->qp_info[i].qp;
                if (!qp)
@@ -2680,7 +2686,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
                 * one is needed for the Reset to Init transition
                 */
                attr->qp_state = IB_QPS_INIT;
-               attr->pkey_index = 0;
+               attr->pkey_index = pkey_index;
                attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE |
                                             IB_QP_PKEY_INDEX | IB_QP_QKEY);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e87f2201b220673030ca18a3957642b623d065a7..d2283837d45168070707eba5ac11aab4c2fe9c5e 100644
@@ -226,6 +226,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
                        mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
                                             sizeof(struct t3_cqe));
                        uresp.memsize = mm->len;
+                       uresp.reserved = 0;
                        resplen = sizeof uresp;
                }
                if (ib_copy_to_udata(udata, &uresp, resplen)) {
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 4d599cedbb0b1ccceab997a4945be0d8d6e89fde..f2a3f48107e71e1d53b0abb61757f1f0ba38bb59 100644
@@ -1511,8 +1511,14 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
 
        memset(&attr, 0, sizeof attr);
        attr.qp_state = IB_QPS_INIT;
-       attr.pkey_index =
-               to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
+       ret = 0;
+       if (create_tun)
+               ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
+                                             ctx->port, IB_DEFAULT_PKEY_FULL,
+                                             &attr.pkey_index);
+       if (ret || !create_tun)
+               attr.pkey_index =
+                       to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
        attr.qkey = IB_QP1_QKEY;
        attr.port_num = ctx->port;
        ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8000fff4d4444168ac4091a78112593d11b212dd..3f831de9a4d8a901607afc26c81827cc5c1e3bd5 100644
@@ -619,7 +619,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
        resp.tot_uuars = req.total_num_uuars;
        resp.num_ports = dev->mdev.caps.num_ports;
-       err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+       err = ib_copy_to_udata(udata, &resp,
+                              sizeof(resp) - sizeof(resp.reserved));
        if (err)
                goto out_uars;
 
@@ -1426,7 +1427,8 @@ static int init_one(struct pci_dev *pdev,
        if (err)
                goto err_eqs;
 
-       if (ib_register_device(&dev->ib_dev, NULL))
+       err = ib_register_device(&dev->ib_dev, NULL);
+       if (err)
                goto err_rsrc;
 
        err = create_umr_res(dev);
@@ -1434,8 +1436,9 @@ static int init_one(struct pci_dev *pdev,
                goto err_dev;
 
        for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
-               if (device_create_file(&dev->ib_dev.dev,
-                                      mlx5_class_attributes[i]))
+               err = device_create_file(&dev->ib_dev.dev,
+                                        mlx5_class_attributes[i]);
+               if (err)
                        goto err_umrc;
        }
 
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 16ac54c9819f24c113ad02f594a47608a004a828..045f8cdbd303deba81e60c5f1f52330b536b9617 100644
@@ -199,7 +199,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 
 static int sq_overhead(enum ib_qp_type qp_type)
 {
-       int size;
+       int size = 0;
 
        switch (qp_type) {
        case IB_QPT_XRC_INI:
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 418004c93feb772c641dde6b102620fc2be45900..90200245c5ebfd5fe71000d17b78e52bfa8d4734 100644
@@ -3570,10 +3570,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
        tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
        iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
        nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
-                       " Tcp state = %d, iWARP state = %d\n",
+                       " Tcp state = %s, iWARP state = %s\n",
                        async_event_id,
                        le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
-                       tcp_state, iwarp_state);
+                       nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
 
        aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
        if (aeq_info & NES_AEQE_QP) {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 8f67fe2e91e623a87878a8790ff9f2e28db0c0b8..5b53ca5a22840c14e31ddb0d7e978dfac5c179b7 100644
@@ -1384,6 +1384,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
 
                        if (ibpd->uobject) {
                                uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
+                               uresp.mmap_rq_db_index = 0;
                                uresp.actual_sq_size = sq_size;
                                uresp.actual_rq_size = rq_size;
                                uresp.qp_id = nesqp->hwqp.qp_id;
@@ -1767,7 +1768,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
                resp.cq_id = nescq->hw_cq.cq_number;
                resp.cq_size = nescq->hw_cq.cq_size;
                resp.mmap_db_index = 0;
-               if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+               if (ib_copy_to_udata(udata, &resp, sizeof resp - sizeof resp.reserved)) {
                        nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
                        kfree(nescq);
                        return ERR_PTR(-EFAULT);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index a877a8ed79077dbd085d28d1fd834b3f1e01d019..f4c587c68f648055546deafce7f8708798f4d109 100644
@@ -29,7 +29,6 @@
 #include <net/netevent.h>
 
 #include <rdma/ib_addr.h>
-#include <rdma/ib_cache.h>
 
 #include "ocrdma.h"
 #include "ocrdma_verbs.h"
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index dcfbab177faa63eee063a428ac7848e5763f8125..f36630e4b6be182c735b7aa81d357b4207266c82 100644
@@ -242,6 +242,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
        memset(ctx->ah_tbl.va, 0, map_len);
        ctx->ah_tbl.len = map_len;
 
+       memset(&resp, 0, sizeof(resp));
        resp.ah_tbl_len = ctx->ah_tbl.len;
        resp.ah_tbl_page = ctx->ah_tbl.pa;
 
@@ -253,7 +254,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
        resp.wqe_size = dev->attr.wqe_size;
        resp.rqe_size = dev->attr.rqe_size;
        resp.dpp_wqe_size = dev->attr.wqe_size;
-       resp.rsvd = 0;
 
        memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
        status = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -338,6 +338,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
        struct ocrdma_alloc_pd_uresp rsp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
 
+       memset(&rsp, 0, sizeof(rsp));
        rsp.id = pd->id;
        rsp.dpp_enabled = pd->dpp_enabled;
        db_page_addr = pd->dev->nic_info.unmapped_db +
@@ -692,6 +693,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
        struct ocrdma_ucontext *uctx;
        struct ocrdma_create_cq_uresp uresp;
 
+       memset(&uresp, 0, sizeof(uresp));
        uresp.cq_id = cq->id;
        uresp.page_size = cq->len;
        uresp.num_pages = 1;
@@ -1460,6 +1462,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
        int status;
        struct ocrdma_create_srq_uresp uresp;
 
+       memset(&uresp, 0, sizeof(uresp));
        uresp.rq_dbid = srq->rq.dbid;
        uresp.num_rq_pages = 1;
        uresp.rq_page_addr[0] = srq->rq.pa;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 21e8b09d4bf87687a9c569ac87dae42108b7cd8a..016e7429adf66b9ffb945cb4fed168e358c57742 100644
@@ -1596,6 +1596,8 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
        struct qib_devdata *dd = ppd->dd;
 
        errs &= QIB_E_P_SDMAERRS;
+       err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
+                  errs, qib_7322p_error_msgs);
 
        if (errs & QIB_E_P_SDMAUNEXPDATA)
                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 32162d355370f95b56970b19da33783052c24c36..9b5322d8cd5accca85737745ba06973ca7212efe 100644
@@ -717,7 +717,7 @@ void dump_sdma_state(struct qib_pportdata *ppd)
        struct qib_sdma_txreq *txp, *txpnext;
        __le64 *descqp;
        u64 desc[2];
-       dma_addr_t addr;
+       u64 addr;
        u16 gen, dwlen, dwoffset;
        u16 head, tail, cnt;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2cfa76f5d99eac87bf788eb41bb2a7c3815ec700..196b1d13cbcbc09548e92a4be6c9a18cdc6aae36 100644
@@ -932,12 +932,47 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
        return 0;
 }
 
+/*
+ * Takes whatever value is in pkey index 0 and updates priv->pkey;
+ * returns 0 if the pkey value was changed.
+ */
+static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+{
+       int result;
+       u16 prev_pkey;
+
+       prev_pkey = priv->pkey;
+       result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+       if (result) {
+               ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
+                          priv->port, result);
+               return result;
+       }
+
+       priv->pkey |= 0x8000;
+
+       if (prev_pkey != priv->pkey) {
+               ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
+                         prev_pkey, priv->pkey);
+               /*
+                * Update the pkey in the broadcast address, while making sure to set
+                * the full membership bit, so that we join the right broadcast group.
+                */
+               priv->dev->broadcast[8] = priv->pkey >> 8;
+               priv->dev->broadcast[9] = priv->pkey & 0xff;
+               return 0;
+       }
+
+       return 1;
+}
+
 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
                                enum ipoib_flush_level level)
 {
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        u16 new_index;
+       int result;
 
        mutex_lock(&priv->vlan_mutex);
 
@@ -951,6 +986,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
        mutex_unlock(&priv->vlan_mutex);
 
        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
+               /* for non-child devices we must check/update the pkey value here */
+               if (level == IPOIB_FLUSH_HEAVY &&
+                   !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+                       update_parent_pkey(priv);
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }
@@ -961,21 +1000,32 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
        }
 
        if (level == IPOIB_FLUSH_HEAVY) {
-               if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
-                       clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-                       ipoib_ib_dev_down(dev, 0);
-                       ipoib_ib_dev_stop(dev, 0);
-                       if (ipoib_pkey_dev_delay_open(dev))
+               /* child devices chase their origin pkey value, while non-child
+                * (parent) devices should always take what is present in pkey index 0
+                */
+               if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+                       if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
+                               clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+                               ipoib_ib_dev_down(dev, 0);
+                               ipoib_ib_dev_stop(dev, 0);
+                               if (ipoib_pkey_dev_delay_open(dev))
+                                       return;
+                       }
+                       /* restart QP only if P_Key index is changed */
+                       if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+                           new_index == priv->pkey_index) {
+                               ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
                                return;
+                       }
+                       priv->pkey_index = new_index;
+               } else {
+                       result = update_parent_pkey(priv);
+                       /* restart QP only if P_Key value changed */
+                       if (result) {
+                               ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
+                               return;
+                       }
                }
-
-               /* restart QP only if P_Key index is changed */
-               if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
-                   new_index == priv->pkey_index) {
-                       ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
-                       return;
-               }
-               priv->pkey_index = new_index;
        }
 
        if (level == IPOIB_FLUSH_LIGHT) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b6e049a3c7a853b92d5c9d126c61a2d850f290cb..c6f71a88c55ca9649b6098cc81d64de229857055 100644
@@ -1461,7 +1461,7 @@ static ssize_t create_child(struct device *dev,
        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;
 
-       if (pkey < 0 || pkey > 0xffff)
+       if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
                return -EINVAL;
 
        /*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 74685936c9482be96c09a09e5af6f314bdc2b741..f81abe16cf093d6c74ad31dd179f03f7dc90b997 100644
@@ -119,6 +119,15 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
        } else
                child_pkey  = nla_get_u16(data[IFLA_IPOIB_PKEY]);
 
+       if (child_pkey == 0 || child_pkey == 0x8000)
+               return -EINVAL;
+
+       /*
+        * Set the full membership bit, so that we join the right
+        * broadcast group, etc.
+        */
+       child_pkey |= 0x8000;
+
        err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
 
        if (!err && data)
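
The 0x8000 values in the IPoIB hunks above follow the InfiniBand P_Key layout: bit 15 is the membership bit (1 = full member) and bits 0-14 are the key base, which is why 0x0000 and 0x8000 are rejected (their base is zero) and why the full-membership bit is OR'ed in before joining the broadcast group. An illustrative stand-alone snippet (example values only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t pkey = 0x0abc;			/* limited-membership form   */
	uint16_t full = pkey | 0x8000;		/* same key, full membership */
	int valid = (pkey & 0x7fff) != 0;	/* base of all zeros is invalid */

	printf("base 0x%04x full 0x%04x valid %d\n",
	       (unsigned)(pkey & 0x7fff), (unsigned)full, valid);
	return 0;
}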
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 205753a04cfcb22e8904dbe2e3e203d82df32552..c571de85d0f995bb2c302210cfbd979b9e1bec9e 100644
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-       CMD_IF_REV = 3,
+       CMD_IF_REV = 4,
 };
 
 enum {
@@ -282,6 +282,12 @@ const char *mlx5_command_str(int command)
        case MLX5_CMD_OP_TEARDOWN_HCA:
                return "TEARDOWN_HCA";
 
+       case MLX5_CMD_OP_ENABLE_HCA:
+               return "MLX5_CMD_OP_ENABLE_HCA";
+
+       case MLX5_CMD_OP_DISABLE_HCA:
+               return "MLX5_CMD_OP_DISABLE_HCA";
+
        case MLX5_CMD_OP_QUERY_PAGES:
                return "QUERY_PAGES";
 
@@ -1113,7 +1119,13 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
 
        for (i = 0; i < (1 << cmd->log_sz); i++) {
                if (test_bit(i, &vector)) {
+                       struct semaphore *sem;
+
                        ent = cmd->ent_arr[i];
+                       if (ent->page_queue)
+                               sem = &cmd->pages_sem;
+                       else
+                               sem = &cmd->sem;
                        ktime_get_ts(&ent->ts2);
                        memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
                        dump_command(dev, ent, 0);
@@ -1136,10 +1148,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
                        } else {
                                complete(&ent->done);
                        }
-                       if (ent->page_queue)
-                               up(&cmd->pages_sem);
-                       else
-                               up(&cmd->sem);
+                       up(sem);
                }
        }
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 12242de2b0e3ec7950a01f966def239e4b1ce74f..b47739b0b5f6dfb34139ad74e02ac10db1d4fbbd 100644
@@ -249,6 +249,44 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
        return err;
 }
 
+static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
+{
+       int err;
+       struct mlx5_enable_hca_mbox_in in;
+       struct mlx5_enable_hca_mbox_out out;
+
+       memset(&in, 0, sizeof(in));
+       memset(&out, 0, sizeof(out));
+       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
+       err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+       if (err)
+               return err;
+
+       if (out.hdr.status)
+               return mlx5_cmd_status_to_err(&out.hdr);
+
+       return 0;
+}
+
+static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
+{
+       int err;
+       struct mlx5_disable_hca_mbox_in in;
+       struct mlx5_disable_hca_mbox_out out;
+
+       memset(&in, 0, sizeof(in));
+       memset(&out, 0, sizeof(out));
+       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
+       err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+       if (err)
+               return err;
+
+       if (out.hdr.status)
+               return mlx5_cmd_status_to_err(&out.hdr);
+
+       return 0;
+}
+
 int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
        struct mlx5_priv *priv = &dev->priv;
@@ -304,28 +342,41 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
        }
 
        mlx5_pagealloc_init(dev);
+
+       err = mlx5_core_enable_hca(dev);
+       if (err) {
+               dev_err(&pdev->dev, "enable hca failed\n");
+               goto err_pagealloc_cleanup;
+       }
+
+       err = mlx5_satisfy_startup_pages(dev, 1);
+       if (err) {
+               dev_err(&pdev->dev, "failed to allocate boot pages\n");
+               goto err_disable_hca;
+       }
+
        err = set_hca_ctrl(dev);
        if (err) {
                dev_err(&pdev->dev, "set_hca_ctrl failed\n");
-               goto err_pagealloc_cleanup;
+               goto reclaim_boot_pages;
        }
 
        err = handle_hca_cap(dev);
        if (err) {
                dev_err(&pdev->dev, "handle_hca_cap failed\n");
-               goto err_pagealloc_cleanup;
+               goto reclaim_boot_pages;
        }
 
-       err = mlx5_satisfy_startup_pages(dev);
+       err = mlx5_satisfy_startup_pages(dev, 0);
        if (err) {
-               dev_err(&pdev->dev, "failed to allocate startup pages\n");
-               goto err_pagealloc_cleanup;
+               dev_err(&pdev->dev, "failed to allocate init pages\n");
+               goto reclaim_boot_pages;
        }
 
        err = mlx5_pagealloc_start(dev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
-               goto err_reclaim_pages;
+               goto reclaim_boot_pages;
        }
 
        err = mlx5_cmd_init_hca(dev);
@@ -396,9 +447,12 @@ err_stop_poll:
 err_pagealloc_stop:
        mlx5_pagealloc_stop(dev);
 
-err_reclaim_pages:
+reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);
 
+err_disable_hca:
+       mlx5_core_disable_hca(dev);
+
 err_pagealloc_cleanup:
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);
@@ -434,6 +488,7 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
        mlx5_cmd_teardown_hca(dev);
        mlx5_pagealloc_stop(dev);
        mlx5_reclaim_startup_pages(dev);
+       mlx5_core_disable_hca(dev);
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);
        iounmap(dev->iseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index f0bf46339b2805d745c89eca8a413a5cd002efb8..4a3e137931a38b898468061ae8334016e4e2f23e 100644
@@ -64,7 +64,7 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
-       u8                      reserved[2];
+       __be16                  num_boot_pages;
        __be16                  func_id;
        __be16                  init_pages;
        __be16                  num_pages;
@@ -146,7 +146,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-                               s16 *pages, s16 *init_pages)
+                               s16 *pages, s16 *init_pages, u16 *boot_pages)
 {
        struct mlx5_query_pages_inbox   in;
        struct mlx5_query_pages_outbox  out;
@@ -164,8 +164,13 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 
        if (pages)
                *pages = be16_to_cpu(out.num_pages);
+
        if (init_pages)
                *init_pages = be16_to_cpu(out.init_pages);
+
+       if (boot_pages)
+               *boot_pages = be16_to_cpu(out.num_boot_pages);
+
        *func_id = be16_to_cpu(out.func_id);
 
        return err;
@@ -357,19 +362,22 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
        queue_work(dev->priv.pg_wq, &req->work);
 }
 
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
+       u16 uninitialized_var(boot_pages);
        s16 uninitialized_var(init_pages);
        u16 uninitialized_var(func_id);
        int err;
 
-       err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
+       err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
+                                  &boot_pages);
        if (err)
                return err;
 
-       mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
 
-       return give_pages(dev, func_id, init_pages, 0);
+       mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
+                     init_pages, boot_pages, func_id);
+       return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8de8d8f22384b9abcd83ce0f76898c7afdf7ded9..737685e9e852e91fd93ccf142500c33773f42e71 100644
@@ -690,6 +690,26 @@ struct mlx5_query_cq_mbox_out {
        __be64                  pas[0];
 };
 
+struct mlx5_enable_hca_mbox_in {
+       struct mlx5_inbox_hdr   hdr;
+       u8                      rsvd[8];
+};
+
+struct mlx5_enable_hca_mbox_out {
+       struct mlx5_outbox_hdr  hdr;
+       u8                      rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_in {
+       struct mlx5_inbox_hdr   hdr;
+       u8                      rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_out {
+       struct mlx5_outbox_hdr  hdr;
+       u8                      rsvd[8];
+};
+
 struct mlx5_eq_context {
        u8                      status;
        u8                      ec_oi;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f22e4419839b372b9cf6b6535b27cacf721ba9fc..2aa258b0ced1449b8281f6728d6f243d0c72d15f 100644
@@ -101,6 +101,8 @@ enum {
        MLX5_CMD_OP_QUERY_ADAPTER               = 0x101,
        MLX5_CMD_OP_INIT_HCA                    = 0x102,
        MLX5_CMD_OP_TEARDOWN_HCA                = 0x103,
+       MLX5_CMD_OP_ENABLE_HCA                  = 0x104,
+       MLX5_CMD_OP_DISABLE_HCA                 = 0x105,
        MLX5_CMD_OP_QUERY_PAGES                 = 0x107,
        MLX5_CMD_OP_MANAGE_PAGES                = 0x108,
        MLX5_CMD_OP_SET_HCA_CAP                 = 0x109,
@@ -690,7 +692,7 @@ int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s16 npages);
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);