Merge branches 'bart-srp', 'generic-errors', 'ira-cleanups' and 'mwang-v8' into k...
author Doug Ledford <dledford@redhat.com>
Wed, 20 May 2015 20:12:40 +0000 (16:12 -0400)
committer Doug Ledford <dledford@redhat.com>
Wed, 20 May 2015 20:12:40 +0000 (16:12 -0400)
drivers/infiniband/core/cma.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
include/rdma/ib_verbs.h
net/sunrpc/xprtrdma/svc_rdma_transport.c

index 06441a43c3aacd1c6aecb22e15a17529b4a890a8,b2114efcb89e2d1bb05a8b5ddb516cfda4dd56b1,06441a43c3aacd1c6aecb22e15a17529b4a890a8,ea92a0daa61cdc9fd6dc27492bf0dffca1581422..c34d650463bd94e178f1f8cfe789a35c39190194
@@@@@ -65,6 -65,34 -65,6 -65,6 +65,34 @@@@@ MODULE_LICENSE("Dual BSD/GPL")
    #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
    #define CMA_IBOE_PACKET_LIFETIME 18
    
+ ++static const char * const cma_events[] = {
+ ++    [RDMA_CM_EVENT_ADDR_RESOLVED]    = "address resolved",
+ ++    [RDMA_CM_EVENT_ADDR_ERROR]       = "address error",
+ ++    [RDMA_CM_EVENT_ROUTE_RESOLVED]   = "route resolved",
+ ++    [RDMA_CM_EVENT_ROUTE_ERROR]      = "route error",
+ ++    [RDMA_CM_EVENT_CONNECT_REQUEST]  = "connect request",
+ ++    [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
+ ++    [RDMA_CM_EVENT_CONNECT_ERROR]    = "connect error",
+ ++    [RDMA_CM_EVENT_UNREACHABLE]      = "unreachable",
+ ++    [RDMA_CM_EVENT_REJECTED]         = "rejected",
+ ++    [RDMA_CM_EVENT_ESTABLISHED]      = "established",
+ ++    [RDMA_CM_EVENT_DISCONNECTED]     = "disconnected",
+ ++    [RDMA_CM_EVENT_DEVICE_REMOVAL]   = "device removal",
+ ++    [RDMA_CM_EVENT_MULTICAST_JOIN]   = "multicast join",
+ ++    [RDMA_CM_EVENT_MULTICAST_ERROR]  = "multicast error",
+ ++    [RDMA_CM_EVENT_ADDR_CHANGE]      = "address change",
+ ++    [RDMA_CM_EVENT_TIMEWAIT_EXIT]    = "timewait exit",
+ ++};
+ ++
+ ++const char *rdma_event_msg(enum rdma_cm_event_type event)
+ ++{
+ ++    size_t index = event;
+ ++
+ ++    return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
+ ++                    cma_events[index] : "unrecognized event";
+ ++}
+ ++EXPORT_SYMBOL(rdma_event_msg);
+ ++
    static void cma_add_one(struct ib_device *device);
    static void cma_remove_one(struct ib_device *device);
    
@@@@@ -349,18 -377,18 -349,18 -349,35 +377,35 @@@@@ static int cma_translate_addr(struct so
        return ret;
    }
    
+++ static inline int cma_validate_port(struct ib_device *device, u8 port,
+++                                   union ib_gid *gid, int dev_type)
+++ {
+++     u8 found_port;
+++     int ret = -ENODEV;
+++ 
+++     if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
+++             return ret;
+++ 
+++     if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
+++             return ret;
+++ 
+++     ret = ib_find_cached_gid(device, gid, &found_port, NULL);
+++     if (port != found_port)
+++             return -ENODEV;
+++ 
+++     return ret;
+++ }
+++ 
    static int cma_acquire_dev(struct rdma_id_private *id_priv,
                           struct rdma_id_private *listen_id_priv)
    {
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        struct cma_device *cma_dev;
---     union ib_gid gid, iboe_gid;
+++     union ib_gid gid, iboe_gid, *gidp;
        int ret = -ENODEV;
---     u8 port, found_port;
---     enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
---             IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+++     u8 port;
    
---     if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+++     if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
            id_priv->id.ps == RDMA_PS_IPOIB)
                return -EINVAL;
    
    
        memcpy(&gid, dev_addr->src_dev_addr +
               rdma_addr_gid_offset(dev_addr), sizeof gid);
---     if (listen_id_priv &&
---         rdma_port_get_link_layer(listen_id_priv->id.device,
---                                  listen_id_priv->id.port_num) == dev_ll) {
+++ 
+++     if (listen_id_priv) {
                cma_dev = listen_id_priv->cma_dev;
                port = listen_id_priv->id.port_num;
---             if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
---                 rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
---                     ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
---                                              &found_port, NULL);
---             else
---                     ret = ib_find_cached_gid(cma_dev->device, &gid,
---                                              &found_port, NULL);
+++             gidp = rdma_protocol_roce(cma_dev->device, port) ?
+++                    &iboe_gid : &gid;
    
---             if (!ret && (port  == found_port)) {
---                     id_priv->id.port_num = found_port;
+++             ret = cma_validate_port(cma_dev->device, port, gidp,
+++                                     dev_addr->dev_type);
+++             if (!ret) {
+++                     id_priv->id.port_num = port;
                        goto out;
                }
        }
+++ 
        list_for_each_entry(cma_dev, &dev_list, list) {
                for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
                        if (listen_id_priv &&
                            listen_id_priv->cma_dev == cma_dev &&
                            listen_id_priv->id.port_num == port)
                                continue;
---                     if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
---                             if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
---                                 rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
---                                     ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
---                             else
---                                     ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
--- 
---                             if (!ret && (port == found_port)) {
---                                     id_priv->id.port_num = found_port;
---                                     goto out;
---                             }
+++ 
+++                     gidp = rdma_protocol_roce(cma_dev->device, port) ?
+++                            &iboe_gid : &gid;
+++ 
+++                     ret = cma_validate_port(cma_dev->device, port, gidp,
+++                                             dev_addr->dev_type);
+++                     if (!ret) {
+++                             id_priv->id.port_num = port;
+++                             goto out;
                        }
                }
        }
@@@@@ -435,10 -463,10 -435,10 -447,10 +475,10 @@@@@ static int cma_resolve_ib_dev(struct rd
        pkey = ntohs(addr->sib_pkey);
    
        list_for_each_entry(cur_dev, &dev_list, list) {
---             if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
---                     continue;
--- 
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+++                     if (!rdma_cap_af_ib(cur_dev->device, p))
+++                             continue;
+++ 
                        if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
                                continue;
    
@@@@@ -633,10 -661,10 -633,10 -645,9 +673,9 @@@@@ static int cma_modify_qp_rtr(struct rdm
        if (ret)
                goto out;
    
---     if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
---         == RDMA_TRANSPORT_IB &&
---         rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
---         == IB_LINK_LAYER_ETHERNET) {
+++     BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
+++ 
+++     if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
                ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
    
                if (ret)
@@@@@ -700,11 -728,11 -700,11 -711,10 +739,10 @@@@@ static int cma_ib_init_qp_attr(struct r
        int ret;
        u16 pkey;
    
---     if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
---         IB_LINK_LAYER_INFINIBAND)
---             pkey = ib_addr_get_pkey(dev_addr);
---     else
+++     if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
                pkey = 0xffff;
+++     else
+++             pkey = ib_addr_get_pkey(dev_addr);
    
        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  pkey, &qp_attr->pkey_index);
@@@@@ -735,8 -763,8 -735,8 -745,7 +773,7 @@@@@ int rdma_init_qp_attr(struct rdma_cm_i
        int ret = 0;
    
        id_priv = container_of(id, struct rdma_id_private, id);
---     switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
---     case RDMA_TRANSPORT_IB:
+++     if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
                        ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                else
    
                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
---             break;
---     case RDMA_TRANSPORT_IWARP:
+++     } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.iw) {
                        qp_attr->qp_access_flags = 0;
                        *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                } else
                        ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                                 qp_attr_mask);
---             break;
---     default:
+++     } else
                ret = -ENOSYS;
---             break;
---     }
    
        return ret;
    }
@@@@@ -935,13 -963,13 -935,13 -940,9 +968,9 @@@@@ static inline int cma_user_data_offset(
    
    static void cma_cancel_route(struct rdma_id_private *id_priv)
    {
---     switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
---     case IB_LINK_LAYER_INFINIBAND:
+++     if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
---             break;
---     default:
---             break;
        }
    }
    
@@@@@ -1013,17 -1041,17 -1013,17 -1014,12 +1042,12 @@@@@ static void cma_leave_mc_groups(struct 
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
---             switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
---             case IB_LINK_LAYER_INFINIBAND:
+++             if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
+++                                   id_priv->id.port_num)) {
                        ib_sa_free_multicast(mc->multicast.ib);
                        kfree(mc);
---                     break;
---             case IB_LINK_LAYER_ETHERNET:
+++             } else
                        kref_put(&mc->mcref, release_mc);
---                     break;
---             default:
---                     break;
---             }
        }
    }
    
@@@@@ -1044,17 -1072,17 -1044,17 -1040,12 +1068,12 @@@@@ void rdma_destroy_id(struct rdma_cm_id 
        mutex_unlock(&id_priv->handler_mutex);
    
        if (id_priv->cma_dev) {
---             switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
---             case RDMA_TRANSPORT_IB:
+++             if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.ib)
                                ib_destroy_cm_id(id_priv->cm_id.ib);
---                     break;
---             case RDMA_TRANSPORT_IWARP:
+++             } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.iw)
                                iw_destroy_cm_id(id_priv->cm_id.iw);
---                     break;
---             default:
---                     break;
                }
                cma_leave_mc_groups(id_priv);
                cma_release_dev(id_priv);
@@@@@ -1632,8 -1660,8 -1632,8 -1623,7 +1651,7 @@@@@ static void cma_listen_on_dev(struct rd
        struct rdma_cm_id *id;
        int ret;
    
---     if (cma_family(id_priv) == AF_IB &&
---         rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
+++     if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                return;
    
        id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
@@@@@ -1974,26 -2002,26 -1974,26 -1964,15 +1992,15 @@@@@ int rdma_resolve_route(struct rdma_cm_i
                return -EINVAL;
    
        atomic_inc(&id_priv->refcount);
---     switch (rdma_node_get_transport(id->device->node_type)) {
---     case RDMA_TRANSPORT_IB:
---             switch (rdma_port_get_link_layer(id->device, id->port_num)) {
---             case IB_LINK_LAYER_INFINIBAND:
---                     ret = cma_resolve_ib_route(id_priv, timeout_ms);
---                     break;
---             case IB_LINK_LAYER_ETHERNET:
---                     ret = cma_resolve_iboe_route(id_priv);
---                     break;
---             default:
---                     ret = -ENOSYS;
---             }
---             break;
---     case RDMA_TRANSPORT_IWARP:
+++     if (rdma_cap_ib_sa(id->device, id->port_num))
+++             ret = cma_resolve_ib_route(id_priv, timeout_ms);
+++     else if (rdma_protocol_roce(id->device, id->port_num))
+++             ret = cma_resolve_iboe_route(id_priv);
+++     else if (rdma_protocol_iwarp(id->device, id->port_num))
                ret = cma_resolve_iw_route(id_priv, timeout_ms);
---             break;
---     default:
+++     else
                ret = -ENOSYS;
---             break;
---     }
+++ 
        if (ret)
                goto err;
    
@@@@@ -2035,7 -2063,7 -2035,7 -2014,7 +2042,7 @@@@@ static int cma_bind_loopback(struct rdm
        mutex_lock(&lock);
        list_for_each_entry(cur_dev, &dev_list, list) {
                if (cma_family(id_priv) == AF_IB &&
---                 rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
+++                 !rdma_cap_ib_cm(cur_dev->device, 1))
                        continue;
    
                if (!cma_dev)
@@@@@ -2067,7 -2095,7 -2067,7 -2046,7 +2074,7 @@@@@ port_found
                goto out;
    
        id_priv->id.route.addr.dev_addr.dev_type =
---             (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
+++             (rdma_protocol_ib(cma_dev->device, p)) ?
                ARPHRD_INFINIBAND : ARPHRD_ETHER;
    
        rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@@@@ -2544,18 -2572,18 -2544,18 -2523,15 +2551,15 @@@@@ int rdma_listen(struct rdma_cm_id *id, 
    
        id_priv->backlog = backlog;
        if (id->device) {
---             switch (rdma_node_get_transport(id->device->node_type)) {
---             case RDMA_TRANSPORT_IB:
+++             if (rdma_cap_ib_cm(id->device, 1)) {
                        ret = cma_ib_listen(id_priv);
                        if (ret)
                                goto err;
---                     break;
---             case RDMA_TRANSPORT_IWARP:
+++             } else if (rdma_cap_iw_cm(id->device, 1)) {
                        ret = cma_iw_listen(id_priv, backlog);
                        if (ret)
                                goto err;
---                     break;
---             default:
+++             } else {
                        ret = -ENOSYS;
                        goto err;
                }
@@@@@ -2891,20 -2919,20 -2891,20 -2867,15 +2895,15 @@@@@ int rdma_connect(struct rdma_cm_id *id
                id_priv->srq = conn_param->srq;
        }
    
---     switch (rdma_node_get_transport(id->device->node_type)) {
---     case RDMA_TRANSPORT_IB:
+++     if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (id->qp_type == IB_QPT_UD)
                        ret = cma_resolve_ib_udp(id_priv, conn_param);
                else
                        ret = cma_connect_ib(id_priv, conn_param);
---             break;
---     case RDMA_TRANSPORT_IWARP:
+++     } else if (rdma_cap_iw_cm(id->device, id->port_num))
                ret = cma_connect_iw(id_priv, conn_param);
---             break;
---     default:
+++     else
                ret = -ENOSYS;
---             break;
---     }
        if (ret)
                goto err;
    
@@@@@ -3007,8 -3035,8 -3007,8 -2978,7 +3006,7 @@@@@ int rdma_accept(struct rdma_cm_id *id, 
                id_priv->srq = conn_param->srq;
        }
    
---     switch (rdma_node_get_transport(id->device->node_type)) {
---     case RDMA_TRANSPORT_IB:
+++     if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (id->qp_type == IB_QPT_UD) {
                        if (conn_param)
                                ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
                        else
                                ret = cma_rep_recv(id_priv);
                }
---             break;
---     case RDMA_TRANSPORT_IWARP:
+++     } else if (rdma_cap_iw_cm(id->device, id->port_num))
                ret = cma_accept_iw(id_priv, conn_param);
---             break;
---     default:
+++     else
                ret = -ENOSYS;
---             break;
---     }
    
        if (ret)
                goto reject;
@@@@@ -3075,8 -3103,8 -3075,8 -3041,7 +3069,7 @@@@@ int rdma_reject(struct rdma_cm_id *id, 
        if (!id_priv->cm_id.ib)
                return -EINVAL;
    
---     switch (rdma_node_get_transport(id->device->node_type)) {
---     case RDMA_TRANSPORT_IB:
+++     if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (id->qp_type == IB_QPT_UD)
                        ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
                                                private_data, private_data_len);
                        ret = ib_send_cm_rej(id_priv->cm_id.ib,
                                             IB_CM_REJ_CONSUMER_DEFINED, NULL,
                                             0, private_data, private_data_len);
---             break;
---     case RDMA_TRANSPORT_IWARP:
+++     } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
                ret = iw_cm_reject(id_priv->cm_id.iw,
                                   private_data, private_data_len);
---             break;
---     default:
+++     } else
                ret = -ENOSYS;
---             break;
---     }
+++ 
        return ret;
    }
    EXPORT_SYMBOL(rdma_reject);
@@@@@ -3106,22 -3134,22 -3106,22 -3068,18 +3096,18 @@@@@ int rdma_disconnect(struct rdma_cm_id *
        if (!id_priv->cm_id.ib)
                return -EINVAL;
    
---     switch (rdma_node_get_transport(id->device->node_type)) {
---     case RDMA_TRANSPORT_IB:
+++     if (rdma_cap_ib_cm(id->device, id->port_num)) {
                ret = cma_modify_qp_err(id_priv);
                if (ret)
                        goto out;
                /* Initiate or respond to a disconnect. */
                if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
                        ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
---             break;
---     case RDMA_TRANSPORT_IWARP:
+++     } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
                ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
---             break;
---     default:
+++     } else
                ret = -EINVAL;
---             break;
---     }
+++ 
    out:
        return ret;
    }
@@@@@ -3367,24 -3395,24 -3367,24 -3325,13 +3353,13 @@@@@ int rdma_join_multicast(struct rdma_cm_
        list_add(&mc->list, &id_priv->mc_list);
        spin_unlock(&id_priv->lock);
    
---     switch (rdma_node_get_transport(id->device->node_type)) {
---     case RDMA_TRANSPORT_IB:
---             switch (rdma_port_get_link_layer(id->device, id->port_num)) {
---             case IB_LINK_LAYER_INFINIBAND:
---                     ret = cma_join_ib_multicast(id_priv, mc);
---                     break;
---             case IB_LINK_LAYER_ETHERNET:
---                     kref_init(&mc->mcref);
---                     ret = cma_iboe_join_multicast(id_priv, mc);
---                     break;
---             default:
---                     ret = -EINVAL;
---             }
---             break;
---     default:
+++     if (rdma_protocol_roce(id->device, id->port_num)) {
+++             kref_init(&mc->mcref);
+++             ret = cma_iboe_join_multicast(id_priv, mc);
+++     } else if (rdma_cap_ib_mcast(id->device, id->port_num))
+++             ret = cma_join_ib_multicast(id_priv, mc);
+++     else
                ret = -ENOSYS;
---             break;
---     }
    
        if (ret) {
                spin_lock_irq(&id_priv->lock);
@@@@@ -3412,19 -3440,19 -3412,19 -3359,15 +3387,15 @@@@@ void rdma_leave_multicast(struct rdma_c
                                ib_detach_mcast(id->qp,
                                                &mc->multicast.ib->rec.mgid,
                                                be16_to_cpu(mc->multicast.ib->rec.mlid));
---                     if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
---                             switch (rdma_port_get_link_layer(id->device, id->port_num)) {
---                             case IB_LINK_LAYER_INFINIBAND:
---                                     ib_sa_free_multicast(mc->multicast.ib);
---                                     kfree(mc);
---                                     break;
---                             case IB_LINK_LAYER_ETHERNET:
---                                     kref_put(&mc->mcref, release_mc);
---                                     break;
---                             default:
---                                     break;
---                             }
---                     }
+++ 
+++                     BUG_ON(id_priv->cma_dev->device != id->device);
+++ 
+++                     if (rdma_cap_ib_mcast(id->device, id->port_num)) {
+++                             ib_sa_free_multicast(mc->multicast.ib);
+++                             kfree(mc);
+++                     } else if (rdma_protocol_roce(id->device, id->port_num))
+++                             kref_put(&mc->mcref, release_mc);
+++ 
                        return;
                }
        }
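
The cma.c conversion above collapses every rdma_node_get_transport()/rdma_port_get_link_layer() pair into a single per-port predicate, and it exports rdma_event_msg() so consumers can log readable event names. A minimal sketch of a consumer of the new helper (example_cma_handler is hypothetical and not part of this diff):

#include <rdma/rdma_cm.h>

static int example_cma_handler(struct rdma_cm_id *id,
                               struct rdma_cm_event *event)
{
        /* rdma_event_msg() maps the enum to a string and falls back to
         * "unrecognized event" for out-of-range values, so it is safe
         * to call with any event code. */
        pr_info("cma event %s (%d), status %d\n",
                rdma_event_msg(event->event), event->event, event->status);
        return 0;
}
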
index 74c30f4c557e015df74ec153417e09d626f8da2e,74c30f4c557e015df74ec153417e09d626f8da2e,87e222ec7ee1fd8a41dad1c231f17f4e628dee3a,e9699c9942a99ca75e3ee5bddc6146a04795994d..600af266838ce6dc24698223066afd02b379f148
@@@@@ -179,12 -179,12 -179,12 -179,12 +179,12 @@@@@ static int is_vendor_method_in_use
        return 0;
    }
    
-- -int ib_response_mad(struct ib_mad *mad)
++ +int ib_response_mad(const struct ib_mad_hdr *hdr)
    {
-- -    return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
-- -            (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
-- -            ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
-- -             (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
++ +    return ((hdr->method & IB_MGMT_METHOD_RESP) ||
++ +            (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
++ +            ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
++ +             (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
    }
    EXPORT_SYMBOL(ib_response_mad);
    
@@@@@ -791,7 -791,7 -791,7 -791,7 +791,7 @@@@@ static int handle_outgoing_dr_smp(struc
        switch (ret)
        {
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-- -            if (ib_response_mad(&mad_priv->mad.mad) &&
++ +            if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) &&
                    mad_agent_priv->agent.recv_handler) {
                        local->mad_priv = mad_priv;
                        local->recv_mad_agent = mad_agent_priv;
@@@@@ -910,7 -910,7 -910,7 -910,7 +910,7 @@@@@ static int alloc_send_rmpp_list(struct 
        return 0;
    }
    
-- -int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
++ +int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
    {
        return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
    }
@@@@@ -1628,7 -1628,7 -1628,7 -1628,7 +1628,7 @@@@@ find_mad_agent(struct ib_mad_port_priva
        unsigned long flags;
    
        spin_lock_irqsave(&port_priv->reg_lock, flags);
-- -    if (ib_response_mad(mad)) {
++ +    if (ib_response_mad(&mad->mad_hdr)) {
                u32 hi_tid;
                struct ib_mad_agent_private *entry;
    
        return mad_agent;
    }
    
-- -static int validate_mad(struct ib_mad *mad, u32 qp_num)
++ +static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
    {
        int valid = 0;
    
        /* Make sure MAD base version is understood */
-- -    if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
++ +    if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
                pr_err("MAD received with unsupported base version %d\n",
-- -                    mad->mad_hdr.base_version);
++ +                    mad_hdr->base_version);
                goto out;
        }
    
        /* Filter SMI packets sent to other than QP0 */
-- -    if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
-- -        (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
++ +    if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
++ +        (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
                if (qp_num == 0)
                        valid = 1;
        } else {
        return valid;
    }
    
-- -static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
-- -                   struct ib_mad_hdr *mad_hdr)
++ +static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
++ +                        const struct ib_mad_hdr *mad_hdr)
    {
        struct ib_rmpp_mad *rmpp_mad;
    
                (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
    }
    
-- -static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
-- -                                 struct ib_mad_recv_wc *rwc)
++ +static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
++ +                                 const struct ib_mad_recv_wc *rwc)
    {
-- -    return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
++ +    return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
                rwc->recv_buf.mad->mad_hdr.mgmt_class;
    }
    
-- -static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
-- -                               struct ib_mad_send_wr_private *wr,
-- -                               struct ib_mad_recv_wc *rwc )
++ +static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
++ +                               const struct ib_mad_send_wr_private *wr,
++ +                               const struct ib_mad_recv_wc *rwc )
    {
        struct ib_ah_attr attr;
        u8 send_resp, rcv_resp;
        u8 port_num = mad_agent_priv->agent.port_num;
        u8 lmc;
    
-- -    send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
-- -    rcv_resp = ib_response_mad(rwc->recv_buf.mad);
++ +    send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
++ +    rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
    
        if (send_resp == rcv_resp)
                /* both requests, or both responses. GIDs different */
@@@@@ -1811,8 -1811,8 -1811,8 -1811,8 +1811,8 @@@@@ static inline int is_direct(u8 class
    }
    
    struct ib_mad_send_wr_private*
-- -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
-- -             struct ib_mad_recv_wc *wc)
++ +ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
++ +             const struct ib_mad_recv_wc *wc)
    {
        struct ib_mad_send_wr_private *wr;
        struct ib_mad *mad;
         * been notified that the send has completed
         */
        list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
-- -            if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
++ +            if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
                    wr->tid == mad->mad_hdr.tid &&
                    wr->timeout &&
                    rcv_has_same_class(wr, wc) &&
@@@@@ -1879,7 -1879,7 -1879,7 -1879,7 +1879,7 @@@@@ static void ib_mad_complete_recv(struc
        }
    
        /* Complete corresponding request */
-- -    if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
++ +    if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
                if (!mad_send_wr) {
@@@@@ -1979,7 -1979,7 -1979,7 -1979,7 +1979,7 @@@@@ static void ib_mad_recv_done_handler(st
                snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
    
        /* Validate MAD */
-- -    if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
++ +    if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num))
                goto out;
    
        response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
@@@@@ -2411,7 -2411,7 -2411,8 -2411,7 +2411,8 @@@@@ find_send_wr(struct ib_mad_agent_privat
    
        list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
                            agent_list) {
-- -            if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
++ +            if (is_rmpp_data_mad(mad_agent_priv,
++ +                                 mad_send_wr->send_buf.mad) &&
                    &mad_send_wr->send_buf == send_buf)
                        return mad_send_wr;
        }
@@@@@ -2938,7 -2938,7 -2939,7 -2938,7 +2939,7 @@@@@ static int ib_mad_port_open(struct ib_d
        init_mad_qp(port_priv, &port_priv->qp_info[1]);
    
        cq_size = mad_sendq_size + mad_recvq_size;
---     has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+++     has_smi = rdma_cap_ib_smi(device, port_num);
        if (has_smi)
                cq_size *= 2;
    
@@@@@ -3057,9 -3057,9 -3058,9 -3057,6 +3058,6 @@@@@ static void ib_mad_init_device(struct i
    {
        int start, end, i;
    
---     if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
---             return;
--- 
        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
        }
    
        for (i = start; i <= end; i++) {
+++             if (!rdma_cap_ib_mad(device, i))
+++                     continue;
+++ 
                if (ib_mad_port_open(device, i)) {
                        dev_err(&device->dev, "Couldn't open port %d\n", i);
                        goto error;
@@@@@ -3086,40 -3086,40 -3087,40 -3086,39 +3087,39 @@@@@ error_agent
                dev_err(&device->dev, "Couldn't close port %d\n", i);
    
    error:
---     i--;
+++     while (--i >= start) {
+++             if (!rdma_cap_ib_mad(device, i))
+++                     continue;
    
---     while (i >= start) {
                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
---             i--;
        }
    }
    
    static void ib_mad_remove_device(struct ib_device *device)
    {
---     int i, num_ports, cur_port;
--- 
---     if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
---             return;
+++     int start, end, i;
    
        if (device->node_type == RDMA_NODE_IB_SWITCH) {
---             num_ports = 1;
---             cur_port = 0;
+++             start = 0;
+++             end   = 0;
        } else {
---             num_ports = device->phys_port_cnt;
---             cur_port = 1;
+++             start = 1;
+++             end   = device->phys_port_cnt;
        }
---     for (i = 0; i < num_ports; i++, cur_port++) {
---             if (ib_agent_port_close(device, cur_port))
+++ 
+++     for (i = start; i <= end; i++) {
+++             if (!rdma_cap_ib_mad(device, i))
+++                     continue;
+++ 
+++             if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
---                             "Couldn't close port %d for agents\n",
---                             cur_port);
---             if (ib_mad_port_close(device, cur_port))
---                     dev_err(&device->dev, "Couldn't close port %d\n",
---                             cur_port);
+++                             "Couldn't close port %d for agents\n", i);
+++             if (ib_mad_port_close(device, i))
+++                     dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
    }
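
mad.c now derives per-port MAD support from rdma_cap_ib_mad() instead of a device-wide transport check, so a device with mixed-capability ports opens only the ports that actually speak MAD. The init, error-unwind, and remove paths above all follow the same idiom; a condensed sketch of that pattern (for_each_mad_port() is a hypothetical helper, for illustration only):

#include <rdma/ib_verbs.h>

static void for_each_mad_port(struct ib_device *device,
                              void (*fn)(struct ib_device *device, int port))
{
        int start, end, i;

        /* Switches expose management on port 0 only. */
        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
        } else {
                start = 1;
                end   = device->phys_port_cnt;
        }

        for (i = start; i <= end; i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;       /* e.g. an iWARP port has no MAD */
                fn(device, i);
        }
}
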
    
index 928cdd20e2d11a1abd7c0afb2125f5e7cb6bcc6d,928cdd20e2d11a1abd7c0afb2125f5e7cb6bcc6d,66b5217841beec8622a151d296867f1203f86833,278cfaee9a94fc24822bd64aa507c4a2549a1a0b..e58d701b779110eb29e20539b2c77ec5d96abc93
@@@@@ -99,7 -99,7 -99,7 -99,6 +99,6 @@@@@ struct ib_umad_port 
    };
    
    struct ib_umad_device {
---     int                  start_port, end_port;
        struct kobject       kobj;
        struct ib_umad_port  port[0];
    };
@@@@@ -426,11 -426,11 -426,11 -425,11 +425,11 @@@@@ static int is_duplicate(struct ib_umad_
                 * the same TID, reject the second as a duplicate.  This is more
                 * restrictive than required by the spec.
                 */
-- -            if (!ib_response_mad((struct ib_mad *) hdr)) {
-- -                    if (!ib_response_mad((struct ib_mad *) sent_hdr))
++ +            if (!ib_response_mad(hdr)) {
++ +                    if (!ib_response_mad(sent_hdr))
                                return 1;
                        continue;
-- -            } else if (!ib_response_mad((struct ib_mad *) sent_hdr))
++ +            } else if (!ib_response_mad(sent_hdr))
                        continue;
    
                if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
@@@@@ -1273,16 -1273,16 -1273,16 -1272,10 +1272,10 @@@@@ static void ib_umad_add_one(struct ib_d
    {
        struct ib_umad_device *umad_dev;
        int s, e, i;
+++     int count = 0;
    
---     if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
---             return;
--- 
---     if (device->node_type == RDMA_NODE_IB_SWITCH)
---             s = e = 0;
---     else {
---             s = 1;
---             e = device->phys_port_cnt;
---     }
+++     s = rdma_start_port(device);
+++     e = rdma_end_port(device);
    
        umad_dev = kzalloc(sizeof *umad_dev +
                           (e - s + 1) * sizeof (struct ib_umad_port),
    
        kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
    
---     umad_dev->start_port = s;
---     umad_dev->end_port   = e;
--- 
        for (i = s; i <= e; ++i) {
+++             if (!rdma_cap_ib_mad(device, i))
+++                     continue;
+++ 
                umad_dev->port[i - s].umad_dev = umad_dev;
    
                if (ib_umad_init_port(device, i, umad_dev,
                                      &umad_dev->port[i - s]))
                        goto err;
+++ 
+++             count++;
        }
    
+++     if (!count)
+++             goto free;
+++ 
        ib_set_client_data(device, &umad_client, umad_dev);
    
        return;
    
    err:
---     while (--i >= s)
---             ib_umad_kill_port(&umad_dev->port[i - s]);
+++     while (--i >= s) {
+++             if (!rdma_cap_ib_mad(device, i))
+++                     continue;
    
+++             ib_umad_kill_port(&umad_dev->port[i - s]);
+++     }
+++ free:
        kobject_put(&umad_dev->kobj);
    }
    
@@@@@ -1322,8 -1322,8 -1322,8 -1324,10 +1324,10 @@@@@ static void ib_umad_remove_one(struct i
        if (!umad_dev)
                return;
    
---     for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
---             ib_umad_kill_port(&umad_dev->port[i]);
+++     for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
+++             if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
+++                     ib_umad_kill_port(&umad_dev->port[i]);
+++     }
    
        kobject_put(&umad_dev->kobj);
    }
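
user_mad.c drops its cached start_port/end_port fields in favor of rdma_start_port() and rdma_end_port(). Judging by the call sites above, those helpers simply encode the switch-versus-CA port numbering rule; a sketch of the assumed shape:

static inline u8 rdma_start_port(const struct ib_device *device)
{
        return device->node_type == RDMA_NODE_IB_SWITCH ? 0 : 1;
}

static inline u8 rdma_end_port(const struct ib_device *device)
{
        return device->node_type == RDMA_NODE_IB_SWITCH ?
                0 : device->phys_port_cnt;
}
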
index f93eb8da7b5ad443900c3b8b423da505d0531a95,4c01a34512daa533ed267c5fcd674aacab6c5e11,f93eb8da7b5ad443900c3b8b423da505d0531a95,d110a5eb77a820ec62187f7f69a40242b59d52bf..685a362f6ed6e7822d2707904292153e0c5c0f10
    
    #include "core_priv.h"
    
+ ++static const char * const ib_events[] = {
+ ++    [IB_EVENT_CQ_ERR]               = "CQ error",
+ ++    [IB_EVENT_QP_FATAL]             = "QP fatal error",
+ ++    [IB_EVENT_QP_REQ_ERR]           = "QP request error",
+ ++    [IB_EVENT_QP_ACCESS_ERR]        = "QP access error",
+ ++    [IB_EVENT_COMM_EST]             = "communication established",
+ ++    [IB_EVENT_SQ_DRAINED]           = "send queue drained",
+ ++    [IB_EVENT_PATH_MIG]             = "path migration successful",
+ ++    [IB_EVENT_PATH_MIG_ERR]         = "path migration error",
+ ++    [IB_EVENT_DEVICE_FATAL]         = "device fatal error",
+ ++    [IB_EVENT_PORT_ACTIVE]          = "port active",
+ ++    [IB_EVENT_PORT_ERR]             = "port error",
+ ++    [IB_EVENT_LID_CHANGE]           = "LID change",
+ ++    [IB_EVENT_PKEY_CHANGE]          = "P_key change",
+ ++    [IB_EVENT_SM_CHANGE]            = "SM change",
+ ++    [IB_EVENT_SRQ_ERR]              = "SRQ error",
+ ++    [IB_EVENT_SRQ_LIMIT_REACHED]    = "SRQ limit reached",
+ ++    [IB_EVENT_QP_LAST_WQE_REACHED]  = "last WQE reached",
+ ++    [IB_EVENT_CLIENT_REREGISTER]    = "client reregister",
+ ++    [IB_EVENT_GID_CHANGE]           = "GID changed",
+ ++};
+ ++
+ ++const char *ib_event_msg(enum ib_event_type event)
+ ++{
+ ++    size_t index = event;
+ ++
+ ++    return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
+ ++                    ib_events[index] : "unrecognized event";
+ ++}
+ ++EXPORT_SYMBOL(ib_event_msg);
+ ++
+ ++static const char * const wc_statuses[] = {
+ ++    [IB_WC_SUCCESS]                 = "success",
+ ++    [IB_WC_LOC_LEN_ERR]             = "local length error",
+ ++    [IB_WC_LOC_QP_OP_ERR]           = "local QP operation error",
+ ++    [IB_WC_LOC_EEC_OP_ERR]          = "local EE context operation error",
+ ++    [IB_WC_LOC_PROT_ERR]            = "local protection error",
+ ++    [IB_WC_WR_FLUSH_ERR]            = "WR flushed",
+ ++    [IB_WC_MW_BIND_ERR]             = "memory management operation error",
+ ++    [IB_WC_BAD_RESP_ERR]            = "bad response error",
+ ++    [IB_WC_LOC_ACCESS_ERR]          = "local access error",
+ ++    [IB_WC_REM_INV_REQ_ERR]         = "invalid request error",
+ ++    [IB_WC_REM_ACCESS_ERR]          = "remote access error",
+ ++    [IB_WC_REM_OP_ERR]              = "remote operation error",
+ ++    [IB_WC_RETRY_EXC_ERR]           = "transport retry counter exceeded",
+ ++    [IB_WC_RNR_RETRY_EXC_ERR]       = "RNR retry counter exceeded",
+ ++    [IB_WC_LOC_RDD_VIOL_ERR]        = "local RDD violation error",
+ ++    [IB_WC_REM_INV_RD_REQ_ERR]      = "remote invalid RD request",
+ ++    [IB_WC_REM_ABORT_ERR]           = "operation aborted",
+ ++    [IB_WC_INV_EECN_ERR]            = "invalid EE context number",
+ ++    [IB_WC_INV_EEC_STATE_ERR]       = "invalid EE context state",
+ ++    [IB_WC_FATAL_ERR]               = "fatal error",
+ ++    [IB_WC_RESP_TIMEOUT_ERR]        = "response timeout error",
+ ++    [IB_WC_GENERAL_ERR]             = "general error",
+ ++};
+ ++
+ ++const char *ib_wc_status_msg(enum ib_wc_status status)
+ ++{
+ ++    size_t index = status;
+ ++
+ ++    return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
+ ++                    wc_statuses[index] : "unrecognized status";
+ ++}
+ ++EXPORT_SYMBOL(ib_wc_status_msg);
+ ++
    __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
    {
        switch (rate) {
@@@@@ -198,11 -263,11 -198,11 -198,9 +263,9 @@@@@ int ib_init_ah_from_wc(struct ib_devic
        u32 flow_class;
        u16 gid_index;
        int ret;
---     int is_eth = (rdma_port_get_link_layer(device, port_num) ==
---                     IB_LINK_LAYER_ETHERNET);
    
        memset(ah_attr, 0, sizeof *ah_attr);
---     if (is_eth) {
+++     if (rdma_cap_eth_ah(device, port_num)) {
                if (!(wc->wc_flags & IB_WC_GRH))
                        return -EPROTOTYPE;
    
@@@@@ -871,7 -936,7 -871,7 -869,7 +934,7 @@@@@ int ib_resolve_eth_l2_attrs(struct ib_q
        union ib_gid  sgid;
    
        if ((*qp_attr_mask & IB_QP_AV)  &&
---         (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
+++         (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
                ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
                                   qp_attr->ah_attr.grh.sgid_index, &sgid);
                if (ret)
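
verbs.c gains ib_event_msg() and ib_wc_status_msg(), mirroring rdma_event_msg() in cma.c. A hypothetical completion-drain loop showing the intended use of the status helper (example_drain_cq is not part of this diff):

#include <rdma/ib_verbs.h>

static void example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc;

        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status == IB_WC_SUCCESS)
                        continue;
                /* Readable status plus the raw value for grep-ability. */
                pr_err("wr_id 0x%llx failed: %s (%d)\n",
                       (unsigned long long)wc.wr_id,
                       ib_wc_status_msg(wc.status), wc.status);
        }
}
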
index 06d91e7e4163ce2e0a86424a12b66ceb549f28ef,667df9d423eff1168020bfc6196a66324604b5f7,918814cd0f806f5344e5f293e2bb059010237727,918814cd0f806f5344e5f293e2bb059010237727..c3f654d20038eaab65ee66da2924e7404ced8c52
    #define DRV_RELDATE "July 1, 2013"
    
    MODULE_AUTHOR("Roland Dreier");
 ---MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
 ---               "v" DRV_VERSION " (" DRV_RELDATE ")");
 +++MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
    MODULE_LICENSE("Dual BSD/GPL");
 +++MODULE_VERSION(DRV_VERSION);
 +++MODULE_INFO(release_date, DRV_RELDATE);
    
    static unsigned int srp_sg_tablesize;
    static unsigned int cmd_sg_entries;
@@@@@ -254,7 -253,8 -253,7 -253,7 +254,8 @@@@@ static void srp_free_iu(struct srp_hos
    
    static void srp_qp_event(struct ib_event *event, void *context)
    {
- --    pr_debug("QP event %d\n", event->event);
+ ++    pr_debug("QP event %s (%d)\n",
+ ++             ib_event_msg(event->event), event->event);
    }
    
    static int srp_init_qp(struct srp_target_port *target,
@@@@@ -466,13 -466,14 -465,14 -465,14 +467,13 @@@@@ static struct srp_fr_pool *srp_alloc_fr
     */
    static void srp_destroy_qp(struct srp_rdma_ch *ch)
    {
 ---    struct srp_target_port *target = ch->target;
        static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
        struct ib_recv_wr *bad_wr;
        int ret;
    
        /* Destroying a QP and reusing ch->done is only safe if not connected */
 ---    WARN_ON_ONCE(target->connected);
 +++    WARN_ON_ONCE(ch->connected);
    
        ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
        WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
@@@@@ -781,7 -782,7 -781,7 -781,7 +782,7 @@@@@ static int srp_send_req(struct srp_rdma
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
 ---                         (unsigned long long) be64_to_cpu(target->ioc_guid));
 +++                         be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
@@@@@ -811,19 -812,35 -811,35 -811,35 +812,19 @@@@@ static bool srp_queue_remove_work(struc
        return changed;
    }
    
 ---static bool srp_change_conn_state(struct srp_target_port *target,
 ---                              bool connected)
 ---{
 ---    bool changed = false;
 ---
 ---    spin_lock_irq(&target->lock);
 ---    if (target->connected != connected) {
 ---            target->connected = connected;
 ---            changed = true;
 ---    }
 ---    spin_unlock_irq(&target->lock);
 ---
 ---    return changed;
 ---}
 ---
    static void srp_disconnect_target(struct srp_target_port *target)
    {
        struct srp_rdma_ch *ch;
        int i;
    
 ---    if (srp_change_conn_state(target, false)) {
 ---            /* XXX should send SRP_I_LOGOUT request */
 +++    /* XXX should send SRP_I_LOGOUT request */
    
 ---            for (i = 0; i < target->ch_count; i++) {
 ---                    ch = &target->ch[i];
 ---                    if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
 ---                            shost_printk(KERN_DEBUG, target->scsi_host,
 ---                                         PFX "Sending CM DREQ failed\n");
 ---                    }
 +++    for (i = 0; i < target->ch_count; i++) {
 +++            ch = &target->ch[i];
 +++            ch->connected = false;
 +++            if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
 +++                    shost_printk(KERN_DEBUG, target->scsi_host,
 +++                                 PFX "Sending CM DREQ failed\n");
                }
        }
    }
@@@@@ -836,7 -853,7 -852,7 -852,7 +837,7 @@@@@ static void srp_free_req_data(struct sr
        struct srp_request *req;
        int i;
    
 ---    if (!ch->target || !ch->req_ring)
 +++    if (!ch->req_ring)
                return;
    
        for (i = 0; i < target->req_ring_size; ++i) {
@@@@@ -970,26 -987,14 -986,14 -986,14 +971,26 @@@@@ static void srp_rport_delete(struct srp
        srp_queue_remove_work(target);
    }
    
 +++/**
 +++ * srp_connected_ch() - number of connected channels
 +++ * @target: SRP target port.
 +++ */
 +++static int srp_connected_ch(struct srp_target_port *target)
 +++{
 +++    int i, c = 0;
 +++
 +++    for (i = 0; i < target->ch_count; i++)
 +++            c += target->ch[i].connected;
 +++
 +++    return c;
 +++}
 +++
    static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
    {
        struct srp_target_port *target = ch->target;
        int ret;
    
 ---    WARN_ON_ONCE(!multich && target->connected);
 ---
 ---    target->qp_in_error = false;
 +++    WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
    
        ret = srp_lookup_path(ch);
        if (ret)
                 */
                switch (ch->status) {
                case 0:
 ---                    srp_change_conn_state(target, true);
 +++                    ch->connected = true;
                        return 0;
    
                case SRP_PORT_REDIRECT:
@@@@@ -1210,10 -1215,14 -1214,14 -1214,14 +1211,10 @@@@@ static int srp_rport_reconnect(struct s
         */
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
 ---            if (!ch->target)
 ---                    break;
                ret += srp_new_cm_id(ch);
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
 ---            if (!ch->target)
 ---                    break;
                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];
    
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
 ---            if (!ch->target)
 ---                    break;
                /*
                 * Whether or not creating a new CM ID succeeded, create a new
                 * QP. This guarantees that all completion callback function
                for (j = 0; j < target->queue_size; ++j)
                        list_add(&ch->tx_ring[j]->list, &ch->free_tx);
        }
 +++
 +++    target->qp_in_error = false;
 +++
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
 ---            if (ret || !ch->target) {
 ---                    if (i > 1)
 ---                            ret = 0;
 +++            if (ret)
                        break;
 ---            }
                ret = srp_connect_ch(ch, multich);
                multich = true;
        }
@@@@@ -1832,7 -1843,7 -1842,7 -1842,7 +1833,7 @@@@@ static void srp_process_aer_req(struct 
        s32 delta = be32_to_cpu(req->req_lim_delta);
    
        shost_printk(KERN_ERR, target->scsi_host, PFX
 ---                 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
 +++                 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
    
        if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
                shost_printk(KERN_ERR, target->scsi_host, PFX
@@@@@ -1919,20 -1930,21 -1929,20 -1929,20 +1920,21 @@@@@ static void srp_handle_qp_err(u64 wr_id
                return;
        }
    
 ---    if (target->connected && !target->qp_in_error) {
 +++    if (ch->connected && !target->qp_in_error) {
                if (wr_id & LOCAL_INV_WR_ID_MASK) {
                        shost_printk(KERN_ERR, target->scsi_host, PFX
- --                                 "LOCAL_INV failed with status %d\n",
- --                                 wc_status);
+ ++                                 "LOCAL_INV failed with status %s (%d)\n",
+ ++                                 ib_wc_status_msg(wc_status), wc_status);
                } else if (wr_id & FAST_REG_WR_ID_MASK) {
                        shost_printk(KERN_ERR, target->scsi_host, PFX
- --                                 "FAST_REG_MR failed status %d\n",
- --                                 wc_status);
+ ++                                 "FAST_REG_MR failed status %s (%d)\n",
+ ++                                 ib_wc_status_msg(wc_status), wc_status);
                } else {
                        shost_printk(KERN_ERR, target->scsi_host,
- --                                 PFX "failed %s status %d for iu %p\n",
+ ++                                 PFX "failed %s status %s (%d) for iu %p\n",
                                     send_err ? "send" : "receive",
- --                                 wc_status, (void *)(uintptr_t)wr_id);
+ ++                                 ib_wc_status_msg(wc_status), wc_status,
+ ++                                 (void *)(uintptr_t)wr_id);
                }
                queue_work(system_long_wq, &target->tl_err_work);
        }
@@@@@ -2024,7 -2036,7 -2034,7 -2034,7 +2026,7 @@@@@ static int srp_queuecommand(struct Scsi
        memset(cmd, 0, sizeof *cmd);
    
        cmd->opcode = SRP_CMD;
 ---    cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
 +++    int_to_scsilun(scmnd->device->lun, &cmd->lun);
        cmd->tag    = tag;
        memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
    
@@@@@ -2357,7 -2369,7 -2367,7 -2367,7 +2359,7 @@@@@ static int srp_cm_handler(struct ib_cm_
        case IB_CM_DREQ_RECEIVED:
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "DREQ received - connection closed\n");
 ---            srp_change_conn_state(target, false);
 +++            ch->connected = false;
                if (ib_send_cm_drep(cm_id, NULL, 0))
                        shost_printk(KERN_ERR, target->scsi_host,
                                     PFX "Sending CM DREP failed\n");
@@@@@ -2404,8 -2416,8 -2414,8 -2414,8 +2406,8 @@@@@ srp_change_queue_depth(struct scsi_devi
        return scsi_change_queue_depth(sdev, qdepth);
    }
    
 ---static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
 ---                         unsigned int lun, u8 func)
 +++static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
 +++                         u8 func)
    {
        struct srp_target_port *target = ch->target;
        struct srp_rport *rport = target->rport;
        struct srp_iu *iu;
        struct srp_tsk_mgmt *tsk_mgmt;
    
 ---    if (!target->connected || target->qp_in_error)
 +++    if (!ch->connected || target->qp_in_error)
                return -1;
    
        init_completion(&ch->tsk_mgmt_done);
        memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
    
        tsk_mgmt->opcode        = SRP_TSK_MGMT;
 ---    tsk_mgmt->lun           = cpu_to_be64((u64) lun << 48);
 +++    int_to_scsilun(lun, &tsk_mgmt->lun);
        tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
        tsk_mgmt->tsk_mgmt_func = func;
        tsk_mgmt->task_tag      = req_tag;
@@@@@ -2553,7 -2565,8 -2563,8 -2563,8 +2555,7 @@@@@ static ssize_t show_id_ext(struct devic
    {
        struct srp_target_port *target = host_to_target(class_to_shost(dev));
    
 ---    return sprintf(buf, "0x%016llx\n",
 ---                   (unsigned long long) be64_to_cpu(target->id_ext));
 +++    return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
    }
    
    static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
    {
        struct srp_target_port *target = host_to_target(class_to_shost(dev));
    
 ---    return sprintf(buf, "0x%016llx\n",
 ---                   (unsigned long long) be64_to_cpu(target->ioc_guid));
 +++    return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
    }
    
    static ssize_t show_service_id(struct device *dev,
    {
        struct srp_target_port *target = host_to_target(class_to_shost(dev));
    
 ---    return sprintf(buf, "0x%016llx\n",
 ---                   (unsigned long long) be64_to_cpu(target->service_id));
 +++    return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
    }
    
    static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
@@@@@ -2760,7 -2775,7 -2773,7 -2773,7 +2762,7 @@@@@ static int srp_add_target(struct srp_ho
    
        target->state = SRP_TARGET_SCANNING;
        sprintf(target->target_name, "SRP.T10:%016llX",
 ---             (unsigned long long) be64_to_cpu(target->id_ext));
 +++            be64_to_cpu(target->id_ext));
    
        if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
                return -ENODEV;
        scsi_scan_target(&target->scsi_host->shost_gendev,
                         0, target->scsi_id, SCAN_WILD_CARD, 0);
    
 ---    if (!target->connected || target->qp_in_error) {
 +++    if (srp_connected_ch(target) < target->ch_count ||
 +++        target->qp_in_error) {
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "SCSI scan failed - removing SCSI host\n");
                srp_queue_remove_work(target);
@@@@@ -3134,7 -3148,7 -3146,7 -3146,7 +3136,7 @@@@@ static ssize_t srp_create_target(struc
        target_host->transportt  = ib_srp_transport_template;
        target_host->max_channel = 0;
        target_host->max_id      = 1;
 ---    target_host->max_lun     = SRP_MAX_LUN;
 +++    target_host->max_lun     = -1LL;
        target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
    
        target = host_to_target(target_host);
    
        ret = srp_parse_options(buf, target);
        if (ret)
 ---            goto err;
 +++            goto out;
    
        ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
        if (ret)
 ---            goto err;
 +++            goto out;
    
        target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
    
                             be64_to_cpu(target->ioc_guid),
                             be64_to_cpu(target->initiator_ext));
                ret = -EEXIST;
 ---            goto err;
 +++            goto out;
        }
    
        if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
        spin_lock_init(&target->lock);
        ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
        if (ret)
 ---            goto err;
 +++            goto out;
    
        ret = -ENOMEM;
        target->ch_count = max_t(unsigned, num_online_nodes(),
        target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
                             GFP_KERNEL);
        if (!target->ch)
 ---            goto err;
 +++            goto out;
    
        node_idx = 0;
        for_each_online_node(node) {
@@@@@ -3303,6 -3317,9 -3315,9 -3315,9 +3305,6 @@@@@ err_disconnect
        }
    
        kfree(target->ch);
 ---
 ---err:
 ---    scsi_host_put(target_host);
        goto out;
    }
    
diff --combined include/rdma/ib_verbs.h
index 65994a19e84055e7b4f4d8cde83a07eb41c1a69a,672fc8f20409e7519a3265666cb9179a1f029954,65994a19e84055e7b4f4d8cde83a07eb41c1a69a,3ebf0c019a66f9316562c70919f8bff485526818..ad499bda62a4eec633db287313e271e49d5d7218
@@@@@ -81,6 -81,6 -81,6 -81,13 +81,13 @@@@@ enum rdma_transport_type 
        RDMA_TRANSPORT_USNIC_UDP
    };
    
+++ enum rdma_protocol_type {
+++     RDMA_PROTOCOL_IB,
+++     RDMA_PROTOCOL_IBOE,
+++     RDMA_PROTOCOL_IWARP,
+++     RDMA_PROTOCOL_USNIC_UDP
+++ };
+++ 
    __attribute_const__ enum rdma_transport_type
    rdma_node_get_transport(enum rdma_node_type node_type);
    
@@@@@ -346,6 -346,6 -346,6 -353,40 +353,40 @@@@@ union rdma_protocol_stats 
        struct iw_protocol_stats        iw;
    };
    
+++ /* Define bits for the various kinds of functionality the core must
+++  * support for this port.
+++  */
+++ /* Management                           0x00000FFF */
+++ #define RDMA_CORE_CAP_IB_MAD            0x00000001
+++ #define RDMA_CORE_CAP_IB_SMI            0x00000002
+++ #define RDMA_CORE_CAP_IB_CM             0x00000004
+++ #define RDMA_CORE_CAP_IW_CM             0x00000008
+++ #define RDMA_CORE_CAP_IB_SA             0x00000010
+++ 
+++ /* Address format                       0x000FF000 */
+++ #define RDMA_CORE_CAP_AF_IB             0x00001000
+++ #define RDMA_CORE_CAP_ETH_AH            0x00002000
+++ 
+++ /* Protocol                             0xFFF00000 */
+++ #define RDMA_CORE_CAP_PROT_IB           0x00100000
+++ #define RDMA_CORE_CAP_PROT_ROCE         0x00200000
+++ #define RDMA_CORE_CAP_PROT_IWARP        0x00400000
+++ 
+++ #define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
+++                                     | RDMA_CORE_CAP_IB_MAD \
+++                                     | RDMA_CORE_CAP_IB_SMI \
+++                                     | RDMA_CORE_CAP_IB_CM  \
+++                                     | RDMA_CORE_CAP_IB_SA  \
+++                                     | RDMA_CORE_CAP_AF_IB)
+++ #define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
+++                                     | RDMA_CORE_CAP_IB_MAD  \
+++                                     | RDMA_CORE_CAP_IB_CM   \
+++                                     | RDMA_CORE_CAP_IB_SA   \
+++                                     | RDMA_CORE_CAP_AF_IB   \
+++                                     | RDMA_CORE_CAP_ETH_AH)
+++ #define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
+++                                     | RDMA_CORE_CAP_IW_CM)
+++ 
    struct ib_port_attr {
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
@@@@@ -412,6 -412,8 -412,6 -453,6 +453,8 @@@@@ enum ib_event_type 
        IB_EVENT_GID_CHANGE,
    };
    
+ ++__attribute_const__ const char *ib_event_msg(enum ib_event_type event);
+ ++
    struct ib_event {
        struct ib_device        *device;
        union {
@@@@@ -663,6 -665,8 -663,6 -704,6 +706,8 @@@@@ enum ib_wc_status 
        IB_WC_GENERAL_ERR
    };
    
+ ++__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
+ ++
    enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
@@@@@ -1474,6 -1478,6 -1474,6 -1515,12 +1519,12 @@@@@ struct ib_dma_mapping_ops 
    
    struct iw_cm_verbs;
    
+++ struct ib_port_immutable {
+++     int                           pkey_tbl_len;
+++     int                           gid_tbl_len;
+++     u32                           core_cap_flags;
+++ };
+++ 
    struct ib_device {
        struct device                *dma_device;
    
        struct list_head              client_data_list;
    
        struct ib_cache               cache;
---     int                          *pkey_tbl_len;
---     int                          *gid_tbl_len;
+++     /**
+++      * port_immutable is indexed by port number
+++      */
+++     struct ib_port_immutable     *port_immutable;
    
        int                           num_comp_vectors;
    
        u32                          local_dma_lkey;
        u8                           node_type;
        u8                           phys_port_cnt;
+++ 
+++     /**
+++      * The following mandatory functions are used only at device
+++      * registration.  Keep functions such as these at the end of this
+++      * structure to avoid cache line misses when accessing struct ib_device
+++      * in fast paths.
+++      */
+++     int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
    };
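
For context, a minimal sketch (not part of this patch) of how a driver might implement the new mandatory hook; the name my_drv_get_port_immutable() is hypothetical, and RDMA_CORE_PORT_IBA_ROCE stands in for whatever protocol the port actually speaks:

	static int my_drv_get_port_immutable(struct ib_device *ibdev, u8 port_num,
					     struct ib_port_immutable *immutable)
	{
		struct ib_port_attr attr;
		int err;

		/* Table lengths come from an ordinary port query... */
		err = ib_query_port(ibdev, port_num, &attr);
		if (err)
			return err;

		immutable->pkey_tbl_len   = attr.pkey_tbl_len;
		immutable->gid_tbl_len    = attr.gid_tbl_len;
		/* ...while the capability mask is one of the composite defines above. */
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;

		return 0;
	}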
    
    struct ib_client {
@@@@@ -1743,6 -1747,6 -1743,6 -1800,242 +1804,242 @@@@@ int ib_query_port(struct ib_device *dev
    enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
                                               u8 port_num);
    
+++ /**
+++  * rdma_start_port - Return the first valid port number for the device
+++  * specified
+++  *
+++  * @device: Device to be checked
+++  *
+++  * Return: start port number
+++  */
+++ static inline u8 rdma_start_port(const struct ib_device *device)
+++ {
+++     return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+++ }
+++ 
+++ /**
+++  * rdma_end_port - Return the last valid port number for the device
+++  * specified
+++  *
+++  * @device: Device to be checked
+++  *
+++  * Return: last port number
+++  */
+++ static inline u8 rdma_end_port(const struct ib_device *device)
+++ {
+++     return (device->node_type == RDMA_NODE_IB_SWITCH) ?
+++             0 : device->phys_port_cnt;
+++ }
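
Usage sketch, assuming a valid device pointer dev: these helpers bound per-port loops so that switches (which expose a single port 0) and multi-port HCAs are handled uniformly:

	u8 port;

	for (port = rdma_start_port(dev); port <= rdma_end_port(dev); port++) {
		if (!rdma_cap_ib_mad(dev, port))
			continue;
		/* e.g. set up a MAD agent on this port */
	}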
+++ 
+++ static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
+++ }
+++ 
+++ static inline bool rdma_protocol_roce(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
+++ }
+++ 
+++ static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
+++ }
+++ 
+++ static inline bool rdma_ib_or_roce(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags &
+++             (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
+++ }
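
These per-port predicates are intended to replace node-type switches on rdma_node_get_transport(); a hedged sketch of the dispatch style, mirroring the svcrdma change further below:

	if (rdma_protocol_iwarp(device, port_num)) {
		/* iWARP: iWARP CM, single-SGE RDMA READs */
	} else if (rdma_ib_or_roce(device, port_num)) {
		/* IB or RoCE: IB CM, MADs, SA queries */
	} else {
		return -EPROTONOSUPPORT;	/* illustrative error choice */
	}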
+++ 
+++ /**
+++  * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
+++  * Management Datagrams.
+++  * @device: Device to check
+++  * @port_num: Port number to check
+++  *
+++  * Management Datagrams (MAD) are a required part of the InfiniBand
+++  * specification and are supported on all InfiniBand devices.  A slightly
+++  * extended version is also supported on OPA interfaces.
+++  *
+++  * Return: true if the port supports sending/receiving of MAD packets.
+++  */
+++ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
+++ }
+++ 
+++ /**
+++  * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
+++  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
+++  * @device: Device to check
+++  * @port_num: Port number to check
+++  *
+++  * Each InfiniBand node is required to provide a Subnet Management Agent
+++  * that the subnet manager can access.  Prior to the fabric being fully
+++  * configured by the subnet manager, the SMA is accessed via a well-known
+++  * interface called the Subnet Management Interface (SMI).  This interface
+++  * uses directed route packets to communicate with the SM to get around the
+++  * chicken and egg problem of the SM needing to know what's on the fabric
+++  * in order to configure the fabric, and needing to configure the fabric in
+++  * order to send packets to the devices on the fabric.  These directed
+++  * route packets do not need the fabric fully configured in order to reach
+++  * their destination.  The SMI is the only method allowed to send
+++  * directed route packets on an InfiniBand fabric.
+++  *
+++  * Return: true if the port provides an SMI.
+++  */
+++ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
+++ }
+++ 
+++ /**
+++  * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
+++  * Communication Manager.
+++  * @device: Device to check
+++  * @port_num: Port number to check
+++  *
+++  * The InfiniBand Communication Manager is one of many pre-defined General
+++  * Service Agents (GSA) that are accessed via the General Service
+++  * Interface (GSI).  Its role is to facilitate establishment of connections
+++  * between nodes as well as other management-related tasks for established
+++  * connections.
+++  *
+++  * Return: true if the port supports an IB CM (though this does not
+++  * guarantee that a CM is actually running).
+++  */
+++ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
+++ }
+++ 
+++ /**
+++  * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
+++  * Communication Manager.
+++  * @device: Device to check
+++  * @port_num: Port number to check
+++  *
+++  * Similar to above, but specific to iWARP connections, which have a
+++  * different management protocol than InfiniBand.
+++  *
+++  * Return: true if the port supports an iWARP CM (though this does not
+++  * guarantee that a CM is actually running).
+++  */
+++ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
+++ }
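
A sketch of how a consumer might select a connection manager per port; the entry points named in the comments are the existing IB/iWARP CM APIs, cited purely for illustration:

	if (rdma_cap_ib_cm(device, port_num)) {
		/* connect through the IB CM, e.g. ib_create_cm_id() */
	} else if (rdma_cap_iw_cm(device, port_num)) {
		/* connect through the iWARP CM, e.g. iw_create_cm_id() */
	}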
+++ 
+++ /**
+++  * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
+++  * Subnet Administration.
+++  * @device: Device to check
+++  * @port_num: Port number to check
+++  *
+++  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
+++  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
+++  * fabrics, devices should resolve routes to other hosts by contacting the
+++  * SA to query the proper route.
+++  *
+++  * Return: true if the port should act as a client to the fabric Subnet
+++  * Administration interface.  This does not imply that the SA service is
+++  * running locally.
+++  */
+++ static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
+++ }
+++ 
+++ /**
+++  * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
+++  * Multicast.
+++  * @device: Device to check
+++  * @port_num: Port number to check
+++  *
+++  * InfiniBand multicast registration is more complex than normal IPv4 or
+++  * IPv6 multicast registration.  Each Host Channel Adapter must register
+++  * with the Subnet Manager when it wishes to join a multicast group.  It
+++  * should do so only once, regardless of how many queue pairs it attaches
+++  * to the group, and it should leave the group only after all queue pairs
+++  * attached to the group have been detached.
+++  *
+++  * Return: true if the port must undertake the additional administrative
+++  * overhead of registering/unregistering with the SM and tracking of the
+++  * total number of queue pairs attached to the multicast group.
+++  */
+++ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num)
+++ {
+++     return rdma_cap_ib_sa(device, port_num);
+++ }
+++ 
+++ /**
+++  * rdma_cap_af_ib - Check if the port of a device supports native
+++  * InfiniBand addressing (AF_IB).
+++  * @device: Device to check
+++  * @port_num: Port number to check
+++  *
+++  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
+++  * GID.  RoCE uses a different mechanism, but still generates a GID via
+++  * a prescribed mechanism and port-specific data.
+++  *
+++  * Return: true if the port uses a GID address to identify devices on the
+++  * network.
+++  */
+++ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
+++ }
+++ 
+++ /**
+++  * rdma_cap_eth_ah - Check if the port of a device supports Ethernet
+++  * Address Handles.
+++  * @device: Device to check
+++  * @port_num: Port number to check
+++  *
+++  * RoCE is InfiniBand over Ethernet, and it uses a well-defined technique
+++  * to fabricate GIDs over Ethernet/IP specific addresses native to the
+++  * port.  Normally, packet headers are generated by the sending host
+++  * adapter, but when sending connectionless datagrams, we must manually
+++  * inject the proper headers for the fabric we are communicating over.
+++  *
+++  * Return: true if we are running as a RoCE port and must force the
+++  * addition of a Global Route Header built from our Ethernet Address
+++  * Handle into our header list for connectionless packets.
+++  */
+++ static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num)
+++ {
+++     return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
+++ }
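
Usage sketch, assuming an address handle is being built for a datagram: a RoCE port must carry a Global Route Header, so callers can key the GRH flag off this capability:

	struct ib_ah_attr ah_attr = { };

	if (rdma_cap_eth_ah(device, port_num))
		ah_attr.ah_flags |= IB_AH_GRH;	/* GID-based routing header required */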
+++ 
+++ /**
+++  * rdma_cap_read_multi_sge - Check if the port of a device supports
+++  * multiple scatter-gather entries per RDMA READ request.
+++  * @device: Device to check
+++  * @port_num: Port number to check
+++  *
+++  * iWARP has a restriction that RDMA READ requests may only have a single
+++  * Scatter/Gather Entry (SGE) in the work request.
+++  *
+++  * NOTE: although the Linux kernel currently assumes all devices are either
+++  * single-SGE RDMA READ devices or have identical SGE maximums for RDMA
+++  * READs and WRITEs, according to Tom Talpey, this is not accurate.  There
+++  * are some devices out there that support more than a single SGE on RDMA
+++  * READ requests, but do not support the same number of SGEs as they do on
+++  * RDMA WRITE requests.  The Linux kernel would need rearchitecting to
+++  * support devices with such imbalanced READ/WRITE SGE limits.  So, for
+++  * now, we assume that a device either supports the same number of READ and
+++  * WRITE SGEs, or allows only a single READ SGE.
+++  *
+++  * Return: true for any device that allows more than one SGE in RDMA READ
+++  * requests.
+++  */
+++ static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
+++                                        u8 port_num)
+++ {
+++     return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
+++ }
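
A sketch of the intended consumption, assuming devattr holds a prior ib_query_device() result: clamp the RDMA READ scatter depth to one SGE unless the port allows more:

	unsigned int read_sges = rdma_cap_read_multi_sge(device, port_num) ?
				 devattr.max_sge : 1;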
+++ 
    int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);
    
index f609c1c2d38ddfbad3e77718b737dbcd0391bfcd,13ee04f213d7b462b74a400defbf32b50e4cb802,f609c1c2d38ddfbad3e77718b737dbcd0391bfcd,3f5750cf187ef420548f89417f0ae93d9208a0e5..88eb994edd36419abda0202a087f0540bbf6362c
@@@@@ -175,8 -175,8 -175,8 -175,8 +175,8 @@@@@ void svc_rdma_put_req_map(struct svc_rd
    static void cq_event_handler(struct ib_event *event, void *context)
    {
        struct svc_xprt *xprt = context;
- --    dprintk("svcrdma: received CQ event id=%d, context=%p\n",
- --            event->event, context);
+ ++    dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
+ ++            ib_event_msg(event->event), event->event, context);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
    }
    
@@@@@ -191,8 -191,9 -191,8 -191,8 +191,9 @@@@@ static void qp_event_handler(struct ib_
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
- --            dprintk("svcrdma: QP event %d received for QP=%p\n",
- --                    event->event, event->element.qp);
+ ++            dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
+ ++                    ib_event_msg(event->event), event->event,
+ ++                    event->element.qp);
                break;
        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
- --            dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
+ ++            dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
                        "closing transport\n",
- --                    event->event, event->element.qp);
+ ++                    ib_event_msg(event->event), event->event,
+ ++                    event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                break;
        }
@@@@@ -402,7 -404,8 -402,7 -402,7 +404,8 @@@@@ static void sq_cq_reap(struct svcxprt_r
                for (i = 0; i < ret; i++) {
                        wc = &wc_a[i];
                        if (wc->status != IB_WC_SUCCESS) {
- --                            dprintk("svcrdma: sq wc err status %d\n",
+ ++                            dprintk("svcrdma: sq wc err status %s (%d)\n",
+ ++                                    ib_wc_status_msg(wc->status),
                                        wc->status);
    
                                /* Close the transport */
@@@@@ -616,7 -619,8 -616,7 -616,7 +619,8 @@@@@ static int rdma_listen_handler(struct r
        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
- --                    "event=%d\n", cma_id, cma_id->context, event->event);
+ ++                    "event = %s (%d)\n", cma_id, cma_id->context,
+ ++                    rdma_event_msg(event->event), event->event);
                handle_connect_req(cma_id,
                                   event->param.conn.initiator_depth);
                break;
    
        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
- --                    "event=%d\n", cma_id, event->event);
+ ++                    "event = %s (%d)\n", cma_id,
+ ++                    rdma_event_msg(event->event), event->event);
                break;
        }
    
@@@@@ -669,7 -674,8 -669,7 -669,7 +674,8 @@@@@ static int rdma_cma_handler(struct rdma
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
- --                    "event=%d\n", cma_id, xprt, event->event);
+ ++                    "event = %s (%d)\n", cma_id, xprt,
+ ++                    rdma_event_msg(event->event), event->event);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
- --                    "event=%d\n", cma_id, event->event);
+ ++                    "event = %s (%d)\n", cma_id,
+ ++                    rdma_event_msg(event->event), event->event);
                break;
        }
        return 0;
@@@@@ -851,7 -858,7 -851,7 -851,7 +858,7 @@@@@ static struct svc_xprt *svc_rdma_accept
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
        int uninitialized_var(dma_mr_acc);
---     int need_dma_mr;
+++     int need_dma_mr = 0;
        int ret;
        int i;
    
        /*
         * Determine if a DMA MR is required and if so, what privs are required
         */
---     switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
---     case RDMA_TRANSPORT_IWARP:
---             newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
---             if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
---                     need_dma_mr = 1;
---                     dma_mr_acc =
---                             (IB_ACCESS_LOCAL_WRITE |
---                              IB_ACCESS_REMOTE_WRITE);
---             } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
---                     need_dma_mr = 1;
---                     dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
---             } else
---                     need_dma_mr = 0;
---             break;
---     case RDMA_TRANSPORT_IB:
---             if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
---                     need_dma_mr = 1;
---                     dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
---             } else if (!(devattr.device_cap_flags &
---                          IB_DEVICE_LOCAL_DMA_LKEY)) {
---                     need_dma_mr = 1;
---                     dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
---             } else
---                     need_dma_mr = 0;
---             break;
---     default:
+++     if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+++                              newxprt->sc_cm_id->port_num) &&
+++         !rdma_ib_or_roce(newxprt->sc_cm_id->device,
+++                          newxprt->sc_cm_id->port_num))
                goto errout;
+++ 
+++     if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
+++         !(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
+++             need_dma_mr = 1;
+++             dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
+++             if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+++                                     newxprt->sc_cm_id->port_num) &&
+++                 !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
+++                     dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
        }
    
+++     if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+++                             newxprt->sc_cm_id->port_num))
+++             newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
+++ 
        /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
        if (need_dma_mr) {
                /* Register all of physical memory */