git.karo-electronics.de Git - linux-beck.git/commitdiff
IB/core: add support to create an unsafe global rkey to ib_create_pd
authorChristoph Hellwig <hch@lst.de>
Mon, 5 Sep 2016 10:56:17 +0000 (12:56 +0200)
committerDoug Ledford <dledford@redhat.com>
Fri, 23 Sep 2016 17:47:44 +0000 (13:47 -0400)
Instead of exposing ib_get_dma_mr to ULPs and letting them use it more or
less unchecked, this moves the capability of creating a global rkey into
the RDMA core, where it can be easily audited.  It also prints a warning
every time this feature is used.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
18 files changed:
drivers/infiniband/core/mad.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/nvme/host/rdma.c
drivers/nvme/target/rdma.c
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
include/rdma/ib_verbs.h
net/9p/trans_rdma.c
net/rds/ib.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/sunrpc/xprtrdma/verbs.c

index 2d49228f28b2b18ac537b1bb982127cf66138356..95d33a3572e6d2eae96709ccbac2106137043cb7 100644 (file)
@@ -3160,7 +3160,7 @@ static int ib_mad_port_open(struct ib_device *device,
                goto error3;
        }
 
-       port_priv->pd = ib_alloc_pd(device);
+       port_priv->pd = ib_alloc_pd(device, 0);
        if (IS_ERR(port_priv->pd)) {
                dev_err(&device->dev, "Couldn't create ib_mad PD\n");
                ret = PTR_ERR(port_priv->pd);
index 9159ea5ad821818561d9665c93133ae8fb270062..e87b5187729ce00ae255341b02c0a900f4062f85 100644 (file)
@@ -227,9 +227,11 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
  * Every PD has a local_dma_lkey which can be used as the lkey value for local
  * memory operations.
  */
-struct ib_pd *ib_alloc_pd(struct ib_device *device)
+struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
+               const char *caller)
 {
        struct ib_pd *pd;
+       int mr_access_flags = 0;
 
        pd = device->alloc_pd(device, NULL, NULL);
        if (IS_ERR(pd))
@@ -239,24 +241,39 @@ struct ib_pd *ib_alloc_pd(struct ib_device *device)
        pd->uobject = NULL;
        pd->__internal_mr = NULL;
        atomic_set(&pd->usecnt, 0);
+       pd->flags = flags;
 
        if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
                pd->local_dma_lkey = device->local_dma_lkey;
-       else {
+       else
+               mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
+
+       if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+               pr_warn("%s: enabling unsafe global rkey\n", caller);
+               mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
+       }
+
+       if (mr_access_flags) {
                struct ib_mr *mr;
 
-               mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+               mr = ib_get_dma_mr(pd, mr_access_flags);
                if (IS_ERR(mr)) {
                        ib_dealloc_pd(pd);
                        return (struct ib_pd *)mr;
                }
 
                pd->__internal_mr = mr;
-               pd->local_dma_lkey = pd->__internal_mr->lkey;
+
+               if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
+                       pd->local_dma_lkey = pd->__internal_mr->lkey;
+
+               if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+                       pd->unsafe_global_rkey = pd->__internal_mr->rkey;
        }
+
        return pd;
 }
-EXPORT_SYMBOL(ib_alloc_pd);
+EXPORT_SYMBOL(__ib_alloc_pd);
 
 /**
  * ib_dealloc_pd - Deallocates a protection domain.
index 9c2e53d28f985740c3d22f9c13be776e58010567..517346f8ed7581443bbc0d92a12ae8f6bfb46f9f 100644 (file)
@@ -1897,7 +1897,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
                goto err_buf;
        }
 
-       ctx->pd = ib_alloc_pd(ctx->ib_dev);
+       ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
        if (IS_ERR(ctx->pd)) {
                ret = PTR_ERR(ctx->pd);
                pr_err("Couldn't create tunnel PD (%d)\n", ret);
index 2af44c2de2624a75d90a675727b32f914ec53e05..a96e78c2c75ac3c5c7001bbb529c1dfc7055b613 100644 (file)
@@ -1259,7 +1259,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
        if (err)
                goto err1;
 
-       xrcd->pd = ib_alloc_pd(ibdev);
+       xrcd->pd = ib_alloc_pd(ibdev, 0);
        if (IS_ERR(xrcd->pd)) {
                err = PTR_ERR(xrcd->pd);
                goto err2;
index f02a975320bd5b4efef25358c58931b4685b5077..cf1eee492a4e24ee3a523a5dc54902f89045b858 100644 (file)
@@ -2223,7 +2223,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
                goto error_0;
        }
 
-       pd = ib_alloc_pd(&dev->ib_dev);
+       pd = ib_alloc_pd(&dev->ib_dev, 0);
        if (IS_ERR(pd)) {
                mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
                ret = PTR_ERR(pd);
index c55ecb2c3736cedfe6ffb5bff4d2c44f44c167fb..6067f075772a40c534725a4c3a0870465d68dcbe 100644 (file)
@@ -147,7 +147,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        int ret, size;
        int i;
 
-       priv->pd = ib_alloc_pd(priv->ca);
+       priv->pd = ib_alloc_pd(priv->ca, 0);
        if (IS_ERR(priv->pd)) {
                printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
                return -ENODEV;
index 1b4945367e4f6d93bcf6e3612a1450e84f9f5eac..e9de99219d74b57b819ff3ac5c991229169b6cc8 100644 (file)
@@ -88,7 +88,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
                  device->comps_used, ib_dev->name,
                  ib_dev->num_comp_vectors, max_cqe);
 
-       device->pd = ib_alloc_pd(ib_dev);
+       device->pd = ib_alloc_pd(ib_dev, 0);
        if (IS_ERR(device->pd))
                goto pd_err;
 
index ba6be060a476b19d608c7797bed4526b7df04332..8df608ede3668e94a28f9a47aba3aaa3021dec35 100644 (file)
@@ -309,7 +309,7 @@ isert_create_device_ib_res(struct isert_device *device)
        if (ret)
                goto out;
 
-       device->pd = ib_alloc_pd(ib_dev);
+       device->pd = ib_alloc_pd(ib_dev, 0);
        if (IS_ERR(device->pd)) {
                ret = PTR_ERR(device->pd);
                isert_err("failed to allocate pd, device %p, ret=%d\n",
index 3322ed750172ea78f2f57cc1ad08f6f9946eca63..579b8aedfcdd53c4ff6cd38918cd1bbcc434a17c 100644 (file)
@@ -3573,7 +3573,7 @@ static void srp_add_one(struct ib_device *device)
        INIT_LIST_HEAD(&srp_dev->dev_list);
 
        srp_dev->dev = device;
-       srp_dev->pd  = ib_alloc_pd(device);
+       srp_dev->pd  = ib_alloc_pd(device, 0);
        if (IS_ERR(srp_dev->pd))
                goto free_dev;
 
index dfa23b075a88469b73c3f199f73c8b8ed571187f..48a44af740a6c50103b329bc9871bf3c3fa5c142 100644 (file)
@@ -2475,7 +2475,7 @@ static void srpt_add_one(struct ib_device *device)
        init_waitqueue_head(&sdev->ch_releaseQ);
        mutex_init(&sdev->mutex);
 
-       sdev->pd = ib_alloc_pd(device);
+       sdev->pd = ib_alloc_pd(device, 0);
        if (IS_ERR(sdev->pd))
                goto free_dev;
 
index 8d2875b4c56d8c8bf7da8951512288b327dd6e7a..a4961edf0ca15416b699bff6dad7314a858ab23b 100644 (file)
@@ -446,7 +446,7 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
        ndev->dev = cm_id->device;
        kref_init(&ndev->ref);
 
-       ndev->pd = ib_alloc_pd(ndev->dev);
+       ndev->pd = ib_alloc_pd(ndev->dev, 0);
        if (IS_ERR(ndev->pd))
                goto out_free_dev;
 
index b4d648536c3e43316cc2929f8346d4997db01306..187763a773555601c3adcc3b02db26958b5feb1f 100644 (file)
@@ -848,7 +848,7 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
        ndev->device = cm_id->device;
        kref_init(&ndev->ref);
 
-       ndev->pd = ib_alloc_pd(ndev->device);
+       ndev->pd = ib_alloc_pd(ndev->device, 0);
        if (IS_ERR(ndev->pd))
                goto out_free_dev;
 
index 4f5978b3767bfba14b7e73be2d879de0a69afed1..0e4c6090bf625dd1deaeea83db12f3ab0db39146 100644 (file)
@@ -2465,7 +2465,7 @@ int kiblnd_dev_failover(struct kib_dev *dev)
        hdev->ibh_cmid  = cmid;
        hdev->ibh_ibdev = cmid->device;
 
-       pd = ib_alloc_pd(cmid->device);
+       pd = ib_alloc_pd(cmid->device, 0);
        if (IS_ERR(pd)) {
                rc = PTR_ERR(pd);
                CERROR("Can't allocate PD: %d\n", rc);
index 38a08dae49c4c420c3ba98f8c1c7f613abcadbb6..4bdd898697cfd3000ab6411e27bb3770d1f3b0b6 100644 (file)
@@ -1370,10 +1370,13 @@ struct ib_udata {
 
 struct ib_pd {
        u32                     local_dma_lkey;
+       u32                     flags;
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        atomic_t                usecnt; /* count all resources */
 
+       u32                     unsafe_global_rkey;
+
        /*
         * Implementation details of the RDMA core, don't use in drivers:
         */
@@ -2506,8 +2509,23 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
 int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index);
 
-struct ib_pd *ib_alloc_pd(struct ib_device *device);
+enum ib_pd_flags {
+       /*
+        * Create a memory registration for all memory in the system and place
+        * the rkey for it into pd->unsafe_global_rkey.  This can be used by
+        * ULPs to avoid the overhead of dynamic MRs.
+        *
+        * This flag is generally considered unsafe and must only be used in
+        * extremely trusted environments.  Every use of it will log a warning
+        * in the kernel log.
+        */
+       IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
+};
 
+struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
+               const char *caller);
+#define ib_alloc_pd(device, flags) \
+       __ib_alloc_pd((device), (flags), __func__)
 void ib_dealloc_pd(struct ib_pd *pd);
 
 /**
index 1852e383afd6263967c8c9e85fcdc4c9b6529193..553ed4ecb6a0ec6527ca6171e3493f97ac8a8e0d 100644 (file)
@@ -680,7 +680,7 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
                goto error;
 
        /* Create the Protection Domain */
-       rdma->pd = ib_alloc_pd(rdma->cm_id->device);
+       rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
        if (IS_ERR(rdma->pd))
                goto error;
 
index 7eaf887e46f8ef12ffd7d72aeadb7d0e15459851..5680d90b0b779ec41d019f1d0797dca7b5072ece 100644 (file)
@@ -160,7 +160,7 @@ static void rds_ib_add_one(struct ib_device *device)
        rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;
 
        rds_ibdev->dev = device;
-       rds_ibdev->pd = ib_alloc_pd(device);
+       rds_ibdev->pd = ib_alloc_pd(device, 0);
        if (IS_ERR(rds_ibdev->pd)) {
                rds_ibdev->pd = NULL;
                goto put_dev;
index dd9440137834c7b1a55b433a1a91bb52efa26acc..eb2857f52b05134baa1263a9942d4f0b704e9c2c 100644 (file)
@@ -993,7 +993,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        newxprt->sc_ord = min_t(size_t, dev->attrs.max_qp_rd_atom, newxprt->sc_ord);
        newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);
 
-       newxprt->sc_pd = ib_alloc_pd(dev);
+       newxprt->sc_pd = ib_alloc_pd(dev, 0);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
index 536d0be3f61bdd3995f95a5ae3893e9cf9d84997..6561d4a35acbe5f7bfeb7198f9f6b58d3bb5048b 100644 (file)
@@ -386,7 +386,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
        }
        ia->ri_device = ia->ri_id->device;
 
-       ia->ri_pd = ib_alloc_pd(ia->ri_device);
+       ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
        if (IS_ERR(ia->ri_pd)) {
                rc = PTR_ERR(ia->ri_pd);
                pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);