author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-09 17:04:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-09 17:04:33 -0700
commit		b9044ac8292fc94bee33f6f08acaed3ac55f0c75 (patch)
tree		40dce5392f6392984311ec1e083ce29b5186ce8b /drivers/nvme
parent		1fde76f173e4d9aa205432c2ed5eff978d00deee (diff)
parent		2937f375751922ffce9ef1d5fa84491840b0c8e0 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull main rdma updates from Doug Ledford:
 "This is the main pull request for the rdma stack this release. The
  code has been through 0day and I had it tagged for linux-next testing
  for a couple days.

  Summary:

   - updates to mlx5

   - updates to mlx4 (two conflicts, both minor and easily resolved)

   - updates to iw_cxgb4 (one conflict, not so obvious to resolve,
     proper resolution is to keep the code in cxgb4_main.c as it is in
     Linus' tree as attach_uld was refactored and moved into
     cxgb4_uld.c)

   - improvements to uAPI (moved vendor specific API elements to uAPI
     area)

   - add hns-roce driver and hns and hns-roce ACPI reset support

   - conversion of all rdma code away from deprecated
     create_singlethread_workqueue

   - security improvement: remove unsafe ib_get_dma_mr (breaks lustre
     in staging)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (75 commits)
  staging/lustre: Disable InfiniBand support
  iw_cxgb4: add fast-path for small REG_MR operations
  cxgb4: advertise support for FR_NSMR_TPTE_WR
  IB/core: correctly handle rdma_rw_init_mrs() failure
  IB/srp: Fix infinite loop when FMR sg[0].offset != 0
  IB/srp: Remove an unused argument
  IB/core: Improve ib_map_mr_sg() documentation
  IB/mlx4: Fix possible vl/sl field mismatch in LRH header in QP1 packets
  IB/mthca: Move user vendor structures
  IB/nes: Move user vendor structures
  IB/ocrdma: Move user vendor structures
  IB/mlx4: Move user vendor structures
  IB/cxgb4: Move user vendor structures
  IB/cxgb3: Move user vendor structures
  IB/mlx5: Move and decouple user vendor structures
  IB/{core,hw}: Add constant for node_desc
  ipoib: Make ipoib_warn ratelimited
  IB/mlx4/alias_GUID: Remove deprecated create_singlethread_workqueue
  IB/ipoib_verbs: Remove deprecated create_singlethread_workqueue
  IB/ipoib: Remove deprecated create_singlethread_workqueue
  ...
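The last bullet of the summary is what drives the drivers/nvme diff below. ib_get_dma_mr() let a driver allocate a memory region whose rkey granted remote read/write access to all of memory; this series removes it and instead makes callers request the same capability explicitly at PD allocation time. A minimal before/after sketch of the verbs API change (the pd/mr/rkey variables are illustrative):

    /* Before this series: a PD plus a separate all-access DMA MR. */
    pd = ib_alloc_pd(device);
    mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE |
                            IB_ACCESS_REMOTE_READ |
                            IB_ACCESS_REMOTE_WRITE);
    rkey = mr->rkey;

    /* After: opt in with a flag whose name spells out the risk; the
     * rkey lives on the PD itself, with no separate MR to manage. */
    pd = ib_alloc_pd(device, IB_PD_UNSAFE_GLOBAL_RKEY);
    rkey = pd->unsafe_global_rkey;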
Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/host/rdma.c	25
-rw-r--r--	drivers/nvme/target/rdma.c	2
2 files changed, 6 insertions, 21 deletions
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index fbdb2267e460..28632292e85e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -54,7 +54,6 @@
 struct nvme_rdma_device {
 	struct ib_device	*dev;
 	struct ib_pd		*pd;
-	struct ib_mr		*mr;
 	struct kref		ref;
 	struct list_head	entry;
 };
@@ -408,10 +407,7 @@ static void nvme_rdma_free_dev(struct kref *ref)
 	list_del(&ndev->entry);
 	mutex_unlock(&device_list_mutex);
 
-	if (!register_always)
-		ib_dereg_mr(ndev->mr);
 	ib_dealloc_pd(ndev->pd);
-
 	kfree(ndev);
 }
 
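nvme_rdma_free_dev() is the kref release callback for the device structure; after this change the last put only has to drop the PD and free the structure. A sketch of the put side, assuming a small helper of this shape (the helper name is illustrative, it is not part of this hunk):

    static void nvme_rdma_dev_put(struct nvme_rdma_device *ndev)
    {
    	/* When the refcount hits zero, kref_put() invokes
    	 * nvme_rdma_free_dev(), which unlinks the device,
    	 * deallocates the PD, and kfree()s ndev, as above. */
    	kref_put(&ndev->ref, nvme_rdma_free_dev);
    }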
@@ -444,24 +440,16 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
 	ndev->dev = cm_id->device;
 	kref_init(&ndev->ref);
 
-	ndev->pd = ib_alloc_pd(ndev->dev);
+	ndev->pd = ib_alloc_pd(ndev->dev,
+			register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
 	if (IS_ERR(ndev->pd))
 		goto out_free_dev;
 
-	if (!register_always) {
-		ndev->mr = ib_get_dma_mr(ndev->pd,
-					 IB_ACCESS_LOCAL_WRITE |
-					 IB_ACCESS_REMOTE_READ |
-					 IB_ACCESS_REMOTE_WRITE);
-		if (IS_ERR(ndev->mr))
-			goto out_free_pd;
-	}
-
 	if (!(ndev->dev->attrs.device_cap_flags &
 	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
 		dev_err(&ndev->dev->dev,
 			"Memory registrations not supported.\n");
-		goto out_free_mr;
+		goto out_free_pd;
 	}
 
 	list_add(&ndev->entry, &device_list);
@@ -469,9 +457,6 @@ out_unlock:
 	mutex_unlock(&device_list_mutex);
 	return ndev;
 
-out_free_mr:
-	if (!register_always)
-		ib_dereg_mr(ndev->mr);
 out_free_pd:
 	ib_dealloc_pd(ndev->pd);
 out_free_dev:
@@ -915,7 +900,7 @@ static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
 	sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
 	put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
-	put_unaligned_le32(queue->device->mr->rkey, sg->key);
+	put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
 	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
 	return 0;
 }
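The descriptor filled in this function is NVMe's 16-byte keyed SGL data block: an 8-byte address, a 3-byte length (hence put_unaligned_le24()), a 4-byte key, and a type byte. A sketch of the post-patch fill, with the DMA address and length pulled into locals for readability (the locals and the sg pointer initialization are reconstructed from the driver of this era, not shown in the hunk):

    struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
    u64 addr = sg_dma_address(req->sg_table.sgl);
    u32 len  = sg_dma_len(req->sg_table.sgl);

    sg->addr = cpu_to_le64(addr);		/* bus address, little-endian */
    put_unaligned_le24(len, sg->length);	/* 24-bit byte count */
    put_unaligned_le32(queue->device->pd->unsafe_global_rkey,
    		       sg->key);		/* rkey the target will use */
    sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;	/* keyed data block format */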
@@ -1000,7 +985,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 		    nvme_rdma_queue_idx(queue))
 			return nvme_rdma_map_sg_inline(queue, req, c);
 
-		if (!register_always)
+		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
 			return nvme_rdma_map_sg_single(queue, req, c);
 	}
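This hunk keys the single-descriptor fast path off the PD flag rather than reading the module parameter again, so the policy is decided once, at PD allocation. For context, the surrounding dispatch reads roughly as follows; the inline-eligibility condition and the registration fallback are reconstructed from the driver of this vintage, not from the hunk itself, so treat them as an approximation:

    if (count == 1) {
    	/* small writes on I/O queues travel inline in the capsule */
    	if (rq_data_dir(rq) == WRITE &&
    	    map_len <= nvme_rdma_inline_data_size(queue) &&
    	    nvme_rdma_queue_idx(queue))
    		return nvme_rdma_map_sg_inline(queue, req, c);

    	/* one segment + global rkey: a single keyed SGL, no MR */
    	if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
    		return nvme_rdma_map_sg_single(queue, req, c);
    }

    /* everything else needs a per-request fast-registration MR */
    return nvme_rdma_map_sg_fr(queue, req, c, count);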
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1cbe6e053b5b..f8d23999e0f2 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -848,7 +848,7 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
 	ndev->device = cm_id->device;
 	kref_init(&ndev->ref);
 
-	ndev->pd = ib_alloc_pd(ndev->device);
+	ndev->pd = ib_alloc_pd(ndev->device, 0);
 	if (IS_ERR(ndev->pd))
 		goto out_free_dev;
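On the target side the new second argument is simply 0: the target always registers memory properly and never opts into the global rkey. Consumers can then derive the fast-path decision from the PD alone, along these lines (a sketch, not code from this diff; mr stands for whatever properly registered MR is in use):

    if (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
    	key = pd->unsafe_global_rkey;	/* host fast path, no per-I/O MR */
    else
    	key = mr->rkey;			/* normal, registered MR */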