author     Sagi Grimberg <sagi@grimberg.me>                  2018-09-03 03:47:07 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2018-10-10 08:54:24 +0200
commit     f36f3ebdf1e1a211c3be521e187d69a94b30db77 (patch)
tree       3446ace66cf9781796198d3c83921f73ec5e9a71
parent     a90a52c51ad46b8c1407b1113aabd8cf6e7c197d (diff)
nvmet-rdma: fix possible bogus dereference under heavy load
[ Upstream commit 8407879c4e0d7731f6e7e905893cecf61a7762c7 ]

Currently we always repost the recv buffer before we send a response
capsule back to the host. Since ordering is not guaranteed for send
and recv completions, it is possible that we will receive a new
request from the host before we got a send completion for the
response capsule.

Today, we pre-allocate 2x rsps the length of the queue, but in reality,
under heavy load, nothing really prevents this gap from expanding until
we exhaust all our rsps. To fix this, if we don't have any pre-allocated
rsps left, we dynamically allocate a rsp and make sure to free it when
we are done. If under memory pressure we fail to allocate a rsp, we
silently drop the command and wait for the host to retry.

Reported-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
[hch: dropped a superfluous assignment]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
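The pattern the patch applies — a fixed pool served from a free list, a heap-allocation fallback when the pool runs dry, and a flag recording which release path to take — can be sketched outside the kernel roughly as below. This is an illustrative userspace sketch, not the driver code: the names pool_get, pool_put, and struct rsp here are assumptions made for the example, and a pthread mutex stands in for the queue's rsps_lock.

```c
/*
 * Illustrative sketch (assumed names, not nvmet-rdma code): a fixed pool
 * of response objects with a malloc() fallback and an "allocated" flag.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <pthread.h>

struct rsp {
	struct rsp *next;	/* free-list link */
	bool allocated;		/* true only if this rsp came from malloc() */
	/* ... payload fields would go here ... */
};

static struct rsp *free_list;	/* entries seeded here are zero-initialized */
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take an rsp from the pre-allocated pool, or fall back to malloc(). */
static struct rsp *pool_get(void)
{
	struct rsp *rsp;

	pthread_mutex_lock(&free_lock);
	rsp = free_list;
	if (rsp)
		free_list = rsp->next;
	pthread_mutex_unlock(&free_lock);

	if (!rsp) {
		rsp = malloc(sizeof(*rsp));
		if (!rsp)
			return NULL;	/* caller must drop the request */
		rsp->allocated = true;
	}
	return rsp;
}

/* Return an rsp: free a fallback allocation, recycle a pool entry. */
static void pool_put(struct rsp *rsp)
{
	if (rsp->allocated) {
		free(rsp);
		return;
	}
	pthread_mutex_lock(&free_lock);
	rsp->next = free_list;
	free_list = rsp;
	pthread_mutex_unlock(&free_lock);
}

int main(void)
{
	static struct rsp pool[4];	/* zeroed, so allocated == false */
	for (int i = 0; i < 4; i++)
		pool_put(&pool[i]);	/* seed the free list */

	struct rsp *r = pool_get();	/* comes from the pool */
	pool_put(r);			/* goes back on the free list */
	return 0;
}
```

The point of the flag is that the release path decides between freeing and recycling on its own; callers never need to remember where an object came from. Pool entries are zero-initialized, so only the fallback path ever sets the flag — the same reason the explicit assignment could be dropped as superfluous in the patch below.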
-rw-r--r--  drivers/nvme/target/rdma.c  |  27
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3333d417b248..a70b3d24936d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -65,6 +65,7 @@ struct nvmet_rdma_rsp {
 	struct nvmet_req	req;
 
+	bool			allocated;
 	u8			n_rdma;
 	u32			flags;
 	u32			invalidate_rkey;
@@ -167,11 +168,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 	unsigned long flags;
 
 	spin_lock_irqsave(&queue->rsps_lock, flags);
-	rsp = list_first_entry(&queue->free_rsps,
+	rsp = list_first_entry_or_null(&queue->free_rsps,
 				struct nvmet_rdma_rsp, free_list);
-	list_del(&rsp->free_list);
+	if (likely(rsp))
+		list_del(&rsp->free_list);
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
+	if (unlikely(!rsp)) {
+		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		if (unlikely(!rsp))
+			return NULL;
+		rsp->allocated = true;
+	}
+
 	return rsp;
 }
@@ -180,6 +189,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 {
 	unsigned long flags;
 
+	if (rsp->allocated) {
+		kfree(rsp);
+		return;
+	}
+
 	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
 	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -756,6 +770,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	if (unlikely(!rsp)) {
+		/*
+		 * we get here only under memory pressure,
+		 * silently drop and have the host retry
+		 * as we can't even fail it.
+		 */
+		nvmet_rdma_post_recv(queue->dev, cmd);
+		return;
+	}
 	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;