author     Bob Pearson <rpearsonhpe@gmail.com>                  2021-01-28 11:47:53 -0600
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>      2021-03-04 10:26:33 +0100
commit     c88fc726d9c0e7a83b71df971adcc6a3cf657ad1 (patch)
tree       43ceaea3f1e1e564b50d203ba1ba058c25436027 /drivers/infiniband/sw/rxe
parent     2e556ba37f1328d6a35a81fccb36470287a3d890 (diff)
RDMA/rxe: Fix coding error in rxe_rcv_mcast_pkt
[ Upstream commit 8fc1b7027fc162738d5a85c82410e501a371a404 ]

rxe_rcv_mcast_pkt() in rxe_recv.c can leak SKBs in its error path. The loop
over the QPs attached to a multicast group creates new cloned SKBs for all but
the last QP in the list and passes the SKB and its clones to rxe_rcv_pkt() for
further processing. Any QP that does not pass the validity checks is skipped.
If the last QP in the list fails those checks, the SKB is leaked. This patch
checks whether the SKB for the last QP was consumed and, if not, frees it. It
also removes a redundant loop-invariant assignment.

Fixes: 8700e3e7c485 ("Soft RoCE driver")
Fixes: 71abf20b28ff ("RDMA/rxe: Handle skb_clone() failure in rxe_recv.c")
Link: https://lore.kernel.org/r/20210128174752.16128-1-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c | 16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 2dc808988d94..369ba76f1605 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -305,7 +305,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
list_for_each_entry(mce, &mcg->qp_list, qp_list) {
qp = mce->qp;
- pkt = SKB_TO_PKT(skb);
/* validate qp for incoming packet */
err = check_type_state(rxe, pkt, qp);
@@ -317,12 +316,18 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
continue;
/* for all but the last qp create a new clone of the
- * skb and pass to the qp.
+ * skb and pass to the qp. If an error occurs in the
+ * checks for the last qp in the list we need to
+ * free the skb since it hasn't been passed on to
+ * rxe_rcv_pkt() which would free it later.
*/
- if (mce->qp_list.next != &mcg->qp_list)
+ if (mce->qp_list.next != &mcg->qp_list) {
per_qp_skb = skb_clone(skb, GFP_ATOMIC);
- else
+ } else {
per_qp_skb = skb;
+ /* show we have consumed the skb */
+ skb = NULL;
+ }
if (unlikely(!per_qp_skb))
continue;
@@ -337,9 +342,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
- return;
-
err1:
+ /* free skb if not consumed */
kfree_skb(skb);
}
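
The fix follows a common kernel ownership idiom: once the original SKB is handed to the last consumer, the local pointer is cleared so the shared exit path can call kfree_skb() unconditionally (kfree_skb() is a no-op on a NULL pointer). For illustration only, here is a minimal, self-contained userspace sketch of that idiom; struct buf, buf_clone(), buf_free(), deliver() and fan_out() are invented stand-ins for struct sk_buff, skb_clone(), kfree_skb(), rxe_rcv_pkt() and the multicast loop, not kernel APIs.

/* Toy illustration of the "consume and NULL the pointer" pattern applied by
 * the patch above. Everything here is a stand-in, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char data[32]; };

static struct buf *buf_clone(const struct buf *b)
{
	struct buf *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, b, sizeof(*c));
	return c;
}

static void buf_free(struct buf *b)
{
	free(b);			/* like kfree_skb(), free(NULL) is a no-op */
}

static void deliver(struct buf *b)
{
	printf("delivered: %s\n", b->data);
	buf_free(b);			/* the consumer owns and frees its buffer */
}

static void fan_out(struct buf *b, int nr_targets)
{
	int i;

	for (i = 0; i < nr_targets; i++) {
		struct buf *per_target;

		if (i % 2)		/* stand-in for the per-QP validity checks */
			continue;

		if (i < nr_targets - 1) {
			per_target = buf_clone(b);
		} else {
			per_target = b;
			b = NULL;	/* ownership transferred: mark the original consumed */
		}

		if (!per_target)
			continue;

		deliver(per_target);
	}

	buf_free(b);			/* frees the original only if no target consumed it */
}

int main(void)
{
	struct buf b = { .data = "multicast packet" };
	struct buf *copy = buf_clone(&b);

	if (copy)
		fan_out(copy, 4);	/* the last target fails its check, yet the original is still freed */
	return 0;
}

The patch has the same shape: skb is set to NULL only in the branch that hands the original SKB to rxe_rcv_pkt(), so the kfree_skb() at err1: frees the SKB exactly when no QP took ownership of it, whether the exit is reached through the error path or by falling through after the loop.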