Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c  |  1 +
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c   |  3 ++-
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 18 +++++++++---------
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c |  4 +++-
4 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 77ddf2fa8625..8763fb832b01 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2491,6 +2491,7 @@ err_counter:
mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
err_map:
+ mlx4_ib_free_eqs(dev, ibdev);
iounmap(ibdev->uar_map);
err_uar:
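
The main.c change follows the usual kernel error-unwind ladder: each err_* label releases exactly what was acquired before the failing step, in reverse order, with labels falling through into one another. The added mlx4_ib_free_eqs() call gives the EQs allocated earlier in mlx4_ib_add() an owner on the err_map path. A minimal userspace sketch of the idiom, with stand-in names rather than the driver's real API:

    /* Sketch of the fall-through unwind ladder; all names are stand-ins. */
    #include <stdio.h>

    static int alloc_eqs(void)      { return 0; }    /* 0 on success */
    static void free_eqs(void)      { puts("eqs freed"); }
    static int map_uar(void)        { return -1; }   /* force the error path */
    static void unmap_uar(void)     { puts("uar unmapped"); }
    static int setup_counters(void) { return 0; }

    static int probe(void)
    {
        int err;

        err = alloc_eqs();
        if (err)
            goto err_out;
        err = map_uar();
        if (err)
            goto err_eqs;    /* EQs exist, UAR does not: free only the EQs */
        err = setup_counters();
        if (err)
            goto err_uar;
        return 0;

    err_uar:
        unmap_uar();
    err_eqs:
        free_eqs();          /* the step the patch adds to the ladder */
    err_out:
        return err;
    }

    int main(void) { return probe() ? 1 : 0; }
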
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 36ec8aa048aa..0b5bb0cee6f9 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1105,7 +1105,8 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
while ((p = rb_first(&ctx->mcg_table)) != NULL) {
group = rb_entry(p, struct mcast_group, node);
if (atomic_read(&group->refcount))
- mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
+ mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
+ atomic_read(&group->refcount), group);
force_clean_group(group);
}
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3eff35c2d453..2684605fe67f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -41,13 +41,13 @@
#include "qib.h"
-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+#define RVT_BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
+#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE-1)
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
struct qpn_map *map, unsigned off)
{
- return (map - qpt->map) * BITS_PER_PAGE + off;
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
@@ -59,7 +59,7 @@ static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
if (((off & qpt->mask) >> 1) >= n)
off = (off | qpt->mask) + 2;
} else
- off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+ off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
return off;
}
@@ -147,8 +147,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
qpn = 2;
if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
qpn = (qpn | qpt->mask) + 2;
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / BITS_PER_PAGE];
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
@@ -173,7 +173,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
* We just need to be sure we don't loop
* forever.
*/
- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ } while (offset < RVT_BITS_PER_PAGE && qpn < QPN_MAX);
/*
* In order to keep the number of pages allocated to a
* minimum, we scan all the existing pages before increasing
@@ -204,9 +204,9 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
struct qpn_map *map;
- map = qpt->map + qpn / BITS_PER_PAGE;
+ map = qpt->map + qpn / RVT_BITS_PER_PAGE;
if (map->page)
- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
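
The qib_qp.c changes are a pure rename of the per-page bitmap macros; the RVT_ prefix suggests preparation for the rdmavt rework and avoidance of clashes with other kernel users of the BITS_PER_PAGE name, though the commit message is not shown here. The allocator's arithmetic is untouched: a QPN selects page qpn / RVT_BITS_PER_PAGE and bit qpn & RVT_BITS_PER_PAGE_MASK, the mask form being valid because PAGE_SIZE * BITS_PER_BYTE is a power of two. A standalone sketch of that split, using stand-in macros rather than the kernel's:

    #include <limits.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE      4096u
    #define SKETCH_BITS_PER_PAGE  (SKETCH_PAGE_SIZE * CHAR_BIT)
    #define SKETCH_BITS_MASK      (SKETCH_BITS_PER_PAGE - 1)

    int main(void)
    {
        unsigned qpn = 70000;

        unsigned page = qpn / SKETCH_BITS_PER_PAGE;   /* which map->page */
        unsigned bit  = qpn & SKETCH_BITS_MASK;       /* which bit in it */

        /* free_qpn() is exactly this split followed by clear_bit(bit, page) */
        printf("qpn %u -> page %u, bit %u\n", qpn, page, bit);
        return 0;
    }
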
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index e6b7556d5221..cbc4216091c9 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2088,8 +2088,10 @@ send_last:
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
- if (!ret)
+ if (!ret) {
+ qib_put_ss(&qp->r_sge);
goto rnr_nak;
+ }
wc.ex.imm_data = ohdr->u.rc.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
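
The qib_rc.c fix closes an undo-on-retry-path leak: when qib_get_rwqe() returns 0 there is no receive WQE to complete into, so the responder replies with an RNR NAK and the requester retries later; the SGE state already built up in qp->r_sge for this message must be dropped first (the added qib_put_ss()), otherwise the references it holds leak on every such retry. A toy model of the control flow, with stand-in names rather than the driver's API:

    #include <stdio.h>

    static int refs;
    static void refget(void) { refs++; }
    static void refput(void) { refs--; }

    /* 1 = receive queue entry available, 0 = empty (caller must RNR NAK) */
    static int get_rwqe(int available) { return available; }

    static int handle_last_packet(int rwqe_available)
    {
        refget();                 /* taken when the message started arriving */

        if (!get_rwqe(rwqe_available)) {
            refput();             /* the added qib_put_ss() equivalent */
            return -1;            /* ask the sender to retry (RNR NAK) */
        }

        /* ... deliver the completion ... */
        refput();
        return 0;
    }

    int main(void)
    {
        handle_last_packet(0);
        printf("leaked refs: %d\n", refs);   /* 0 with the fix applied */
        return 0;
    }
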