author	Joachim Fenkes <fenkes@de.ibm.com>	2007-07-09 15:30:39 +0200
committer	Roland Dreier <rolandd@cisco.com>	2007-07-09 20:12:27 -0700
commit	28db6beb420c756c61dd44d9f2786a0677159e74 (patch)
tree	3d5b7cfdfcfe80268bf47d84404d2d9a2034cc65	/drivers/infiniband/hw/ehca/ehca_cq.c
parent	9844b71baa60270110eabaa9589d3260443d1a71 (diff)
IB/ehca: Refactor sync between completions and destroy_cq using atomic_t
- ehca_cq.nr_events is made an atomic_t, eliminating a lot of locking.
- The CQ is now removed from the CQ idr first, to make sure no more
  completions are scheduled on that CQ. The "wait for all completions
  to end" code becomes much simpler this way.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
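The interrupt-handler half of this scheme is not part of the hunks below; as a
rough sketch of the pattern the commit message describes (all identifiers here
are hypothetical and not taken from the ehca driver, only the atomic_t counter
plus waitqueue usage mirrors the commit):

	/*
	 * Illustrative sketch only -- not ehca code. demo_cq and the
	 * demo_* helpers are made-up names.
	 */
	#include <linux/atomic.h>
	#include <linux/wait.h>

	struct demo_cq {
		atomic_t nr_events;		/* completion events in flight */
		wait_queue_head_t wait_completion;
	};

	static void demo_cq_init(struct demo_cq *cq)
	{
		atomic_set(&cq->nr_events, 0);
		init_waitqueue_head(&cq->wait_completion);
	}

	/* irq/tasklet side: CQ was found in the idr, one more event in flight */
	static void demo_event_begin(struct demo_cq *cq)
	{
		atomic_inc(&cq->nr_events);
	}

	/* irq/tasklet side: event delivered; wake a destroyer waiting for zero */
	static void demo_event_end(struct demo_cq *cq)
	{
		if (atomic_dec_and_test(&cq->nr_events))
			wake_up(&cq->wait_completion);
	}

	/*
	 * destroy side: once the CQ has been removed from the idr, no new
	 * events can begin, so nr_events can only fall -- waiting for it to
	 * reach zero cannot race with a freshly scheduled completion.
	 */
	static void demo_wait_for_events(struct demo_cq *cq)
	{
		wait_event(cq->wait_completion, !atomic_read(&cq->nr_events));
	}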
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_cq.c')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_cq.c | 26
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 94bad273b34c..3729997457ca 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -146,6 +146,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
spin_lock_init(&my_cq->spinlock);
spin_lock_init(&my_cq->cb_lock);
spin_lock_init(&my_cq->task_lock);
+ atomic_set(&my_cq->nr_events, 0);
init_waitqueue_head(&my_cq->wait_completion);
my_cq->ownpid = current->tgid;
@@ -303,16 +304,6 @@ create_cq_exit1:
return cq;
}
-static int get_cq_nr_events(struct ehca_cq *my_cq)
-{
- int ret;
- unsigned long flags;
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
- ret = my_cq->nr_events;
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- return ret;
-}
-
int ehca_destroy_cq(struct ib_cq *cq)
{
u64 h_ret;
@@ -339,17 +330,18 @@ int ehca_destroy_cq(struct ib_cq *cq)
}
}
+ /*
+ * remove the CQ from the idr first to make sure
+ * no more interrupt tasklets will touch this CQ
+ */
spin_lock_irqsave(&ehca_cq_idr_lock, flags);
- while (my_cq->nr_events) {
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
- /* recheck nr_events to assure no cqe has just arrived */
- }
-
idr_remove(&ehca_cq_idr, my_cq->token);
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ /* now wait until all pending events have completed */
+ wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
+
+ /* nobody's using our CQ any longer -- we can destroy it */
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
if (h_ret == H_R_STATE) {
/* cq in err: read err data and destroy it forcibly */