author		Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 12:03:18 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 12:03:18 -0700
commit		5554b35933245e95710d709175e14c02cbc956a4 (patch)
tree		2eeb2f05a7061da3c9a3bc9ea69a344b990c6b49	/crypto/async_tx/async_tx.c
parent		0f6e38a6381446eff5175b77d1094834a633a90f (diff)
parent		7f1b358a236ee9c19657a619ac6f2dcabcaa0924 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (24 commits)
  I/OAT: I/OAT version 3.0 support
  I/OAT: tcp_dma_copybreak default value dependent on I/OAT version
  I/OAT: Add watchdog/reset functionality to ioatdma
  iop_adma: cleanup iop_chan_xor_slot_count
  iop_adma: document how to calculate the minimum descriptor pool size
  iop_adma: directly reclaim descriptors on allocation failure
  async_tx: make async_tx_test_ack a boolean routine
  async_tx: remove depend_tx from async_tx_sync_epilog
  async_tx: export async_tx_quiesce
  async_tx: fix handling of the "out of descriptor" condition in async_xor
  async_tx: ensure the xor destination buffer remains dma-mapped
  async_tx: list_for_each_entry_rcu() cleanup
  dmaengine: Driver for the Synopsys DesignWare DMA controller
  dmaengine: Add slave DMA interface
  dmaengine: add DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags to control dma unmap
  dmaengine: Add dma_client parameter to device_alloc_chan_resources
  dmatest: Simple DMA memcpy test client
  dmaengine: DMA engine driver for Marvell XOR engine
  iop-adma: fix platform driver hotplug/coldplug
  dmaengine: track the number of clients using a channel
  ...

Fixed up conflict in drivers/dca/dca-sysfs.c manually
Diffstat (limited to 'crypto/async_tx/async_tx.c')
-rw-r--r--	crypto/async_tx/async_tx.c	33
1 files changed, 22 insertions, 11 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 095c798d3170..85eaf7b1c531 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -295,7 +295,7 @@ dma_channel_add_remove(struct dma_client *client,
 	case DMA_RESOURCE_REMOVED:
 		found = 0;
 		spin_lock_irqsave(&async_tx_lock, flags);
-		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
+		list_for_each_entry(ref, &async_tx_master_list, node)
 			if (ref->chan == chan) {
				/* permit backing devices to go away */
				dma_chan_put(ref->chan);
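
(Aside: the hunk above is the "async_tx: list_for_each_entry_rcu() cleanup" change from the series. The walk runs entirely under async_tx_lock, taken with spin_lock_irqsave() two lines earlier, so exclusion against list writers is already guaranteed and the RCU iterator adds nothing. A minimal, self-contained sketch of the two idioms follows; the chan_ref_example/example_* names are made up for illustration and are not from this file.)

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Hypothetical element and list, for illustration only. */
struct chan_ref_example {
	int id;
	struct list_head node;
};
static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

/* Writer-excluded walk: the spinlock already serializes against list
 * mutation, so the plain iterator (as in the hunk above) is enough. */
static int count_refs_locked(void)
{
	struct chan_ref_example *ref;
	unsigned long flags;
	int n = 0;

	spin_lock_irqsave(&example_lock, flags);
	list_for_each_entry(ref, &example_list, node)
		n++;
	spin_unlock_irqrestore(&example_lock, flags);
	return n;
}

/* Lockless read-side walk: only here does list_for_each_entry_rcu(),
 * paired with rcu_read_lock(), actually buy anything. */
static int count_refs_rcu(void)
{
	struct chan_ref_example *ref;
	int n = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ref, &example_list, node)
		n++;
	rcu_read_unlock();
	return n;
}
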
@@ -608,23 +608,34 @@ async_trigger_callback(enum async_tx_flags flags,
 		pr_debug("%s: (sync)\n", __func__);
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
-		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		async_tx_sync_epilog(cb_fn, cb_param);
 	}
 	return tx;
 }
 EXPORT_SYMBOL_GPL(async_trigger_callback);
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+	if (*tx) {
+		/* if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 */
+		BUG_ON(async_tx_test_ack(*tx));
+		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+			panic("DMA_ERROR waiting for transaction\n");
+		async_tx_ack(*tx);
+		*tx = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(async_tx_quiesce);
+
 module_init(async_tx_init);
 module_exit(async_tx_exit);
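
For context on the new export: below is a minimal sketch of how a synchronous fallback path elsewhere in the async_tx code could use the now-exported async_tx_quiesce() together with the slimmed-down async_tx_sync_epilog(). The example_sync_fallback() wrapper is hypothetical and not part of this commit; only async_tx_quiesce(), async_tx_sync_epilog(), dma_async_tx_callback and struct dma_async_tx_descriptor are real interfaces from this tree.

#include <linux/async_tx.h>
#include <linux/dmaengine.h>

/*
 * Hypothetical caller, for illustration only: the synchronous
 * (non-offloaded) path of an async_tx operation.
 */
static void example_sync_fallback(struct dma_async_tx_descriptor *depend_tx,
				  dma_async_tx_callback cb_fn, void *cb_param)
{
	/* Wait for the prerequisite, ack it, and clear the pointer so it
	 * cannot be waited on twice; panics on DMA_ERROR as before. */
	async_tx_quiesce(&depend_tx);

	/* Run the completion callback for the synchronous path. */
	async_tx_sync_epilog(cb_fn, cb_param);
}

The behavioural point the helper captures is that a dependency is consumed exactly once: after async_tx_quiesce() returns, the descriptor has been acked and the caller's pointer is NULL.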