path: root/drivers/dma
author    Fancy Fang <B47543@freescale.com>    2013-12-04 10:13:26 +0800
committer Fancy Fang <B47543@freescale.com>    2013-12-06 16:36:42 +0800
commit    ffcad666548417ef21937e0a755d85ab922313a9 (patch)
tree      917f657a35a3acb3db544062f634658dd3064058 /drivers/dma
parent    9aec719436856ddcce32cdd1a7de776c822b8563 (diff)
ENGR00290613 PXP: add asynchronous multi instances support for PXP
Move the PXP register setup from pxp_issue_pending() into a separate kernel thread. This avoids the multi-instance hang issues addressed in previous commits, and PXP users are no longer blocked when they call dma_async_issue_pending().

Signed-off-by: Fancy Fang <B47543@freescale.com>
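The handoff this patch introduces is a small producer/consumer pattern: dma_async_issue_pending() only queues the channel and wakes a dedicated kthread; the kthread programs the PXP registers and then sleeps until the IRQ handler signals a completion. Below is a minimal, illustrative sketch of that pattern with simplified, hypothetical names (struct pxp_ctx, pxp_hw_program(), pxp_queue_and_kick()); it is not the driver code itself.

    /*
     * Illustrative sketch only; pxp_hw_program() stands in for the
     * driver's pxpdma_dostart_work().
     */
    #include <linux/kthread.h>
    #include <linux/wait.h>
    #include <linux/completion.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct pxp_ctx {
            spinlock_t lock;
            struct list_head pending;        /* channels queued by issue_pending() */
            wait_queue_head_t thread_waitq;  /* producer -> dispatch thread */
            struct completion complete;      /* IRQ handler -> dispatch thread */
            struct task_struct *dispatch;
    };

    static bool pxp_has_pending(struct pxp_ctx *ctx)
    {
            unsigned long flags;
            bool found;

            spin_lock_irqsave(&ctx->lock, flags);
            found = !list_empty(&ctx->pending);
            spin_unlock_irqrestore(&ctx->lock, flags);
            return found;
    }

    /* Producer side: called from issue_pending(); queues and returns, never blocks. */
    static void pxp_queue_and_kick(struct pxp_ctx *ctx, struct list_head *chan_node)
    {
            unsigned long flags;

            spin_lock_irqsave(&ctx->lock, flags);
            /* Only add the channel if it is not already queued; this relies on
             * the node being managed with INIT_LIST_HEAD()/list_del_init(). */
            if (list_empty(chan_node))
                    list_add_tail(chan_node, &ctx->pending);
            spin_unlock_irqrestore(&ctx->lock, flags);

            wake_up_interruptible(&ctx->thread_waitq);
    }

    /* Consumer side: programs the hardware outside the caller's context. */
    static int pxp_dispatch_thread(void *argv)
    {
            struct pxp_ctx *ctx = argv;

            while (!kthread_should_stop()) {
                    if (wait_event_interruptible(ctx->thread_waitq,
                                                 pxp_has_pending(ctx)))
                            continue;        /* interrupted by a signal */

                    init_completion(&ctx->complete);
                    /* pxp_hw_program(ctx); -- PXP registers are written here */
                    wait_for_completion_timeout(&ctx->complete, 2 * HZ);
                    /* the IRQ handler calls complete(&ctx->complete) when done */
            }
            return 0;
    }

In the patch itself the same roles are played by pxp_issue_pending() (producer), pxp_dispatch_thread() (consumer), and pxp_irq(), which calls complete() once the engine finishes an operation.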
Diffstat (limited to 'drivers/dma')
-rw-r--r--    drivers/dma/pxp/pxp_dma_v2.c    | 88
1 file changed, 77 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/pxp/pxp_dma_v2.c b/drivers/dma/pxp/pxp_dma_v2.c
index 0cd8c71b08d6..00b84a0ae06b 100644
--- a/drivers/dma/pxp/pxp_dma_v2.c
+++ b/drivers/dma/pxp/pxp_dma_v2.c
@@ -40,6 +40,7 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/of.h>
+#include <linux/kthread.h>
#include "regs-pxp_v2.h"
@@ -48,7 +49,6 @@
static LIST_HEAD(head);
static int timeout_in_ms = 600;
static unsigned int block_size;
-struct mutex hard_lock;
struct pxp_dma {
struct dma_device dma;
@@ -78,6 +78,11 @@ struct pxps {
/* to turn clock off when pxp is inactive */
struct timer_list clk_timer;
+
+ /* for pxp config dispatch asynchronously*/
+ struct task_struct *dispatch;
+ wait_queue_head_t thread_waitq;
+ struct completion complete;
};
#define to_pxp_dma(d) container_of(d, struct pxp_dma, dma)
@@ -1293,7 +1298,6 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
}
pxp_chan = list_entry(head.next, struct pxp_channel, list);
- list_del_init(&pxp_chan->list);
if (list_empty(&pxp_chan->active_list)) {
pr_debug("PXP_IRQ pxp_chan->active_list empty. chan_id %d\n",
@@ -1322,7 +1326,10 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
list_splice_init(&desc->tx_list, &pxp_chan->free_list);
list_move(&desc->list, &pxp_chan->free_list);
- mutex_unlock(&hard_lock);
+ if (list_empty(&pxp_chan->active_list))
+ list_del_init(&pxp_chan->list);
+
+ complete(&pxp->complete);
pxp->pxp_ongoing = 0;
mod_timer(&pxp->clk_timer, jiffies + msecs_to_jiffies(timeout_in_ms));
@@ -1438,6 +1445,7 @@ static void pxp_issue_pending(struct dma_chan *chan)
struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
struct pxps *pxp = to_pxp(pxp_dma);
unsigned long flags0, flags;
+ struct list_head *iter;
spin_lock_irqsave(&pxp->lock, flags0);
spin_lock_irqsave(&pxp_chan->lock, flags);
@@ -1445,7 +1453,22 @@ static void pxp_issue_pending(struct dma_chan *chan)
if (!list_empty(&pxp_chan->queue)) {
pxpdma_dequeue(pxp_chan, &pxp_chan->active_list);
pxp_chan->status = PXP_CHANNEL_READY;
- list_add_tail(&pxp_chan->list, &head);
+ iter = head.next;
+ /* Avoid adding a pxp channel to head list which
+ * has been already listed in it. And this may
+ * cause the head list to be broken down.
+ */
+ if (list_empty(&head)) {
+ list_add_tail(&pxp_chan->list, &head);
+ } else {
+ while (iter != &head) {
+ if (&pxp_chan->list == iter)
+ break;
+ iter = iter->next;
+ }
+ if (iter == &head)
+ list_add_tail(&pxp_chan->list, &head);
+ }
} else {
spin_unlock_irqrestore(&pxp_chan->lock, flags);
spin_unlock_irqrestore(&pxp->lock, flags0);
@@ -1455,12 +1478,7 @@ static void pxp_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&pxp->lock, flags0);
pxp_clk_enable(pxp);
- mutex_lock(&hard_lock);
-
- spin_lock_irqsave(&pxp->lock, flags);
- pxp->pxp_ongoing = 1;
- spin_unlock_irqrestore(&pxp->lock, flags);
- pxpdma_dostart_work(pxp);
+ wake_up_interruptible(&pxp->thread_waitq);
}
static void __pxp_terminate_all(struct dma_chan *chan)
@@ -1761,6 +1779,46 @@ static const struct of_device_id imx_pxpdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_pxpdma_dt_ids);
+static int has_pending_task(struct pxps *pxp, struct pxp_channel *task)
+{
+ int found;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pxp->lock, flags);
+ found = !list_empty(&head);
+ spin_unlock_irqrestore(&pxp->lock, flags);
+
+ return found;
+}
+
+static int pxp_dispatch_thread(void *argv)
+{
+ struct pxps *pxp = (struct pxps *)argv;
+ struct pxp_channel *pending = NULL;
+ unsigned long flags;
+
+ while (!kthread_should_stop()) {
+ int ret;
+ ret = wait_event_interruptible(pxp->thread_waitq,
+ has_pending_task(pxp, pending));
+ if (signal_pending(current))
+ continue;
+
+ spin_lock_irqsave(&pxp->lock, flags);
+ pxp->pxp_ongoing = 1;
+ spin_unlock_irqrestore(&pxp->lock, flags);
+ init_completion(&pxp->complete);
+ pxpdma_dostart_work(pxp);
+ ret = wait_for_completion_timeout(&pxp->complete, 2 * HZ);
+ if (ret == 0) {
+ printk(KERN_EMERG "%s: task is timeout\n\n", __func__);
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int pxp_probe(struct platform_device *pdev)
{
struct pxps *pxp;
@@ -1792,7 +1850,6 @@ static int pxp_probe(struct platform_device *pdev)
spin_lock_init(&pxp->lock);
mutex_init(&pxp->clk_mutex);
- mutex_init(&hard_lock);
pxp->base = devm_request_and_ioremap(&pdev->dev, res);
if (pxp->base == NULL) {
@@ -1836,6 +1893,14 @@ static int pxp_probe(struct platform_device *pdev)
pxp->clk_timer.function = pxp_clkoff_timer;
pxp->clk_timer.data = (unsigned long)pxp;
+ /* allocate a kernel thread to dispatch pxp conf */
+ pxp->dispatch = kthread_run(pxp_dispatch_thread, pxp, "pxp_dispatch");
+ if (IS_ERR(pxp->dispatch)) {
+ err = PTR_ERR(pxp->dispatch);
+ goto exit;
+ }
+ init_waitqueue_head(&pxp->thread_waitq);
+
register_pxp_device();
pm_runtime_enable(pxp->dev);
@@ -1851,6 +1916,7 @@ static int pxp_remove(struct platform_device *pdev)
struct pxps *pxp = platform_get_drvdata(pdev);
unregister_pxp_device();
+ kthread_stop(pxp->dispatch);
cancel_work_sync(&pxp->work);
del_timer_sync(&pxp->clk_timer);
clk_disable_unprepare(pxp->clk);