summaryrefslogtreecommitdiff
path: root/drivers/tty
diff options
context:
space:
mode:
authorFugang Duan <b38611@freescale.com>2014-07-13 12:04:17 +0800
committerNitin Garg <nitin.garg@freescale.com>2015-09-17 08:58:25 -0500
commite385aa924fca75ebe59ca6e16fa4e1161e049eca (patch)
treeaf3c70d2c5cbe4f4c39d7e649aec335119bc8609 /drivers/tty
parentf42a77879a1712f8ef308830fc1995bebda57090 (diff)
ENGR00322581-02 tty: serial: imx: add dma sync for dma tx path
For the DMA tx path, there is no synchronization between preparing the tx BD and the DMA callback; such synchronization is needed to ensure tx_work submits the next DMA request only after the previous one has finished. (cherry picked from commit 945d45c39f2afd98fd82464fee005d1e66807367) Signed-off-by: Fugang Duan <B38611@freescale.com> Signed-off-by: Robin Gong <b38343@freescale.com>
Diffstat (limited to 'drivers/tty')
-rw-r--r--drivers/tty/serial/imx.c76
1 file changed, 44 insertions, 32 deletions
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 34b3f0cffcc0..f43c0e32c4f0 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -228,6 +228,8 @@ struct imx_port {
struct work_struct tsk_dma_tx;
wait_queue_head_t dma_wait;
unsigned int saved_reg[11];
+#define DMA_TX_IS_WORKING 1
+ unsigned long flags;
};
struct imx_port_ucrs {
@@ -504,6 +506,8 @@ static void dma_tx_callback(void *data)
dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
+ clear_bit(DMA_TX_IS_WORKING, &sport->flags);
+ smp_mb__after_clear_bit();
uart_write_wakeup(&sport->port);
schedule_work(&sport->tsk_dma_tx);
@@ -523,47 +527,54 @@ static void dma_tx_work(struct work_struct *w)
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = sport->dma_chan_tx;
struct device *dev = sport->port.dev;
- enum dma_status status;
+ unsigned long flags;
int ret;
- status = dmaengine_tx_status(chan, (dma_cookie_t)0, NULL);
- if (DMA_IN_PROGRESS == status)
+ if (test_and_set_bit(DMA_TX_IS_WORKING, &sport->flags))
return;
+ spin_lock_irqsave(&sport->port.lock, flags);
sport->tx_bytes = uart_circ_chars_pending(xmit);
- if (xmit->tail > xmit->head && xmit->head > 0) {
- sport->dma_tx_nents = 2;
- sg_init_table(sgl, 2);
- sg_set_buf(sgl, xmit->buf + xmit->tail,
- UART_XMIT_SIZE - xmit->tail);
- sg_set_buf(sgl + 1, xmit->buf, xmit->head);
- } else {
- sport->dma_tx_nents = 1;
- sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
- }
+ if (sport->tx_bytes > 0) {
+ if (xmit->tail > xmit->head && xmit->head > 0) {
+ sport->dma_tx_nents = 2;
+ sg_init_table(sgl, 2);
+ sg_set_buf(sgl, xmit->buf + xmit->tail,
+ UART_XMIT_SIZE - xmit->tail);
+ sg_set_buf(sgl + 1, xmit->buf, xmit->head);
+ } else {
+ sport->dma_tx_nents = 1;
+ sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
- ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
- if (ret == 0) {
- dev_err(dev, "DMA mapping error for TX.\n");
- return;
- }
- desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(dev, "We cannot prepare for the TX slave dma!\n");
+ ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ if (ret == 0) {
+ dev_err(dev, "DMA mapping error for TX.\n");
+ goto err_out;
+ }
+ desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "We cannot prepare for the TX slave dma!\n");
+ goto err_out;
+ }
+ desc->callback = dma_tx_callback;
+ desc->callback_param = sport;
+
+ dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
+ uart_circ_chars_pending(xmit));
+ /* fire it */
+ sport->dma_is_txing = 1;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
return;
}
- desc->callback = dma_tx_callback;
- desc->callback_param = sport;
-
- dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
- uart_circ_chars_pending(xmit));
- /* fire it */
- sport->dma_is_txing = 1;
- dmaengine_submit(desc);
- dma_async_issue_pending(chan);
- return;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+err_out:
+ clear_bit(DMA_TX_IS_WORKING, &sport->flags);
+ smp_mb__after_clear_bit();
}
/*
@@ -1045,6 +1056,7 @@ static void imx_enable_dma(struct imx_port *sport)
unsigned long temp;
init_waitqueue_head(&sport->dma_wait);
+ sport->flags = 0;
/* set UCR1 */
temp = readl(sport->port.membase + UCR1);