summaryrefslogtreecommitdiff
path: root/drivers/tty/serial/imx.c
diff options
context:
space:
mode:
authorFugang Duan <fugang.duan@nxp.com>2017-06-12 16:56:43 +0800
committerJason Liu <jason.hui.liu@nxp.com>2019-02-12 10:26:50 +0800
commit4d3bd1e2f9b14284aa8190ca28ac97e20935830a (patch)
tree87da430970249860dee21abb1c6a83359fb799ee /drivers/tty/serial/imx.c
parent28f34e086c871f34277e35f4cde3db16979cd03f (diff)
MLK-15093 tty: serial: imx: enable bit TDMAEN in each DMA transfer
In below case: write() -> flush() -> write() -> flush() ... .imx_flush_buffer() _MAY_ clear UCR1_TDMAEN bit if the callback is not comming or DMA transfer is not completed, to ensure DMA trigger is enabled for the new DMA prep_sg, enable the UCR1_TDMAEN bit in .dma_tx_work(). Signed-off-by: Fugang Duan <fugang.duan@nxp.com> Tested-by: Fabio Estevam <fabio.estevam@nxp.com> Tested-by: David Wolfe <david.wolfe@nxp.com>
Diffstat (limited to 'drivers/tty/serial/imx.c')
-rw-r--r--drivers/tty/serial/imx.c22
1 file changed, 17 insertions, 5 deletions
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 8f780974754d..53cd6b326740 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -508,16 +508,21 @@ static void dma_tx_callback(void *data)
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
- dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
-
- sport->dma_is_txing = 0;
-
/* update the stat */
spin_lock_irqsave(&sport->port.lock, flags);
+ /* user call .flush() before the code slice coming */
+ if (!sport->dma_is_txing) {
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ return;
+ }
+ sport->dma_is_txing = 0;
+
xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
sport->port.icount.tx += sport->tx_bytes;
spin_unlock_irqrestore(&sport->port.lock, flags);
+ dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+
dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
clear_bit(DMA_TX_IS_WORKING, &sport->flags);
@@ -543,6 +548,7 @@ static void dma_tx_work(struct work_struct *w)
struct dma_chan *chan = sport->dma_chan_tx;
struct device *dev = sport->port.dev;
unsigned long flags;
+ unsigned long temp;
int ret;
if (test_and_set_bit(DMA_TX_IS_WORKING, &sport->flags))
@@ -584,6 +590,10 @@ static void dma_tx_work(struct work_struct *w)
sport->dma_is_txing = 1;
dmaengine_submit(desc);
dma_async_issue_pending(chan);
+
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_TDMAEN;
+ writel(temp, sport->port.membase + UCR1);
return;
}
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1448,7 +1458,9 @@ static void imx_flush_buffer(struct uart_port *port)
temp = readl(sport->port.membase + UCR1);
temp &= ~UCR1_TDMAEN;
writel(temp, sport->port.membase + UCR1);
- sport->dma_is_txing = false;
+ sport->dma_is_txing = 0;
+ clear_bit(DMA_TX_IS_WORKING, &sport->flags);
+ smp_mb__after_atomic();
}
/*