diff options
author | Fugang Duan <b38611@freescale.com> | 2014-07-13 12:04:17 +0800 |
---|---|---|
committer | Nitin Garg <nitin.garg@freescale.com> | 2015-04-14 13:59:38 -0500 |
commit | ce899219ebe7c6168e86478d1f360252a3e0d008 (patch) | |
tree | 86ab42566cfa4afc4e6419e39b4a8ceb9bedf711 /drivers/tty | |
parent | 54be376505491ccbe12bd67b76b2d68ceb69aef9 (diff) |
ENGR00322581-02 tty: serial: imx: add dma sync for dma tx path
For the DMA tx path, there is no synchronization between preparing the
tx BD and the DMA callback, which is needed to ensure that tx_work
submits the next DMA request only after the previous one has finished.
(cherry picked from commit 945d45c39f2afd98fd82464fee005d1e66807367)
Signed-off-by: Fugang Duan <B38611@freescale.com>
Signed-off-by: Robin Gong <b38343@freescale.com>
Diffstat (limited to 'drivers/tty')
-rw-r--r-- | drivers/tty/serial/imx.c | 76 |
1 file changed, 44 insertions, 32 deletions
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 5ed34d17edbe..cf573b1259b7 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -228,6 +228,8 @@ struct imx_port { struct work_struct tsk_dma_tx; wait_queue_head_t dma_wait; unsigned int saved_reg[11]; +#define DMA_TX_IS_WORKING 1 + unsigned long flags; }; struct imx_port_ucrs { @@ -504,6 +506,8 @@ static void dma_tx_callback(void *data) dev_dbg(sport->port.dev, "we finish the TX DMA.\n"); + clear_bit(DMA_TX_IS_WORKING, &sport->flags); + smp_mb__after_clear_bit(); uart_write_wakeup(&sport->port); schedule_work(&sport->tsk_dma_tx); @@ -523,47 +527,54 @@ static void dma_tx_work(struct work_struct *w) struct dma_async_tx_descriptor *desc; struct dma_chan *chan = sport->dma_chan_tx; struct device *dev = sport->port.dev; - enum dma_status status; + unsigned long flags; int ret; - status = dmaengine_tx_status(chan, (dma_cookie_t)0, NULL); - if (DMA_IN_PROGRESS == status) + if (test_and_set_bit(DMA_TX_IS_WORKING, &sport->flags)) return; + spin_lock_irqsave(&sport->port.lock, flags); sport->tx_bytes = uart_circ_chars_pending(xmit); - if (xmit->tail > xmit->head && xmit->head > 0) { - sport->dma_tx_nents = 2; - sg_init_table(sgl, 2); - sg_set_buf(sgl, xmit->buf + xmit->tail, - UART_XMIT_SIZE - xmit->tail); - sg_set_buf(sgl + 1, xmit->buf, xmit->head); - } else { - sport->dma_tx_nents = 1; - sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes); - } + if (sport->tx_bytes > 0) { + if (xmit->tail > xmit->head && xmit->head > 0) { + sport->dma_tx_nents = 2; + sg_init_table(sgl, 2); + sg_set_buf(sgl, xmit->buf + xmit->tail, + UART_XMIT_SIZE - xmit->tail); + sg_set_buf(sgl + 1, xmit->buf, xmit->head); + } else { + sport->dma_tx_nents = 1; + sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes); + } + spin_unlock_irqrestore(&sport->port.lock, flags); - ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); - if (ret == 0) { - dev_err(dev, "DMA mapping error for TX.\n"); - 
return; - } - desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents, - DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); - if (!desc) { - dev_err(dev, "We cannot prepare for the TX slave dma!\n"); + ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); + if (ret == 0) { + dev_err(dev, "DMA mapping error for TX.\n"); + goto err_out; + } + desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "We cannot prepare for the TX slave dma!\n"); + goto err_out; + } + desc->callback = dma_tx_callback; + desc->callback_param = sport; + + dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n", + uart_circ_chars_pending(xmit)); + /* fire it */ + sport->dma_is_txing = 1; + dmaengine_submit(desc); + dma_async_issue_pending(chan); return; } - desc->callback = dma_tx_callback; - desc->callback_param = sport; - - dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n", - uart_circ_chars_pending(xmit)); - /* fire it */ - sport->dma_is_txing = 1; - dmaengine_submit(desc); - dma_async_issue_pending(chan); - return; + spin_unlock_irqrestore(&sport->port.lock, flags); +err_out: + clear_bit(DMA_TX_IS_WORKING, &sport->flags); + smp_mb__after_clear_bit(); } /* @@ -1037,6 +1048,7 @@ static void imx_enable_dma(struct imx_port *sport) unsigned long temp; init_waitqueue_head(&sport->dma_wait); + sport->flags = 0; /* set UCR1 */ temp = readl(sport->port.membase + UCR1); |