author    Andy Duan <fugang.duan@nxp.com>    2017-03-21 17:32:56 +0800
committer Jason Liu <jason.hui.liu@nxp.com>  2019-02-12 10:26:02 +0800
commit    aa91843b7aa5f32f4b2a69777de888772050e932 (patch)
tree      109f0a8fe929576bf5fa2cf57c861061301d9e50 /drivers/tty
parent    e22fd1b25fbac0afbd755eb40b20af6f7ac23ef3 (diff)
MLK-14498-6 tty: serial: imx: align tx path with kernel 3.14
Align the TX path with kernel 3.14; otherwise data loss can occur. The patch is cherry-picked from commit 47c1570ac934 and merges patch 1afe15219403.
Signed-off-by: Fugang Duan <B38611@freescale.com>
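In broad strokes, the change replaces direct calls to imx_dma_tx() with a deferred work item, so the DMA setup for TX runs from a workqueue instead of directly in the start-TX path. A minimal sketch of that pattern, assuming a simplified port structure, is below; the name my_port and the stub bodies are illustrative only, while tsk_dma_tx, dma_tx_work() and the schedule_work()/INIT_WORK()/cancel_work_sync() calls mirror the actual diff.

#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/workqueue.h>

struct my_port {
	struct uart_port port;
	struct work_struct tsk_dma_tx;	/* TX DMA work item added by this patch */
};

/* runs in process context, outside the lock held by the start-TX caller */
static void dma_tx_work(struct work_struct *w)
{
	struct my_port *sport = container_of(w, struct my_port, tsk_dma_tx);

	/* prepare the scatterlist and submit the TX DMA (see the diff below) */
	(void)sport;
}

static void my_start_tx(struct uart_port *port)
{
	struct my_port *sport = container_of(port, struct my_port, port);

	/* start_tx() is called under port->lock, so only queue the work here */
	schedule_work(&sport->tsk_dma_tx);
}

/*
 * Lifecycle, as in the diff:
 *   startup():  INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
 *   shutdown(): cancel_work_sync(&sport->tsk_dma_tx);
 */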
Diffstat (limited to 'drivers/tty')
-rw-r--r--  drivers/tty/serial/imx.c | 118
1 file changed, 60 insertions(+), 58 deletions(-)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 1c981ff6c949..b2ca614e21bf 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -243,9 +243,12 @@ struct imx_port {
struct imx_dma_rxbuf rx_buf;
unsigned int tx_bytes;
unsigned int dma_tx_nents;
+ struct work_struct tsk_dma_tx;
wait_queue_head_t dma_wait;
unsigned int saved_reg[10];
bool context_saved;
+#define DMA_TX_IS_WORKING 1
+ unsigned long flags;
};
struct imx_port_ucrs {
@@ -444,7 +447,6 @@ static void imx_enable_ms(struct uart_port *port)
mctrl_gpio_enable_ms(sport->gpios);
}
-static void imx_dma_tx(struct imx_port *sport);
static inline void imx_transmit_buffer(struct imx_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
@@ -475,7 +477,7 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
writel(temp, sport->port.membase + UCR1);
} else {
writel(temp, sport->port.membase + UCR1);
- imx_dma_tx(sport);
+ schedule_work(&sport->tsk_dma_tx);
}
}
@@ -504,95 +506,89 @@ static void dma_tx_callback(void *data)
struct scatterlist *sgl = &sport->tx_sgl[0];
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
- unsigned long temp;
-
- spin_lock_irqsave(&sport->port.lock, flags);
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
- temp = readl(sport->port.membase + UCR1);
- temp &= ~UCR1_TDMAEN;
- writel(temp, sport->port.membase + UCR1);
+ sport->dma_is_txing = 0;
/* update the stat */
+ spin_lock_irqsave(&sport->port.lock, flags);
xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
sport->port.icount.tx += sport->tx_bytes;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
- sport->dma_is_txing = 0;
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ clear_bit(DMA_TX_IS_WORKING, &sport->flags);
+ smp_mb__after_atomic();
+ uart_write_wakeup(&sport->port);
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&sport->port);
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
+ schedule_work(&sport->tsk_dma_tx);
if (waitqueue_active(&sport->dma_wait)) {
wake_up(&sport->dma_wait);
dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
return;
}
-
- spin_lock_irqsave(&sport->port.lock, flags);
- if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
- imx_dma_tx(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
}
-static void imx_dma_tx(struct imx_port *sport)
+static void dma_tx_work(struct work_struct *w)
{
+ struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_tx);
struct circ_buf *xmit = &sport->port.state->xmit;
struct scatterlist *sgl = sport->tx_sgl;
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = sport->dma_chan_tx;
struct device *dev = sport->port.dev;
- unsigned long temp;
+ unsigned long flags;
int ret;
- if (sport->dma_is_txing)
+ if (test_and_set_bit(DMA_TX_IS_WORKING, &sport->flags))
return;
+ spin_lock_irqsave(&sport->port.lock, flags);
sport->tx_bytes = uart_circ_chars_pending(xmit);
- if (xmit->tail > xmit->head && xmit->head > 0) {
- sport->dma_tx_nents = 2;
- sg_init_table(sgl, 2);
- sg_set_buf(sgl, xmit->buf + xmit->tail,
- UART_XMIT_SIZE - xmit->tail);
- sg_set_buf(sgl + 1, xmit->buf, xmit->head);
- } else {
- sport->dma_tx_nents = 1;
- sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
- }
+ if (sport->tx_bytes > 0) {
+ if (xmit->tail > xmit->head && xmit->head > 0) {
+ sport->dma_tx_nents = 2;
+ sg_init_table(sgl, 2);
+ sg_set_buf(sgl, xmit->buf + xmit->tail,
+ UART_XMIT_SIZE - xmit->tail);
+ sg_set_buf(sgl + 1, xmit->buf, xmit->head);
+ } else {
+ sport->dma_tx_nents = 1;
+ sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
- ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
- if (ret == 0) {
- dev_err(dev, "DMA mapping error for TX.\n");
- return;
- }
- desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
- if (!desc) {
- dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
- DMA_TO_DEVICE);
- dev_err(dev, "We cannot prepare for the TX slave dma!\n");
+ ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ if (ret == 0) {
+ dev_err(dev, "DMA mapping error for TX.\n");
+ goto err_out;
+ }
+ desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "We cannot prepare for the TX slave dma!\n");
+ goto err_out;
+ }
+ desc->callback = dma_tx_callback;
+ desc->callback_param = sport;
+
+ dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
+ uart_circ_chars_pending(xmit));
+ /* fire it */
+ sport->dma_is_txing = 1;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
return;
}
- desc->callback = dma_tx_callback;
- desc->callback_param = sport;
-
- dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
- uart_circ_chars_pending(xmit));
-
- temp = readl(sport->port.membase + UCR1);
- temp |= UCR1_TDMAEN;
- writel(temp, sport->port.membase + UCR1);
-
- /* fire it */
- sport->dma_is_txing = 1;
- dmaengine_submit(desc);
- dma_async_issue_pending(chan);
- return;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+err_out:
+ clear_bit(DMA_TX_IS_WORKING, &sport->flags);
+ smp_mb__after_atomic();
}
/*
@@ -637,7 +633,7 @@ static void imx_start_tx(struct uart_port *port)
if (!uart_circ_empty(&port->state->xmit) &&
!uart_tx_stopped(port))
- imx_dma_tx(sport);
+ schedule_work(&sport->tsk_dma_tx);
return;
}
}
@@ -1297,6 +1293,9 @@ static int imx_startup(struct uart_port *port)
&& !sport->dma_is_inited)
imx_uart_dma_init(sport);
+ if (sport->dma_is_inited)
+ INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
+
spin_lock_irqsave(&sport->port.lock, flags);
/*
@@ -1378,6 +1377,9 @@ static void imx_shutdown(struct uart_port *port)
dmaengine_terminate_all(sport->dma_chan_tx);
dmaengine_terminate_all(sport->dma_chan_rx);
}
+
+ cancel_work_sync(&sport->tsk_dma_tx);
+
spin_lock_irqsave(&sport->port.lock, flags);
imx_stop_tx(port);
imx_stop_rx(port);
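The patch also serializes TX submissions with an atomic flag bit (DMA_TX_IS_WORKING) rather than checking dma_is_txing alone. The snippet below isolates that guard as a sketch; tx_state, tx_work() and tx_done_callback() are simplified stand-ins for imx_port, dma_tx_work() and dma_tx_callback(), and the DMA setup itself is elided.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DMA_TX_IS_WORKING 1

struct tx_state {
	unsigned long flags;
	struct work_struct tsk_dma_tx;
};

static void tx_work(struct work_struct *w)
{
	struct tx_state *s = container_of(w, struct tx_state, tsk_dma_tx);
	bool submitted = false;		/* stand-in for the real DMA setup result */

	/* claim the bit: only one TX submission may be in flight at a time */
	if (test_and_set_bit(DMA_TX_IS_WORKING, &s->flags))
		return;

	/* ... map the scatterlist, prep the descriptor, dmaengine_submit() ... */

	if (submitted)
		return;		/* the bit is released by tx_done_callback() */

	/* error or nothing-to-send path: release the bit ourselves */
	clear_bit(DMA_TX_IS_WORKING, &s->flags);
	smp_mb__after_atomic();
}

static void tx_done_callback(struct tx_state *s)
{
	/*
	 * Completion: release the bit, then re-queue the work; the real
	 * driver gates the re-queue on data still pending in the circ buffer.
	 */
	clear_bit(DMA_TX_IS_WORKING, &s->flags);
	smp_mb__after_atomic();
	schedule_work(&s->tsk_dma_tx);
}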