
serial: imx: remove the DMA wait queue

The DMA wait queue makes the code very complicated:
  For RX, the @->stop_rx hook does not really stop the RX;
  For TX, the @->stop_tx hook does not really stop the TX.

The above makes imx_shutdown() have to wait for the RX/TX DMA to finish.

To simplify the code, this patch removes the DMA wait queue. By calling
dmaengine_terminate_all(), the RX stops immediately after the @->stop_rx
hook is called, and likewise for the TX.

Signed-off-by: Huang Shijie <b32955@freescale.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Huang Shijie 2014-05-23 12:40:40 +08:00 committed by Greg Kroah-Hartman
parent 8eccd0cd21
commit e2f2786606
1 changed file with 14 additions and 28 deletions


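For orientation before the diff: after this change, the @->stop_tx hook terminates the TX DMA channel on the spot instead of returning early and relying on a wait-queue handshake. Below is a simplified sketch of the resulting path, assembled from the hunks that follow (illustration only; the real function in drivers/tty/serial/imx.c carries additional register handling):

	/*
	 * Simplified sketch of imx_stop_tx() after this patch (not verbatim).
	 * The RX side is symmetric, using dma_chan_rx and dma_is_rxing.
	 */
	static void imx_stop_tx(struct uart_port *port)
	{
		struct imx_port *sport = (struct imx_port *)port;
		unsigned long temp;

		/* Abort any in-flight TX DMA right away; no wait queue needed. */
		if (sport->dma_is_enabled && sport->dma_is_txing) {
			dmaengine_terminate_all(sport->dma_chan_tx);
			sport->dma_is_txing = 0;
		}

		/* Mask the "transmitter empty" interrupt, as before. */
		temp = readl(sport->port.membase + UCR1);
		writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
	}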
@@ -225,7 +225,6 @@ struct imx_port {
 	void			*rx_buf;
 	unsigned int		tx_bytes;
 	unsigned int		dma_tx_nents;
-	wait_queue_head_t	dma_wait;
 };
 
 struct imx_port_ucrs {
@@ -416,12 +415,10 @@ static void imx_stop_tx(struct uart_port *port)
 		return;
 	}
 
-	/*
-	 * We are maybe in the SMP context, so if the DMA TX thread is running
-	 * on other cpu, we have to wait for it to finish.
-	 */
-	if (sport->dma_is_enabled && sport->dma_is_txing)
-		return;
+	if (sport->dma_is_enabled && sport->dma_is_txing) {
+		dmaengine_terminate_all(sport->dma_chan_tx);
+		sport->dma_is_txing = 0;
+	}
 
 	temp = readl(sport->port.membase + UCR1);
 	writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
@@ -435,12 +432,10 @@ static void imx_stop_rx(struct uart_port *port)
 	struct imx_port *sport = (struct imx_port *)port;
 	unsigned long temp;
 
-	/*
-	 * We are maybe in the SMP context, so if the DMA TX thread is running
-	 * on other cpu, we have to wait for it to finish.
-	 */
-	if (sport->dma_is_enabled && sport->dma_is_rxing)
-		return;
+	if (sport->dma_is_enabled && sport->dma_is_rxing) {
+		dmaengine_terminate_all(sport->dma_chan_rx);
+		sport->dma_is_rxing = 0;
+	}
 
 	temp = readl(sport->port.membase + UCR2);
 	writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
@@ -497,12 +492,6 @@ static void dma_tx_callback(void *data)
 	dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
 
 	uart_write_wakeup(&sport->port);
-
-	if (waitqueue_active(&sport->dma_wait)) {
-		wake_up(&sport->dma_wait);
-		dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
-		return;
-	}
 }
 
 static void imx_dma_tx(struct imx_port *sport)
@@ -875,10 +864,6 @@ static void imx_rx_dma_done(struct imx_port *sport)
 	writel(temp, sport->port.membase + UCR1);
 
 	sport->dma_is_rxing = 0;
-
-	/* Is the shutdown waiting for us? */
-	if (waitqueue_active(&sport->dma_wait))
-		wake_up(&sport->dma_wait);
 }
 
 /*
@@ -1025,8 +1010,6 @@ static void imx_enable_dma(struct imx_port *sport)
 {
 	unsigned long temp;
 
-	init_waitqueue_head(&sport->dma_wait);
-
 	/* set UCR1 */
 	temp = readl(sport->port.membase + UCR1);
 	temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN |
@@ -1218,10 +1201,13 @@ static void imx_shutdown(struct uart_port *port)
 	unsigned long flags;
 
 	if (sport->dma_is_enabled) {
-		/* We have to wait for the DMA to finish. */
-		wait_event(sport->dma_wait,
-			!sport->dma_is_rxing && !sport->dma_is_txing);
+		/*
+		 * The upper layer may does not call the @->stop_tx and
+		 * @->stop_rx, so we call them ourselves.
+		 */
+		imx_stop_tx(port);
+		imx_stop_rx(port);
 		imx_disable_dma(sport);
 		imx_uart_dma_exit(sport);
 	}
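
Taken together with the last hunk, the shutdown-side effect is that imx_shutdown() no longer sleeps on dma_wait; it invokes the stop hooks itself, which now tear the channels down synchronously before DMA is disabled and released. A rough sketch of the resulting order follows; the helper name imx_shutdown_dma_teardown is hypothetical and only frames the excerpt, which in the driver sits inline in imx_shutdown():

	/* Sketch of the DMA teardown order in imx_shutdown() after this patch. */
	static void imx_shutdown_dma_teardown(struct uart_port *port)
	{
		struct imx_port *sport = (struct imx_port *)port;

		if (!sport->dma_is_enabled)
			return;

		/*
		 * The upper layer may not have called @->stop_tx/@->stop_rx,
		 * so call them here; each one runs dmaengine_terminate_all()
		 * on its channel and clears the corresponding dma_is_*ing flag.
		 */
		imx_stop_tx(port);
		imx_stop_rx(port);

		/* Only then disable UART DMA and release the channels. */
		imx_disable_dma(sport);
		imx_uart_dma_exit(sport);
	}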