
spi: img-spfi: Implement a handle_err() callback

The driver can be greatly simplified by moving the transfer timeout
handling to a handle_err() callback.

Signed-off-by: Ezequiel Garcia <ezequiel.garcia@imgtec.com>
Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
Branch: hifive-unleashed-5.1
Author: Ezequiel Garcia, 2015-04-08 10:03:14 -07:00; committed by Mark Brown
Parent: 89cda06d7f
Commit: 824ab37df0
1 changed file with 24 additions and 20 deletions
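The simplification described in the commit message relies on the SPI core invoking the new callback for the driver: when a transfer returns an error (such as the -ETIMEDOUT in the first hunk below), the core calls master->handle_err() before finalizing the message, so the DMA-teardown-and-reset logic no longer has to be repeated at the top of img_spfi_transfer_one(). The sketch below is a simplified, hypothetical illustration of that call path, not the actual spi_transfer_one_message() in drivers/spi/spi.c (which also manages completions and per-transfer timeouts); the name example_transfer_one_message() is invented here for illustration.

#include <linux/spi/spi.h>
#include <linux/list.h>

/* Simplified stand-in for the SPI core's message loop (illustrative only). */
static int example_transfer_one_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* e.g. img_spfi_transfer_one(), which may return -ETIMEDOUT */
		ret = master->transfer_one(master, msg->spi, xfer);
		if (ret < 0) {
			/*
			 * Let the driver clean up: for img-spfi this is
			 * img_spfi_handle_err(), which terminates any
			 * in-flight DMA and resets the controller.
			 */
			if (master->handle_err)
				master->handle_err(master, msg);
			break;
		}
	}

	msg->status = ret;
	spi_finalize_current_message(master);
	return ret;
}

Because the core runs the error path for every failed transfer, the "SPI DMA still busy" check that previously guarded the start of img_spfi_transfer_one() becomes redundant and is removed in the hunks below.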


@@ -270,7 +270,6 @@ static int img_spfi_start_pio(struct spi_master *master,
 	if (rx_bytes > 0 || tx_bytes > 0) {
 		dev_err(spfi->dev, "PIO transfer timed out\n");
-		spfi_reset(spfi);
 		return -ETIMEDOUT;
 	}
@@ -396,6 +395,29 @@ stop_dma:
 	return -EIO;
 }
 
+static void img_spfi_handle_err(struct spi_master *master,
+				struct spi_message *msg)
+{
+	struct img_spfi *spfi = spi_master_get_devdata(master);
+	unsigned long flags;
+
+	/*
+	 * Stop all DMA and reset the controller if the previous transaction
+	 * timed-out and never completed it's DMA.
+	 */
+	spin_lock_irqsave(&spfi->lock, flags);
+	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
+		spfi->tx_dma_busy = false;
+		spfi->rx_dma_busy = false;
+
+		dmaengine_terminate_all(spfi->tx_ch);
+		dmaengine_terminate_all(spfi->rx_ch);
+	}
+	spin_unlock_irqrestore(&spfi->lock, flags);
+
+	spfi_reset(spfi);
+}
+
 static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
 {
 	struct img_spfi *spfi = spi_master_get_devdata(master);
@@ -462,8 +484,6 @@ static int img_spfi_transfer_one(struct spi_master *master,
 			    struct spi_transfer *xfer)
 {
 	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
-	bool dma_reset = false;
-	unsigned long flags;
 	int ret;
 
 	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
@@ -473,23 +493,6 @@ static int img_spfi_transfer_one(struct spi_master *master,
 		return -EINVAL;
 	}
 
-	/*
-	 * Stop all DMA and reset the controller if the previous transaction
-	 * timed-out and never completed it's DMA.
-	 */
-	spin_lock_irqsave(&spfi->lock, flags);
-	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
-		dev_err(spfi->dev, "SPI DMA still busy\n");
-		dma_reset = true;
-	}
-	spin_unlock_irqrestore(&spfi->lock, flags);
-
-	if (dma_reset) {
-		dmaengine_terminate_all(spfi->tx_ch);
-		dmaengine_terminate_all(spfi->rx_ch);
-		spfi_reset(spfi);
-	}
-
 	img_spfi_config(master, spi, xfer);
 	if (master->can_dma && master->can_dma(master, spi, xfer))
 		ret = img_spfi_start_dma(master, spi, xfer);
@@ -607,6 +610,7 @@ static int img_spfi_probe(struct platform_device *pdev)
 	master->set_cs = img_spfi_set_cs;
 	master->transfer_one = img_spfi_transfer_one;
 	master->prepare_message = img_spfi_prepare;
+	master->handle_err = img_spfi_handle_err;
 
 	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
 	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");