
mmc-host: move to dma_transfer_direction

Fix up the usage of DMA direction by introducing enum dma_transfer_direction;
this patch moves the mmc host drivers to use the new enum.

Cc: Nicolas Ferre <nicolas.ferre@atmel.com>
Cc: Chris Ball <cjb@laptop.org>
Cc: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Vinod Koul 2011-10-14 10:45:11 +05:30
parent 1d0c81e876
commit 05f5799cbe
6 changed files with 31 additions and 15 deletions
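
The conversion pattern is the same in every driver touched here: the DMA mapping API (dma_map_sg()/dma_unmap_sg()) keeps taking enum dma_data_direction (DMA_TO_DEVICE/DMA_FROM_DEVICE), while the dmaengine slave API (device_prep_slave_sg()) now takes the new enum dma_transfer_direction (DMA_MEM_TO_DEV/DMA_DEV_TO_MEM). Below is a minimal sketch of that split, mirroring the atmel-mci hunk; the helper name prep_mmc_slave_sg() is hypothetical and not part of this patch.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/core.h>

/* Hypothetical helper illustrating the two-enum split after this patch. */
static struct dma_async_tx_descriptor *
prep_mmc_slave_sg(struct dma_chan *chan, struct mmc_data *data)
{
	enum dma_data_direction map_dir;	/* for dma_map_sg() */
	enum dma_transfer_direction slave_dirn;	/* for device_prep_slave_sg() */
	int sglen;

	if (data->flags & MMC_DATA_READ) {
		map_dir = DMA_FROM_DEVICE;	/* buffer is written by the device */
		slave_dirn = DMA_DEV_TO_MEM;	/* transfer flows device -> memory */
	} else {
		map_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
	}

	sglen = dma_map_sg(chan->device->dev, data->sg, data->sg_len, map_dir);
	if (sglen <= 0)
		return NULL;

	/* The slave prep callback now expects a dma_transfer_direction. */
	return chan->device->device_prep_slave_sg(chan, data->sg, sglen,
			slave_dirn, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}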

drivers/mmc/host/atmel-mci.c

@@ -653,6 +653,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	struct scatterlist *sg;
 	unsigned int i;
 	enum dma_data_direction direction;
+	enum dma_transfer_direction slave_dirn;
 	unsigned int sglen;
 	/*
@@ -681,16 +682,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	if (atmci_is_mci2())
 		mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN);
-	if (data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ) {
 		direction = DMA_FROM_DEVICE;
-	else
+		slave_dirn = DMA_DEV_TO_MEM;
+	} else {
 		direction = DMA_TO_DEVICE;
+		slave_dirn = DMA_MEM_TO_DEV;
+	}
 	sglen = dma_map_sg(chan->device->dev, data->sg,
 			data->sg_len, direction);
 	desc = chan->device->device_prep_slave_sg(chan,
-			data->sg, sglen, direction,
+			data->sg, sglen, slave_dirn,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc)
 		goto unmap_exit;

drivers/mmc/host/mmci.c

@@ -372,6 +372,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	struct dma_chan *chan;
 	struct dma_device *device;
 	struct dma_async_tx_descriptor *desc;
+	enum dma_data_direction buffer_dirn;
 	int nr_sg;
 	/* Check if next job is already prepared */
@@ -385,10 +386,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	}
 	if (data->flags & MMC_DATA_READ) {
-		conf.direction = DMA_FROM_DEVICE;
+		conf.direction = DMA_DEV_TO_MEM;
+		buffer_dirn = DMA_FROM_DEVICE;
 		chan = host->dma_rx_channel;
 	} else {
-		conf.direction = DMA_TO_DEVICE;
+		conf.direction = DMA_MEM_TO_DEV;
+		buffer_dirn = DMA_TO_DEVICE;
 		chan = host->dma_tx_channel;
 	}
@@ -401,7 +404,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 		return -EINVAL;
 	device = chan->device;
-	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
 	if (nr_sg == 0)
 		return -EINVAL;
@@ -424,7 +427,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 unmap_exit:
 	if (!next)
 		dmaengine_terminate_all(chan);
-	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
 	return -ENOMEM;
 }

drivers/mmc/host/mxcmmc.c

@@ -217,6 +217,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 	unsigned int blksz = data->blksz;
 	unsigned int datasize = nob * blksz;
 	struct scatterlist *sg;
+	enum dma_transfer_direction slave_dirn;
 	int i, nents;
 	if (data->flags & MMC_DATA_STREAM)
@@ -239,10 +240,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 		}
 	}
-	if (data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ) {
 		host->dma_dir = DMA_FROM_DEVICE;
-	else
+		slave_dirn = DMA_DEV_TO_MEM;
+	} else {
 		host->dma_dir = DMA_TO_DEVICE;
+		slave_dirn = DMA_MEM_TO_DEV;
+	}
 	nents = dma_map_sg(host->dma->device->dev, data->sg,
 			data->sg_len, host->dma_dir);
@@ -250,7 +254,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 		return -EINVAL;
 	host->desc = host->dma->device->device_prep_slave_sg(host->dma,
-		data->sg, data->sg_len, host->dma_dir,
+		data->sg, data->sg_len, slave_dirn,
 		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!host->desc) {

drivers/mmc/host/mxs-mmc.c

@@ -153,6 +153,7 @@ struct mxs_mmc_host {
 	struct dma_chan *dmach;
 	struct mxs_dma_data dma_data;
 	unsigned int dma_dir;
+	enum dma_transfer_direction slave_dirn;
 	u32 ssp_pio_words[SSP_PIO_NUM];
 	unsigned int version;
@@ -323,7 +324,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
 	}
 	desc = host->dmach->device->device_prep_slave_sg(host->dmach,
-		sgl, sg_len, host->dma_dir, append);
+		sgl, sg_len, host->slave_dirn, append);
 	if (desc) {
 		desc->callback = mxs_mmc_dma_irq_callback;
 		desc->callback_param = host;
@@ -432,6 +433,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 	int i;
 	unsigned short dma_data_dir, timeout;
+	enum dma_transfer_direction slave_dirn;
 	unsigned int data_size = 0, log2_blksz;
 	unsigned int blocks = data->blocks;
@@ -447,9 +449,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 	if (data->flags & MMC_DATA_WRITE) {
 		dma_data_dir = DMA_TO_DEVICE;
+		slave_dirn = DMA_MEM_TO_DEV;
 		read = 0;
 	} else {
 		dma_data_dir = DMA_FROM_DEVICE;
+		slave_dirn = DMA_DEV_TO_MEM;
 		read = BM_SSP_CTRL0_READ;
 	}
@@ -517,6 +521,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 	WARN_ON(host->data != NULL);
 	host->data = data;
 	host->dma_dir = dma_data_dir;
+	host->slave_dirn = slave_dirn;
 	desc = mxs_mmc_prep_dma(host, 1);
 	if (!desc)
 		goto out;

drivers/mmc/host/sh_mmcif.c

@@ -230,7 +230,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 	if (ret > 0) {
 		host->dma_active = true;
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	}
 	if (desc) {
@@ -278,7 +278,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 	if (ret > 0) {
 		host->dma_active = true;
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	}
 	if (desc) {

drivers/mmc/host/tmio_mmc_dma.c

@@ -77,7 +77,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_CTRL_ACK);
+			DMA_DEV_TO_MEM, DMA_CTRL_ACK);
 	if (desc) {
 		cookie = dmaengine_submit(desc);
@@ -158,7 +158,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_CTRL_ACK);
+			DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 	if (desc) {
 		cookie = dmaengine_submit(desc);