Merge branch 'next' into v3.1-rc4

Fixed trivial conflicts in drivers/dma/amba-pl08x.c

Signed-off-by: Vinod Koul <vinod.koul@intel.com>
commit 8516f52fa4
Author: Vinod Koul <vinod.koul@intel.com>
Date:   2011-09-02 16:43:44 +05:30

9 changed files with 447 additions and 327 deletions

diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h

@@ -21,6 +21,9 @@
  * OneNAND features.
  */

+#ifndef ASM_PL080_H
+#define ASM_PL080_H
+
 #define PL080_INT_STATUS			(0x00)
 #define PL080_TC_STATUS				(0x04)
 #define PL080_TC_CLEAR				(0x08)
@@ -138,3 +141,4 @@ struct pl080s_lli {
 	u32	control1;
 };

+#endif /* ASM_PL080_H */

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c

@@ -66,28 +66,23 @@
  * after the final transfer signalled by LBREQ or LSREQ. The DMAC
  * will then move to the next LLI entry.
  *
- * Only the former works sanely with scatter lists, so we only implement
- * the DMAC flow control method.  However, peripherals which use the LBREQ
- * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
- * these hardware restrictions prevents them from using scatter DMA.
- *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
  */
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
 #include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
+
 #include <asm/hardware/pl080.h>

 #define DRIVER_NAME	"pl08xdmac"
@@ -126,7 +121,8 @@ struct pl08x_lli {
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
  * @pool_ctr: counter of LLIs in the pool
- * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
+ *	fetches
  * @mem_buses: set to indicate memory transfers on AHB2.
  * @lock: a spinlock for this struct
  */
@@ -149,14 +145,6 @@ struct pl08x_driver_data {
  * PL08X specific defines
  */

-/*
- * Memory boundaries: the manual for PL08x says that the controller
- * cannot read past a 1KiB boundary, so these defines are used to
- * create transfer LLIs that do not cross such boundaries.
- */
-#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KB 0x400 */
-#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)
-
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 #define PL08X_LLI_TSFR_SIZE	0x2000

@@ -272,7 +260,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 	writel(val, ch->base + PL080_CH_CONFIG);
 }

-
 /*
  * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
  * clears any pending interrupt status. This should not be used for
@@ -407,6 +394,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 		return NULL;
 	}

+	pm_runtime_get_sync(&pl08x->adev->dev);
 	return ch;
 }

@@ -420,6 +408,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 	/* Stop the channel and clear its interrupts */
 	pl08x_terminate_phy_chan(pl08x, ch);

+	pm_runtime_put(&pl08x->adev->dev);
+
 	/* Mark it as free */
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
@@ -499,36 +489,30 @@ struct pl08x_lli_build_data {
 };

 /*
- * Autoselect a master bus to use for the transfer this prefers the
- * destination bus if both available if fixed address on one bus the
- * other will be chosen
+ * Autoselect a master bus to use for the transfer. Slave will be the chosen as
+ * victim in case src & dest are not similarly aligned. i.e. If after aligning
+ * masters address with width requirements of transfer (by sending few byte by
+ * byte data), slave is still not aligned, then its width will be reduced to
+ * BYTE.
+ * - prefers the destination bus if both available
+ * - prefers bus with fixed address (i.e. peripheral)
  */
 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
 	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = &bd->srcbus;
-		*sbus = &bd->dstbus;
+		*mbus = &bd->dstbus;
+		*sbus = &bd->srcbus;
 	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
-		*mbus = &bd->dstbus;
-		*sbus = &bd->srcbus;
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else {
-		if (bd->dstbus.buswidth == 4) {
-			*mbus = &bd->dstbus;
-			*sbus = &bd->srcbus;
-		} else if (bd->srcbus.buswidth == 4) {
-			*mbus = &bd->srcbus;
-			*sbus = &bd->dstbus;
-		} else if (bd->dstbus.buswidth == 2) {
-			*mbus = &bd->dstbus;
-			*sbus = &bd->srcbus;
-		} else if (bd->srcbus.buswidth == 2) {
-			*mbus = &bd->srcbus;
-			*sbus = &bd->dstbus;
-		} else {
-			/* bd->srcbus.buswidth == 1 */
+		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
 			*mbus = &bd->dstbus;
 			*sbus = &bd->srcbus;
+		} else {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
 		}
 	}
 }

@@ -547,7 +531,8 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
 	llis_va[num_llis].cctl = cctl;
 	llis_va[num_llis].src = bd->srcbus.addr;
 	llis_va[num_llis].dst = bd->dstbus.addr;
-	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
+		sizeof(struct pl08x_lli);
 	llis_va[num_llis].lli |= bd->lli_bus;

 	if (cctl & PL080_CONTROL_SRC_INCR)
@@ -560,16 +545,12 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
 	bd->remainder -= len;
 }

-/*
- * Return number of bytes to fill to boundary, or len.
- * This calculation works for any value of addr.
- */
-static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
+static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
+		u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
 {
-	size_t boundary_len = PL08X_BOUNDARY_SIZE -
-		(addr & (PL08X_BOUNDARY_SIZE - 1));
-
-	return min(boundary_len, len);
+	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
+	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+	(*total_bytes) += len;
 }

 /*
@@ -583,13 +564,11 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	struct pl08x_bus_data *mbus, *sbus;
 	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
-	u32 cctl;
-	size_t max_bytes_per_lli;
-	size_t total_bytes = 0;
+	u32 cctl, early_bytes = 0;
+	size_t max_bytes_per_lli, total_bytes = 0;
 	struct pl08x_lli *llis_va;

-	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
-			&txd->llis_bus);
+	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
 	if (!txd->llis_va) {
 		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
 		return 0;
@@ -619,55 +598,85 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	bd.srcbus.buswidth = bd.srcbus.maxwidth;
 	bd.dstbus.buswidth = bd.dstbus.maxwidth;

-	/*
-	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
-	 */
-	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
-		PL080_CONTROL_TRANSFER_SIZE_MASK;
-
 	/* We need to count this down to zero */
 	bd.remainder = txd->len;

-	/*
-	 * Choose bus to align to
-	 * - prefers destination bus if both available
-	 * - if fixed address on one bus chooses other
-	 */
 	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

-	dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
+	dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
 		bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
 		bd.srcbus.buswidth,
 		bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
 		bd.dstbus.buswidth,
-		bd.remainder, max_bytes_per_lli);
+		bd.remainder);
 	dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
 		mbus == &bd.srcbus ? "src" : "dst",
 		sbus == &bd.srcbus ? "src" : "dst");

-	if (txd->len < mbus->buswidth) {
-		/* Less than a bus width available - send as single bytes */
-		while (bd.remainder) {
-			dev_vdbg(&pl08x->adev->dev,
-				"%s single byte LLIs for a transfer of "
-				"less than a bus width (remain 0x%08x)\n",
-				__func__, bd.remainder);
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
-		}
-	} else {
-		/* Make one byte LLIs until master bus is aligned */
-		while ((mbus->addr) % (mbus->buswidth)) {
-			dev_vdbg(&pl08x->adev->dev,
-				"%s adjustment lli for less than bus width "
-				"(remain 0x%08x)\n",
-				__func__, bd.remainder);
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
-		}
+	/*
+	 * Zero length is only allowed if all these requirements are met:
+	 * - flow controller is peripheral.
+	 * - src.addr is aligned to src.width
+	 * - dst.addr is aligned to dst.width
+	 *
+	 * sg_len == 1 should be true, as there can be two cases here:
+	 * - Memory addresses are contiguous and are not scattered. Here, Only
+	 * one sg will be passed by user driver, with memory address and zero
+	 * length. We pass this to controller and after the transfer it will
+	 * receive the last burst request from peripheral and so transfer
+	 * finishes.
+	 *
+	 * - Memory addresses are scattered and are not contiguous. Here,
+	 * Obviously as DMA controller doesn't know when a lli's transfer gets
+	 * over, it can't load next lli. So in this case, there has to be an
+	 * assumption that only one lli is supported. Thus, we can't have
+	 * scattered addresses.
+	 */
+	if (!bd.remainder) {
+		u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+			PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
+				(fc <= PL080_FLOW_SRC2DST_SRC))) {
+			dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
+				__func__);
+			return 0;
+		}
+
+		if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
+				(bd.srcbus.addr % bd.srcbus.buswidth)) {
+			dev_err(&pl08x->adev->dev,
+				"%s src & dst address must be aligned to src"
+				" & dst width if peripheral is flow controller",
+				__func__);
+			return 0;
+		}
+
+		cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+				bd.dstbus.buswidth, 0);
+		pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
 	}

+	/*
+	 * Send byte by byte for following cases
+	 * - Less than a bus width available
+	 * - until master bus is aligned
+	 */
+	if (bd.remainder < mbus->buswidth)
+		early_bytes = bd.remainder;
+	else if ((mbus->addr) % (mbus->buswidth)) {
+		early_bytes = mbus->buswidth - (mbus->addr) % (mbus->buswidth);
+		if ((bd.remainder - early_bytes) < mbus->buswidth)
+			early_bytes = bd.remainder;
+	}
+
+	if (early_bytes) {
+		dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs "
+				"(remain 0x%08x)\n", __func__, bd.remainder);
+		prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
+				&total_bytes);
+	}
+
+	if (bd.remainder) {
 		/*
 		 * Master now aligned
 		 * - if slave is not then we must set its width down
@@ -680,138 +689,55 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			sbus->buswidth = 1;
 		}

+		/* Bytes transferred = tsize * src width, not MIN(buswidths) */
+		max_bytes_per_lli = bd.srcbus.buswidth *
+			PL080_CONTROL_TRANSFER_SIZE_MASK;
+
 		/*
 		 * Make largest possible LLIs until less than one bus
 		 * width left
 		 */
 		while (bd.remainder > (mbus->buswidth - 1)) {
-			size_t lli_len, target_len, tsize, odd_bytes;
+			size_t lli_len, tsize, width;

 			/*
 			 * If enough left try to send max possible,
 			 * otherwise try to send the remainder
 			 */
-			target_len = min(bd.remainder, max_bytes_per_lli);
+			lli_len = min(bd.remainder, max_bytes_per_lli);

 			/*
-			 * Set bus lengths for incrementing buses to the
-			 * number of bytes which fill to next memory boundary,
-			 * limiting on the target length calculated above.
+			 * Check against maximum bus alignment: Calculate actual
+			 * transfer size in relation to bus width and get a
+			 * maximum remainder of the highest bus width - 1
 			 */
-			if (cctl & PL080_CONTROL_SRC_INCR)
-				bd.srcbus.fill_bytes =
-					pl08x_pre_boundary(bd.srcbus.addr,
-						target_len);
-			else
-				bd.srcbus.fill_bytes = target_len;
-
-			if (cctl & PL080_CONTROL_DST_INCR)
-				bd.dstbus.fill_bytes =
-					pl08x_pre_boundary(bd.dstbus.addr,
-						target_len);
-			else
-				bd.dstbus.fill_bytes = target_len;
-
-			/* Find the nearest */
-			lli_len	= min(bd.srcbus.fill_bytes,
-				      bd.dstbus.fill_bytes);
-
-			BUG_ON(lli_len > bd.remainder);
-
-			if (lli_len <= 0) {
-				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %zu, <= 0\n",
-					__func__, lli_len);
-				return 0;
-			}
-
-			if (lli_len == target_len) {
-				/*
-				 * Can send what we wanted.
-				 * Maintain alignment
-				 */
-				lli_len	= (lli_len/mbus->buswidth) *
-							mbus->buswidth;
-				odd_bytes = 0;
-			} else {
-				/*
-				 * So now we know how many bytes to transfer
-				 * to get to the nearest boundary. The next
-				 * LLI will past the boundary. However, we
-				 * may be working to a boundary on the slave
-				 * bus. We need to ensure the master stays
-				 * aligned, and that we are working in
-				 * multiples of the bus widths.
-				 */
-				odd_bytes = lli_len % mbus->buswidth;
-				lli_len -= odd_bytes;
-			}
+			width = max(mbus->buswidth, sbus->buswidth);
+			lli_len = (lli_len / width) * width;
+			tsize = lli_len / bd.srcbus.buswidth;

-			if (lli_len) {
-				/*
-				 * Check against minimum bus alignment:
-				 * Calculate actual transfer size in relation
-				 * to bus width an get a maximum remainder of
-				 * the smallest bus width - 1
-				 */
-				/* FIXME: use round_down()? */
-				tsize = lli_len / min(mbus->buswidth,
-						      sbus->buswidth);
-				lli_len	= tsize * min(mbus->buswidth,
-						      sbus->buswidth);
-
-				if (target_len != lli_len) {
-					dev_vdbg(&pl08x->adev->dev,
-					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
-					__func__, target_len, lli_len, txd->len);
-				}
-
-				cctl = pl08x_cctl_bits(cctl,
-						       bd.srcbus.buswidth,
-						       bd.dstbus.buswidth,
-						       tsize);
-
-				dev_vdbg(&pl08x->adev->dev,
-					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
-					__func__, lli_len, bd.remainder);
-
-				pl08x_fill_lli_for_desc(&bd, num_llis++,
-					lli_len, cctl);
-				total_bytes += lli_len;
-			}
+			dev_vdbg(&pl08x->adev->dev,
+				"%s fill lli with single lli chunk of "
+				"size 0x%08zx (remainder 0x%08zx)\n",
+				__func__, lli_len, bd.remainder);

-			if (odd_bytes) {
-				/*
-				 * Creep past the boundary, maintaining
-				 * master alignment
-				 */
-				int j;
-				for (j = 0; (j < mbus->buswidth)
-						&& (bd.remainder); j++) {
-					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-					dev_vdbg(&pl08x->adev->dev,
-						"%s align with boundary, single byte (remain 0x%08zx)\n",
-						__func__, bd.remainder);
-					pl08x_fill_lli_for_desc(&bd,
-						num_llis++, 1, cctl);
-					total_bytes++;
-				}
-			}
+			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+				bd.dstbus.buswidth, tsize);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, lli_len, cctl);
+			total_bytes += lli_len;
 		}

 		/*
 		 * Send any odd bytes
 		 */
-		while (bd.remainder) {
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			dev_vdbg(&pl08x->adev->dev,
-				"%s align with boundary, single odd byte (remain %zu)\n",
-				__func__, bd.remainder);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
+		if (bd.remainder) {
+			dev_vdbg(&pl08x->adev->dev,
+				"%s align with boundary, send odd bytes (remain %zu)\n",
+				__func__, bd.remainder);
+			prep_byte_width_lli(&bd, &cctl, bd.remainder,
+					num_llis++, &total_bytes);
 		}
 	}

 	if (total_bytes != txd->len) {
 		dev_err(&pl08x->adev->dev,
 			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
@@ -917,9 +843,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 	 * need, but for slaves the physical signals may be muxed!
 	 * Can the platform allow us to use this channel?
 	 */
-	if (plchan->slave &&
-	    ch->signal < 0 &&
-	    pl08x->pd->get_signal) {
+	if (plchan->slave && pl08x->pd->get_signal) {
 		ret = pl08x->pd->get_signal(plchan);
 		if (ret < 0) {
 			dev_dbg(&pl08x->adev->dev,
@@ -1008,10 +932,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
  * If slaves are relying on interrupts to signal completion this function
  * must not be called with interrupts disabled.
  */
-static enum dma_status
-pl08x_dma_tx_status(struct dma_chan *chan,
-		    dma_cookie_t cookie,
-		    struct dma_tx_state *txstate)
+static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	dma_cookie_t last_used;
@@ -1253,7 +1175,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,

 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
 	if (!num_llis) {
-		kfree(txd);
+		spin_lock_irqsave(&plchan->lock, flags);
+		pl08x_free_txd(pl08x, txd);
+		spin_unlock_irqrestore(&plchan->lock, flags);
 		return -EINVAL;
 	}

@@ -1301,7 +1225,7 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
 	unsigned long flags)
 {
-	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

 	if (txd) {
 		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
@@ -1367,7 +1291,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
-	int ret;
+	int ret, tmp;

 	/*
 	 * Current implementation ASSUMES only one sg
@@ -1401,12 +1325,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	txd->len = sgl->length;

 	if (direction == DMA_TO_DEVICE) {
-		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->dst_cctl;
 		txd->src_addr = sgl->dma_address;
 		txd->dst_addr = plchan->dst_addr;
 	} else if (direction == DMA_FROM_DEVICE) {
-		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->src_cctl;
 		txd->src_addr = plchan->src_addr;
 		txd->dst_addr = sgl->dma_address;
@@ -1416,6 +1338,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}

+	if (plchan->cd->device_fc)
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+			PL080_FLOW_PER2MEM_PER;
+	else
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+			PL080_FLOW_PER2MEM;
+
+	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
@@ -1507,13 +1438,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
-	u32 val;
-
-	val = readl(pl08x->base + PL080_CONFIG);
-	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
-	/* We implicitly clear bit 1 and that means little-endian mode */
-	val |= PL080_CONFIG_ENABLE;
-	writel(val, pl08x->base + PL080_CONFIG);
+	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }

 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
@@ -1630,38 +1555,40 @@ static void pl08x_tasklet(unsigned long data)
 static irqreturn_t pl08x_irq(int irq, void *dev)
 {
 	struct pl08x_driver_data *pl08x = dev;
-	u32 mask = 0;
-	u32 val;
-	int i;
+	u32 mask = 0, err, tc, i;

-	val = readl(pl08x->base + PL080_ERR_STATUS);
-	if (val) {
-		/* An error interrupt (on one or more channels) */
-		dev_err(&pl08x->adev->dev,
-			"%s error interrupt, register value 0x%08x\n",
-			__func__, val);
-		/*
-		 * Simply clear ALL PL08X error interrupts,
-		 * regardless of channel and cause
-		 * FIXME: should be 0x00000003 on PL081 really.
-		 */
-		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+	/* check & clear - ERR & TC interrupts */
+	err = readl(pl08x->base + PL080_ERR_STATUS);
+	if (err) {
+		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
+			__func__, err);
+		writel(err, pl08x->base + PL080_ERR_CLEAR);
 	}
-	val = readl(pl08x->base + PL080_INT_STATUS);
+	tc = readl(pl08x->base + PL080_INT_STATUS);
+	if (tc)
+		writel(tc, pl08x->base + PL080_TC_CLEAR);
+
+	if (!err && !tc)
+		return IRQ_NONE;
+
 	for (i = 0; i < pl08x->vd->channels; i++) {
-		if ((1 << i) & val) {
+		if (((1 << i) & err) || ((1 << i) & tc)) {
 			/* Locate physical channel */
 			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
 			struct pl08x_dma_chan *plchan = phychan->serving;

+			if (!plchan) {
+				dev_err(&pl08x->adev->dev,
+					"%s Error TC interrupt on unused channel: 0x%08x\n",
+					__func__, i);
+				continue;
+			}
+
 			/* Schedule tasklet on this channel */
 			tasklet_schedule(&plchan->tasklet);
 			mask |= (1 << i);
 		}
 	}
-	/* Clear only the terminal interrupts on channels we processed */
-	writel(mask, pl08x->base + PL080_TC_CLEAR);

 	return mask ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -1685,9 +1612,7 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
  * Make a local wrapper to hold required data
  */
 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
-					   struct dma_device *dmadev,
-					   unsigned int channels,
-					   bool slave)
+		struct dma_device *dmadev, unsigned int channels, bool slave)
 {
 	struct pl08x_dma_chan *chan;
 	int i;
@@ -1700,7 +1625,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 	 * to cope with that situation.
 	 */
 	for (i = 0; i < channels; i++) {
-		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
+		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 		if (!chan) {
 			dev_err(&pl08x->adev->dev,
 				"%s no memory for channel\n", __func__);
@@ -1728,7 +1653,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			kfree(chan);
 			continue;
 		}
-		dev_info(&pl08x->adev->dev,
+		dev_dbg(&pl08x->adev->dev,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);

@@ -1837,8 +1762,8 @@ static const struct file_operations pl08x_debugfs_operations = {
 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
 {
 	/* Expose a simple debugfs interface to view all clocks */
-	(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
-			NULL, pl08x,
+	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
+			S_IFREG | S_IRUGO, NULL, pl08x,
 			&pl08x_debugfs_operations);
 }

@@ -1860,12 +1785,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		return ret;

 	/* Create the driver state holder */
-	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
+	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
 	if (!pl08x) {
 		ret = -ENOMEM;
 		goto out_no_pl08x;
 	}

+	pm_runtime_set_active(&adev->dev);
+	pm_runtime_enable(&adev->dev);
+
 	/* Initialize memcpy engine */
 	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
 	pl08x->memcpy.dev = &adev->dev;
@@ -1939,7 +1867,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	}

 	/* Initialize physical channels */
-	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
+	pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
 			GFP_KERNEL);
 	if (!pl08x->phy_chans) {
 		dev_err(&adev->dev, "%s failed to allocate "
@@ -1956,9 +1884,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		spin_lock_init(&ch->lock);
 		ch->serving = NULL;
 		ch->signal = -1;
-		dev_info(&adev->dev,
-			 "physical channel %d is %s\n", i,
-			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
+		dev_dbg(&adev->dev, "physical channel %d is %s\n",
+			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
 	}

 	/* Register as many memcpy channels as there are physical channels */
@@ -1974,8 +1901,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)

 	/* Register slave channels */
 	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
-					      pl08x->pd->num_slave_channels,
-					      true);
+			pl08x->pd->num_slave_channels, true);
 	if (ret <= 0) {
 		dev_warn(&pl08x->adev->dev,
 			"%s failed to enumerate slave channels - %d\n",
@@ -2005,6 +1931,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
 		 amba_part(adev), amba_rev(adev),
 		 (unsigned long long)adev->res.start, adev->irq[0]);
+
+	pm_runtime_put(&adev->dev);
 	return 0;

 out_no_slave_reg:
@@ -2023,6 +1951,9 @@ out_no_ioremap:
 	dma_pool_destroy(pl08x->pool);
 out_no_lli_pool:
 out_no_platdata:
+	pm_runtime_put(&adev->dev);
+	pm_runtime_disable(&adev->dev);
+
 	kfree(pl08x);
 out_no_pl08x:
 	amba_release_regions(adev);
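
The pm_runtime hunks above bracket physical-channel use: pl08x_get_phy_channel() takes a reference with pm_runtime_get_sync(), pl08x_put_phy_channel() drops it with pm_runtime_put(), and probe enables runtime PM then drops its own initial reference once setup is complete. A minimal sketch of that pairing (function names here are illustrative, not from this driver):

	#include <linux/pm_runtime.h>

	static int example_claim(struct device *dev)
	{
		/* powers the device back up if it was runtime-suspended */
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put(dev);	/* keep the refcount balanced */
			return ret;
		}
		return 0;
	}

	static void example_release(struct device *dev)
	{
		/* last put lets the PM core suspend the device again */
		pm_runtime_put(dev);
	}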

diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c

@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 {
 	struct at_desc *desc, *_desc;
 	struct at_desc *ret = NULL;
+	unsigned long flags;
 	unsigned int i = 0;
 	LIST_HEAD(tmp_list);

-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
 		i++;
 		if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 		dev_dbg(chan2dev(&atchan->chan_common),
 				"desc %p not ACKed\n", desc);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"scanned %u descriptors on freelist\n", i);

@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 	if (!ret) {
 		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
 		if (ret) {
-			spin_lock_bh(&atchan->lock);
+			spin_lock_irqsave(&atchan->lock, flags);
 			atchan->descs_allocated++;
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, flags);
 		} else {
 			dev_err(chan2dev(&atchan->chan_common),
 					"not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	if (desc) {
 		struct at_desc *child;
+		unsigned long flags;

-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&atchan->chan_common),
 					"moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 		dev_vdbg(chan2dev(&atchan->chan_common),
 			 "moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &atchan->free_list);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	}
 }

@@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)

 	/* for cyclic transfers,
 	 * no need to replay callback function while stopping */
-	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+	if (!atc_chan_is_cyclic(atchan)) {
 		dma_async_tx_callback	callback = txd->callback;
 		void			*param = txd->callback_param;

@@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 static void atc_tasklet(unsigned long data)
 {
 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+	unsigned long flags;

-	spin_lock(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 		atc_handle_error(atchan);
-	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	else if (atc_chan_is_cyclic(atchan))
 		atc_handle_cyclic(atchan);
 	else
 		atc_advance_work(atchan);

-	spin_unlock(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }

 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct at_desc		*desc = txd_to_at_desc(tx);
 	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
 	dma_cookie_t		cookie;
+	unsigned long		flags;

-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = atc_assign_cookie(atchan, desc);

 	if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_add_tail(&desc->desc_node, &atchan->queue);
 	}

-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);

 	return cookie;
 }
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	int			chan_id = atchan->chan_common.chan_id;
+	unsigned long		flags;

 	LIST_HEAD(list);

 	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

 	if (cmd == DMA_PAUSE) {
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);

 		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
 		set_bit(ATC_IS_PAUSED, &atchan->status);

-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_RESUME) {
-		if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+		if (!atc_chan_is_paused(atchan))
 			return 0;

-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);

 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
 		clear_bit(ATC_IS_PAUSED, &atchan->status);

-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_TERMINATE_ALL) {
 		struct at_desc	*desc, *_desc;
 		/*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		 * channel. We still have to poll the channel enable bit due
 		 * to AHB/HSB limitations.
 		 */
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);

 		/* disabling channel: must also remove suspend state */
 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		/* if channel dedicated to cyclic operations, free it */
 		clear_bit(ATC_IS_CYCLIC, &atchan->status);

-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else {
 		return -ENXIO;
 	}
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	dma_cookie_t		last_used;
 	dma_cookie_t		last_complete;
+	unsigned long		flags;
 	enum dma_status		ret;

-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);

 	last_complete = atchan->completed_cookie;
 	last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}

-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);

 	if (ret != DMA_SUCCESS)
 		dma_set_tx_state(txstate, last_complete, last_used,
@@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
 	else
 		dma_set_tx_state(txstate, last_complete, last_used, 0);

-	if (test_bit(ATC_IS_PAUSED, &atchan->status))
+	if (atc_chan_is_paused(atchan))
 		ret = DMA_PAUSED;

 	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan,
 static void atc_issue_pending(struct dma_chan *chan)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	unsigned long		flags;

 	dev_vdbg(chan2dev(chan), "issue_pending\n");

 	/* Not needed for cyclic transfers */
-	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	if (atc_chan_is_cyclic(atchan))
 		return;

-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (!atc_chan_is_enabled(atchan)) {
 		atc_advance_work(atchan);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }

@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	struct at_desc		*desc;
 	struct at_dma_slave	*atslave;
+	unsigned long		flags;
 	int			i;
 	u32			cfg;
 	LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 		list_add_tail(&desc->desc_node, &tmp_list);
 	}

-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	atchan->descs_allocated = i;
 	list_splice(&tmp_list, &atchan->free_list);
 	atchan->completed_cookie = chan->cookie = 1;
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);

 	/* channel parameters */
 	channel_writel(atchan, CFG, cfg);
@@ -1293,15 +1301,13 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-
-	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+		/* controller can do slave DMA: can trigger cyclic transfers */
+		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-
-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
-	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_control = atc_control;
+	}

 	dma_writel(atdma, EN, AT_DMA_ENABLE);
@@ -1377,27 +1383,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
 	clk_disable(atdma->clk);
 }

+static int at_dma_prepare(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
+
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+		/* wait for transaction completion (except in cyclic case) */
+		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
+			return -EAGAIN;
+	}
+	return 0;
+}
+
+static void atc_suspend_cyclic(struct at_dma_chan *atchan)
+{
+	struct dma_chan	*chan = &atchan->chan_common;
+
+	/* Channel should be paused by user
+	 * do it anyway even if it is not done already */
+	if (!atc_chan_is_paused(atchan)) {
+		dev_warn(chan2dev(chan),
+		"cyclic channel not paused, should be done by channel user\n");
+		atc_control(chan, DMA_PAUSE, 0);
+	}
+
+	/* now preserve additional data for cyclic operations */
+	/* next descriptor address in the cyclic list */
+	atchan->save_dscr = channel_readl(atchan, DSCR);
+
+	vdbg_dump_regs(atchan);
+}
+
 static int at_dma_suspend_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;

-	at_dma_off(platform_get_drvdata(pdev));
+	/* preserve data */
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+		if (atc_chan_is_cyclic(atchan))
+			atc_suspend_cyclic(atchan);
+		atchan->save_cfg = channel_readl(atchan, CFG);
+	}
+	atdma->save_imr = dma_readl(atdma, EBCIMR);
+
+	/* disable DMA controller */
+	at_dma_off(atdma);
 	clk_disable(atdma->clk);
 	return 0;
 }

+static void atc_resume_cyclic(struct at_dma_chan *atchan)
+{
+	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
+
+	/* restore channel status for cyclic descriptors list:
+	 * next descriptor in the cyclic list at the time of suspend */
+	channel_writel(atchan, SADDR, 0);
+	channel_writel(atchan, DADDR, 0);
+	channel_writel(atchan, CTRLA, 0);
+	channel_writel(atchan, CTRLB, 0);
+	channel_writel(atchan, DSCR, atchan->save_dscr);
+	dma_writel(atdma, CHER, atchan->mask);
+
+	/* channel pause status should be removed by channel user
+	 * We cannot take the initiative to do it here */
+
+	vdbg_dump_regs(atchan);
+}
+
 static int at_dma_resume_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;

+	/* bring back DMA controller */
 	clk_enable(atdma->clk);
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
+
+	/* clear any pending interrupt */
+	while (dma_readl(atdma, EBCISR))
+		cpu_relax();
+
+	/* restore saved data */
+	dma_writel(atdma, EBCIER, atdma->save_imr);
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+		channel_writel(atchan, CFG, atchan->save_cfg);
+		if (atc_chan_is_cyclic(atchan))
+			atc_resume_cyclic(atchan);
+	}
 	return 0;
 }

 static const struct dev_pm_ops at_dma_dev_pm_ops = {
+	.prepare = at_dma_prepare,
 	.suspend_noirq = at_dma_suspend_noirq,
 	.resume_noirq = at_dma_resume_noirq,
 };
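
The new .prepare hook makes system suspend back off with -EAGAIN while a non-cyclic transfer is still in flight, and atc_suspend_cyclic() only papers over a user that forgot to pause its channel first. The expected division of labour on the user side, sketched with the generic dmaengine control call (illustrative, minimal error handling):

	/* DMA user's suspend path: quiesce the cyclic channel first */
	chan->device->device_control(chan, DMA_PAUSE, 0);

	/* ...system sleeps; at_hdmac saves CFG/DSCR and gates its clock... */

	/* DMA user's resume path: at_hdmac deliberately leaves the pause
	 * state alone, so the user un-pauses its own channel */
	chan->device->device_control(chan, DMA_RESUME, 0);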

diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h

@@ -204,6 +204,9 @@ enum atc_status {
 * @status: transmit status information from irq/prep* functions
 *	to tasklet (use atomic operations)
 * @tasklet: bottom half to finish transaction work
+ * @save_cfg: configuration register that is saved on suspend/resume cycle
+ * @save_dscr: for cyclic operations, preserve next descriptor address in
+ *	the cyclic list on suspend/resume cycle
 * @lock: serializes enqueue/dequeue operations to descriptors lists
 * @completed_cookie: identifier for the most recently completed operation
 * @active_list: list of descriptors dmaengine is being running on
@@ -218,6 +221,8 @@ struct at_dma_chan {
 	u8			mask;
 	unsigned long		status;
 	struct tasklet_struct	tasklet;
+	u32			save_cfg;
+	u32			save_dscr;

 	spinlock_t		lock;

@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
 * @chan_common: common dmaengine dma_device object members
 * @ch_regs: memory mapped register base
 * @clk: dma controller clock
+ * @save_imr: interrupt mask register that is saved on suspend/resume cycle
 * @all_chan_mask: all channels availlable in a mask
 * @dma_desc_pool: base of DMA descriptor region (DMA address)
 * @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@ struct at_dma {
 	struct dma_device	dma_common;
 	void __iomem		*regs;
 	struct clk		*clk;
+	u32			save_imr;

 	u8			all_chan_mask;

@@ -355,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
 	return !!(dma_readl(atdma, CHSR) & atchan->mask);
 }

+/**
+ * atc_chan_is_paused - test channel pause/resume status
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
+{
+	return test_bit(ATC_IS_PAUSED, &atchan->status);
+}
+
+/**
+ * atc_chan_is_cyclic - test if given channel has cyclic property set
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
+{
+	return test_bit(ATC_IS_CYCLIC, &atchan->status);
+}
+
 /**
  * set_desc_eol - set end-of-link to descriptor so it will end transfer

diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c

@@ -10,6 +10,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
+#include <linux/freezer.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -251,6 +252,7 @@ static int dmatest_func(void *data)
 	int			i;

 	thread_name = current->comm;
+	set_freezable_with_signal();

 	ret = -ENOMEM;

@@ -305,7 +307,8 @@ static int dmatest_func(void *data)
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
 		struct completion cmp;
-		unsigned long tmo = msecs_to_jiffies(timeout);
+		unsigned long start, tmo, end = 0 /* compiler... */;
+		bool reload = true;
 		u8 align = 0;

 		total_tests++;
@@ -404,7 +407,17 @@ static int dmatest_func(void *data)
 		}

 		dma_async_issue_pending(chan);

-		tmo = wait_for_completion_timeout(&cmp, tmo);
+		do {
+			start = jiffies;
+			if (reload)
+				end = start + msecs_to_jiffies(timeout);
+			else if (end <= start)
+				end = start + 1;
+			tmo = wait_for_completion_interruptible_timeout(&cmp,
+								end - start);
+			reload = try_to_freeze();
+		} while (tmo == -ERESTARTSYS);
+
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

 		if (tmo == 0) {
@@ -477,6 +490,8 @@ err_srcs:
 	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
 			thread_name, total_tests, failed_tests, ret);

+	/* terminate all transfers on specified channels */
+	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 	if (iterations > 0)
 		while (!kthread_should_stop()) {
 			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -499,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
 		list_del(&thread->node);
 		kfree(thread);
 	}
+
+	/* terminate all transfers on specified channels */
+	dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+
 	kfree(dtc);
 }
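
set_freezable_with_signal() plus the retry loop is what lets a dmatest thread survive suspend: the freezer delivers a fake signal, the interruptible wait returns -ERESTARTSYS, try_to_freeze() parks the thread, and on thaw the full timeout is reloaded (reload == true) instead of resuming a stale deadline. The same skeleton for any freezable kthread might look like this (a sketch, not from this file):

	#include <linux/freezer.h>
	#include <linux/kthread.h>

	static int worker_fn(void *data)
	{
		set_freezable_with_signal();	/* opt in to freezer signals */

		while (!kthread_should_stop()) {
			try_to_freeze();	/* park here across suspend/resume */
			/* ...one unit of work, using interruptible waits... */
		}
		return 0;
	}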

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c

@@ -318,6 +318,7 @@ struct sdma_engine {
 	dma_addr_t			context_phys;
 	struct dma_device		dma_device;
 	struct clk			*clk;
+	struct mutex			channel_0_lock;
 	struct sdma_script_start_addrs	*script_addrs;
 };

@@ -415,11 +416,15 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 	dma_addr_t buf_phys;
 	int ret;

+	mutex_lock(&sdma->channel_0_lock);
+
 	buf_virt = dma_alloc_coherent(NULL,
 			size,
 			&buf_phys, GFP_KERNEL);
-	if (!buf_virt)
-		return -ENOMEM;
+	if (!buf_virt) {
+		ret = -ENOMEM;
+		goto err_out;
+	}

 	bd0->mode.command = C0_SETPM;
 	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
@@ -433,6 +438,9 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,

 	dma_free_coherent(NULL, size, buf_virt, buf_phys);

+err_out:
+	mutex_unlock(&sdma->channel_0_lock);
+
 	return ret;
 }

@@ -656,6 +664,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
 	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);

+	mutex_lock(&sdma->channel_0_lock);
+
 	memset(context, 0, sizeof(*context));
 	context->channel_state.pc = load_address;

@@ -676,6 +686,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)

 	ret = sdma_run_channel(&sdma->channel[0]);

+	mutex_unlock(&sdma->channel_0_lock);
+
 	return ret;
 }

@@ -1131,18 +1143,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
 			saddr_arr[i] = addr_arr[i];
 }

-static int __init sdma_get_firmware(struct sdma_engine *sdma,
-		const char *fw_name)
+static void sdma_load_firmware(const struct firmware *fw, void *context)
 {
-	const struct firmware *fw;
+	struct sdma_engine *sdma = context;
 	const struct sdma_firmware_header *header;
-	int ret;
 	const struct sdma_script_start_addrs *addr;
 	unsigned short *ram_code;

-	ret = request_firmware(&fw, fw_name, sdma->dev);
-	if (ret)
-		return ret;
+	if (!fw) {
+		dev_err(sdma->dev, "firmware not found\n");
+		return;
+	}

 	if (fw->size < sizeof(*header))
 		goto err_firmware;
@@ -1172,6 +1183,16 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,

 err_firmware:
 	release_firmware(fw);
+}
+
+static int __init sdma_get_firmware(struct sdma_engine *sdma,
+		const char *fw_name)
+{
+	int ret;
+
+	ret = request_firmware_nowait(THIS_MODULE,
+			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
+			GFP_KERNEL, sdma, sdma_load_firmware);

 	return ret;
 }

@@ -1269,11 +1290,14 @@ static int __init sdma_probe(struct platform_device *pdev)
 	struct sdma_platform_data *pdata = pdev->dev.platform_data;
 	int i;
 	struct sdma_engine *sdma;
+	s32 *saddr_arr;

 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
 	if (!sdma)
 		return -ENOMEM;

+	mutex_init(&sdma->channel_0_lock);
+
 	sdma->dev = &pdev->dev;

 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1310,6 +1334,11 @@ static int __init sdma_probe(struct platform_device *pdev)
 		goto err_alloc;
 	}

+	/* initially no scripts available */
+	saddr_arr = (s32 *)sdma->script_addrs;
+	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+		saddr_arr[i] = -EINVAL;
+
 	if (of_id)
 		pdev->id_entry = of_id->data;
 	sdma->devtype = pdev->id_entry->driver_data;
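
Switching to request_firmware_nowait() means sdma_probe() no longer blocks (or fails) when the firmware file is unavailable at probe time, e.g. before the rootfs is mounted; every script address is pre-poisoned with -EINVAL and only becomes valid once the completion callback has run. The asynchronous call shape, reduced to its essentials (struct my_dev and "my_fw.bin" are placeholders):

	#include <linux/firmware.h>

	static void fw_ready(const struct firmware *fw, void *context)
	{
		struct my_dev *md = context;

		if (!fw)
			return;	/* not found: driver keeps running degraded */

		/* ...validate fw->size, copy fw->data into device RAM... */
		release_firmware(fw);
	}

	/* returns at once; fw_ready() is invoked from a helper thread later */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
			"my_fw.bin", dev, GFP_KERNEL, md, fw_ready);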

diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c

@@ -130,6 +130,23 @@ struct mxs_dma_engine {
 	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
 };

+static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
+{
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int chan_id = mxs_chan->chan.chan_id;
+	int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
+
+	/* enable apbh channel clock */
+	if (dma_is_apbh()) {
+		if (apbh_is_old())
+			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+		else
+			writel(1 << chan_id,
+				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+	}
+}
+
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,38 +165,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;

+	/* clkgate needs to be enabled before writing other registers */
+	mxs_dma_clkgate(mxs_chan, 1);
+
 	/* set cmd_addr up */
 	writel(mxs_chan->ccw_phys,
 		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));

-	/* enable apbh channel clock */
-	if (dma_is_apbh()) {
-		if (apbh_is_old())
-			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
-		else
-			writel(1 << chan_id,
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
-	}
-
 	/* write 1 to SEMA to kick off the channel */
 	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
 }

 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-	int chan_id = mxs_chan->chan.chan_id;
-
 	/* disable apbh channel clock */
-	if (dma_is_apbh()) {
-		if (apbh_is_old())
-			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
-		else
-			writel(1 << chan_id,
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
-	}
+	mxs_dma_clkgate(mxs_chan, 0);

 	mxs_chan->status = DMA_SUCCESS;
 }
@@ -338,7 +338,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto err_clk;

+	/* clkgate needs to be enabled for reset to finish */
+	mxs_dma_clkgate(mxs_chan, 1);
 	mxs_dma_reset_chan(mxs_chan);
+	mxs_dma_clkgate(mxs_chan, 0);

 	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
 	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h

@@ -47,6 +47,9 @@ enum {
 * @muxval: a number usually used to poke into some mux regiser to
 *	mux in the signal to this channel
 * @cctl_opt: default options for the channel control register
+ * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
+ * channels. Fill with 'true' if peripheral should be flow controller. Direction
+ * will be selected at Runtime.
 * @addr: source/target address in physical memory for this DMA channel,
 *	can be the address of a FIFO register for burst requests for example.
 *	This can be left undefined if the PrimeCell API is used for configuring
@@ -65,6 +68,7 @@ struct pl08x_channel_data {
 	int max_signal;
 	u32 muxval;
 	u32 cctl;
+	bool device_fc;
 	dma_addr_t addr;
 	bool circular_buffer;
 	bool single;
@@ -77,13 +81,11 @@ struct pl08x_channel_data {
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
- * @fill_bytes: bytes required to fill to the next bus memory boundary
 */
 struct pl08x_bus_data {
 	dma_addr_t addr;
 	u8 maxwidth;
 	u8 buswidth;
-	size_t fill_bytes;
 };

 /**
@@ -105,8 +107,16 @@ struct pl08x_phy_chan {

 /**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @tx: async tx descriptor
+ * @node: node for txd list for channels
+ * @src_addr: src address of txd
+ * @dst_addr: dst address of txd
+ * @len: transfer len in bytes
+ * @direction: direction of transfer
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
 */
 struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;
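
The new device_fc flag is what pl08x_prep_slave_sg() (see the amba-pl08x.c hunks above) reads to pick between the DMAC-as-flow-controller and peripheral-as-flow-controller ccfg encodings. Board code would set it per channel, along these lines (bus IDs and signal numbers invented for illustration):

	static struct pl08x_channel_data board_slave_channels[] = {
		{
			.bus_id = "uart0_tx",
			.min_signal = 0,
			.max_signal = 0,
			.device_fc = false,	/* DMAC is flow controller */
		}, {
			.bus_id = "mmci_rx",
			.min_signal = 1,
			.max_signal = 1,
			.device_fc = true,	/* peripheral drives LBREQ/LSREQ */
		},
	};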

diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h

@@ -24,8 +24,7 @@
 #include <linux/device.h>
 #include <linux/uio.h>
 #include <linux/dma-direction.h>
-
-struct scatterlist;
+#include <linux/scatterlist.h>

 /**
  * typedef dma_cookie_t - an opaque DMA cookie
@@ -519,6 +518,16 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 			(unsigned long)config);
 }

+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+	struct dma_chan *chan, void *buf, size_t len,
+	enum dma_data_direction dir, unsigned long flags)
+{
+	struct scatterlist sg;
+	sg_init_one(&sg, buf, len);
+
+	return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
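
dmaengine_prep_slave_single() wraps the common one-buffer case: it builds a single-entry scatterlist on the caller's stack and forwards it to the driver's device_prep_slave_sg(), so the driver must finish reading the request before the call returns. Typical caller-side use (a sketch; my_done_cb is a placeholder):

	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_TO_DEVICE,
					   DMA_PREP_INTERRUPT);
	if (desc) {
		desc->callback = my_done_cb;	/* completion callback */
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}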