DMAengine updates for v4.20-rc1

Changes this time round are:
  - Support for ColdFire mcf5441x edma controller
  - Support for link list mode in sprd dma
  - More users of managed dmaenginem_async_device_register API
  - Cyclic mode support in owl dma driver
  - DT updates for renesas drivers, dma-jz4780 updates and support for
    JZ4770, JZ4740 and JZ4725B controllers
  - Removal of the deprecated dma_slave_config direction field in dmaengine
    drivers; a few more users will be removed in the next cycle and the
    field eventually removed.
  - Minor updates to idma64, ioat, pxa, ppc drivers
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJb0YtfAAoJEHwUBw8lI4NHJoQP/3JVOPM9Zi6CNnWsGUSGR1Ok
 DZyGMJLe92t+EtQyhLfApGypscJifIRTn3IYD1cWBmHReiaWvMJjcZDh5w4L/mbB
 LHmUPOuDrw9V2WtN8y9gyX3syjtKZi64OuT5eEbZKCiRAELsiqu0CZs92kXAV7QQ
 vo1zCyN0hGivGRi9jkICVbq0yzixwuQsjgkekvw1VChYXQeB6S6UydeEl/HACraO
 6a92XdAIMXlvnvTTV5DSldILfQAP+nh8Dp8J/U6kNTEPgrYZDBouNb3foJDv0EA2
 7tJ8ryRK5E/ZcqG5enhaATTou/e8fvInazGG1T4rFl0p6UX19Y0kYzP7XD2TrCBJ
 ro0bw+BBeKE66lkT/di8vxDzPxDh2COdbbPuAA8vPXX8XJxctGNNiYXiB8dNyHxD
 d2SGFVjY4ttrNMENNxNH2jnkfjSGiV4VJdvD/FzKNm6edoG+hRGVh8N8R3hORkKb
 9m5Si1zySkZ7jsq/MsUBCfPzpj0vjS7fS4SIsm+WLEm+cmr6m4cAeblhy61lvp9Y
 4i0MgkoX+Jh0uP+3czPAtw+TAPHQ3xmLQBNyimDBbk6APXDTez9O4oraYRjru1Ue
 IDo3GQO98wV01M8jVZDm/wqfI73XB8z9tARaBAiu5uXh9b+erG13be8l5ZKOHyPE
 gd/kPgz70+vKuqtYiC9S
 =/pTg
 -----END PGP SIGNATURE-----

Merge tag 'dmaengine-4.20-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

 - Support for ColdFire mcf5441x edma controller

 - Support for link list mode in sprd dma

 - More users of managed dmaenginem_async_device_register API (see the
   sketch below)

 - Cyclic mode support in owl dma driver

 - DT updates for renesas drivers, dma-jz4780 updates and support for
   JZ4770, JZ4740 and JZ4725B controllers

 - Removal of the deprecated dma_slave_config direction field in dmaengine
   drivers; a few more users will be removed in the next cycle and the
   field eventually removed.

 - Minor updates to idma64, ioat, pxa, ppc drivers
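
The managed API called out above is the devres variant of
dma_async_device_register(): the registration is undone automatically when
the driver unbinds, which is why the hunks below that switch to
dmaenginem_async_device_register() can drop their explicit
dma_async_device_unregister() calls. A minimal probe sketch (the mydma_*
names are hypothetical):

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

struct mydma_dev {
	struct dma_device ddev;		/* hypothetical driver state */
};

static int mydma_probe(struct platform_device *pdev)
{
	struct mydma_dev *md;

	md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	md->ddev.dev = &pdev->dev;
	/* ... fill in capability masks and callbacks here ... */

	/*
	 * Managed registration: unregistered automatically on driver
	 * unbind, so no dma_async_device_unregister() in the error
	 * paths or in .remove().
	 */
	return dmaenginem_async_device_register(&md->ddev);
}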

* tag 'dmaengine-4.20-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (66 commits)
  dmaengine: ppc4xx: fix off-by-one build failure
  dmaengine: owl: Fix warnings generated during build
  dmaengine: fsl-edma: remove dma_slave_config direction usage
  dmaengine: rcar-dmac: set scatter/gather max segment size
  dmaengine: mmp_tdma: remove dma_slave_config direction usage
  dmaengine: ep93xx_dma: remove dma_slave_config direction usage
  dmaengine: k3dma: remove dma_slave_config direction usage
  dmaengine: k3dma: dont use direction for memcpy
  dmaengine: imx-dma: remove dma_slave_config direction usage
  dmaengine: idma: remove dma_slave_config direction usage
  dmaengine: hsu: remove dma_slave_config direction usage
  dmaengine: dw: remove dma_slave_config direction usage
  dmaengine: jz4740: remove dma_slave_config direction usage
  dmaengine: coh901318: remove dma_slave_config direction usage
  dmaengine: bcm2835: remove dma_slave_config direction usage
  dmaengine: at_hdmac: remove dma_slave_config direction usage
  dmaengine: owl: Add Slave and Cyclic mode support for Actions Semi Owl S900 SoC
  dmaengine: ioat: fix prototype of ioat_enumerate_channels
  dmaengine: stm32-dma: check whether length is aligned on FIFO threshold
  dt-bindings: dmaengine: usb-dmac: Add binding for r8a7744
  ...
Linus Torvalds committed a41efc2a0f on 2018-10-25 06:43:18 -07:00
53 changed files with 2109 additions and 968 deletions


@@ -2,8 +2,13 @@
Required properties:
- compatible: Should be "ingenic,jz4780-dma"
- reg: Should contain the DMA controller registers location and length.
- compatible: Should be one of:
* ingenic,jz4740-dma
* ingenic,jz4725b-dma
* ingenic,jz4770-dma
* ingenic,jz4780-dma
- reg: Should contain the DMA channel registers location and length, followed
by the DMA controller registers location and length.
- interrupts: Should contain the interrupt specifier of the DMA controller.
- clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
@@ -19,9 +24,10 @@ Optional properties:
Example:
dma: dma@13420000 {
dma: dma-controller@13420000 {
compatible = "ingenic,jz4780-dma";
reg = <0x13420000 0x10000>;
reg = <0x13420000 0x400
0x13421000 0x40>;
interrupt-parent = <&intc>;
interrupts = <10>;
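
For the client side (illustrative, not part of this diff): with #dma-cells =
<2>, consumers pass a request type and a channel index, where 0xffffffff
requests any channel. The JZ4780_DMA_UART0_* request numbers below are
assumed to come from <dt-bindings/dma/jz4780-dma.h>:

	uart0: serial@10030000 {
		compatible = "ingenic,jz4780-uart";
		reg = <0x10030000 0x100>;
		dmas = <&dma JZ4780_DMA_UART0_RX 0xffffffff>,
		       <&dma JZ4780_DMA_UART0_TX 0xffffffff>;
		dma-names = "rx", "tx";
	};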


@@ -17,6 +17,7 @@ Required Properties:
- compatible: "renesas,dmac-<soctype>", "renesas,rcar-dmac" as fallback.
Examples with soctypes are:
- "renesas,dmac-r8a7743" (RZ/G1M)
- "renesas,dmac-r8a7744" (RZ/G1N)
- "renesas,dmac-r8a7745" (RZ/G1E)
- "renesas,dmac-r8a77470" (RZ/G1C)
- "renesas,dmac-r8a7790" (R-Car H2)


@@ -4,6 +4,7 @@ Required Properties:
-compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback.
Examples with soctypes are:
- "renesas,r8a7743-usb-dmac" (RZ/G1M)
- "renesas,r8a7744-usb-dmac" (RZ/G1N)
- "renesas,r8a7745-usb-dmac" (RZ/G1E)
- "renesas,r8a7790-usb-dmac" (R-Car H2)
- "renesas,r8a7791-usb-dmac" (R-Car M2-W)


@@ -154,6 +154,21 @@
clock-names = "baud", "module";
};
dmac: dma-controller@13020000 {
compatible = "ingenic,jz4740-dma";
reg = <0x13020000 0xbc
0x13020300 0x14>;
#dma-cells = <2>;
interrupt-parent = <&intc>;
interrupts = <29>;
clocks = <&cgu JZ4740_CLK_DMA>;
/* Disable dmac until we have something that uses it */
status = "disabled";
};
uhc: uhc@13030000 {
compatible = "ingenic,jz4740-ohci", "generic-ohci";
reg = <0x13030000 0x1000>;


@@ -196,6 +196,36 @@
status = "disabled";
};
dmac0: dma-controller@13420000 {
compatible = "ingenic,jz4770-dma";
reg = <0x13420000 0xC0
0x13420300 0x20>;
#dma-cells = <1>;
clocks = <&cgu JZ4770_CLK_DMA>;
interrupt-parent = <&intc>;
interrupts = <24>;
/* Disable dmac0 until we have something that uses it */
status = "disabled";
};
dmac1: dma-controller@13420100 {
compatible = "ingenic,jz4770-dma";
reg = <0x13420100 0xC0
0x13420400 0x20>;
#dma-cells = <1>;
clocks = <&cgu JZ4770_CLK_DMA>;
interrupt-parent = <&intc>;
interrupts = <23>;
/* Disable dmac1 until we have something that uses it */
status = "disabled";
};
uhc: uhc@13430000 {
compatible = "generic-ohci";
reg = <0x13430000 0x1000>;


@@ -266,7 +266,8 @@
dma: dma@13420000 {
compatible = "ingenic,jz4780-dma";
reg = <0x13420000 0x10000>;
reg = <0x13420000 0x400
0x13421000 0x40>;
#dma-cells = <2>;
interrupt-parent = <&intc>;


@@ -143,7 +143,7 @@ config DMA_JZ4740
config DMA_JZ4780
tristate "JZ4780 DMA support"
depends on MACH_JZ4780 || COMPILE_TEST
depends on MIPS || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -321,6 +321,17 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
depends on M5441x || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support the Freescale ColdFire eDMA engine, 64-channel
implementation that performs complex data transfers with
minimal intervention from a host processor.
This module can be found on Freescale ColdFire mcf5441x SoCs.
config MMP_PDMA
bool "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST


@@ -31,7 +31,8 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o


@@ -1320,7 +1320,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
if (unlikely(!is_slave_direction(direction)))
goto err_out;
if (sconfig->direction == DMA_MEM_TO_DEV)
if (direction == DMA_MEM_TO_DEV)
reg_width = convert_buswidth(sconfig->dst_addr_width);
else
reg_width = convert_buswidth(sconfig->src_addr_width);


@@ -1600,7 +1600,7 @@ static void at_xdmac_tasklet(unsigned long data)
if (atchan->status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
spin_lock_bh(&atchan->lock);
spin_lock(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc,
xfer_node);
@@ -1610,7 +1610,7 @@ static void at_xdmac_tasklet(unsigned long data)
txd = &desc->tx_dma_desc;
at_xdmac_remove_xfer(atchan, desc);
spin_unlock_bh(&atchan->lock);
spin_unlock(&atchan->lock);
if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd);


@@ -778,14 +778,6 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
if ((cfg->direction == DMA_DEV_TO_MEM &&
cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
(cfg->direction == DMA_MEM_TO_DEV &&
cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
!is_slave_direction(cfg->direction)) {
return -EINVAL;
}
c->cfg = *cfg;
return 0;


@@ -1306,6 +1306,7 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
struct dma_slave_config config;
u32 addr;
u32 ctrl;
@@ -1402,6 +1403,10 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
return container_of(chan, struct coh901318_chan, chan);
}
static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
struct dma_slave_config *config,
enum dma_transfer_direction direction);
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
@@ -2360,6 +2365,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (lli == NULL)
goto err_dma_alloc;
coh901318_dma_set_runtimeconfig(chan, &cohc->config, direction);
/* initiate allocated lli list */
ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
cohc->addr,
@@ -2499,7 +2506,8 @@ static const struct burst_table burst_sizes[] = {
};
static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
struct dma_slave_config *config)
struct dma_slave_config *config,
enum dma_transfer_direction direction)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_addr_t addr;
@@ -2509,11 +2517,11 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0;
/* We only support mem to per or per to mem transfers */
if (config->direction == DMA_DEV_TO_MEM) {
if (direction == DMA_DEV_TO_MEM) {
addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else if (config->direction == DMA_MEM_TO_DEV) {
} else if (direction == DMA_MEM_TO_DEV) {
addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
@@ -2579,6 +2587,16 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
return 0;
}
static int coh901318_dma_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
memcpy(&cohc->config, config, sizeof(*config));
return 0;
}
static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
{
@@ -2684,7 +2702,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
base->dma_slave.device_tx_status = coh901318_tx_status;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
base->dma_slave.device_config = coh901318_dma_slave_config;
base->dma_slave.device_pause = coh901318_pause;
base->dma_slave.device_resume = coh901318_resume;
base->dma_slave.device_terminate_all = coh901318_terminate_all;
@@ -2707,7 +2725,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
base->dma_memcpy.device_tx_status = coh901318_tx_status;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
base->dma_memcpy.device_config = coh901318_dma_slave_config;
base->dma_memcpy.device_pause = coh901318_pause;
base->dma_memcpy.device_resume = coh901318_resume;
base->dma_memcpy.device_terminate_all = coh901318_terminate_all;


@@ -113,6 +113,7 @@ struct jz4740_dma_desc {
struct jz4740_dmaengine_chan {
struct virt_dma_chan vchan;
unsigned int id;
struct dma_slave_config config;
dma_addr_t fifo_addr;
unsigned int transfer_shift;
@@ -203,8 +204,9 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
}
static int jz4740_dma_slave_config(struct dma_chan *c,
struct dma_slave_config *config)
static int jz4740_dma_slave_config_write(struct dma_chan *c,
struct dma_slave_config *config,
enum dma_transfer_direction direction)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -214,7 +216,7 @@ static int jz4740_dma_slave_config(struct dma_chan *c,
enum jz4740_dma_flags flags;
uint32_t cmd;
switch (config->direction) {
switch (direction) {
case DMA_MEM_TO_DEV:
flags = JZ4740_DMA_SRC_AUTOINC;
transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
@@ -265,6 +267,15 @@
return 0;
}
static int jz4740_dma_slave_config(struct dma_chan *c,
struct dma_slave_config *config)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
memcpy(&chan->config, config, sizeof(*config));
return 0;
}
static int jz4740_dma_terminate_all(struct dma_chan *c)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
@@ -407,6 +418,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
desc->direction = direction;
desc->cyclic = false;
jz4740_dma_slave_config_write(c, &chan->config, direction);
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
@@ -438,6 +451,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
desc->direction = direction;
desc->cyclic = true;
jz4740_dma_slave_config_write(c, &chan->config, direction);
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
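
The jz4740 hunks above are the template for the dma_slave_config direction
removal across this series: device_config now only caches the
configuration, and the cached copy is applied from the prep callbacks,
which receive the real transfer direction. A condensed sketch of the
pattern, with hypothetical mychan_* names:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

struct mychan {
	struct dma_chan chan;
	struct dma_slave_config config;	/* cached, not yet applied */
};

static inline struct mychan *to_mychan(struct dma_chan *c)
{
	return container_of(c, struct mychan, chan);
}

static void mychan_config_write(struct dma_chan *c,
				struct dma_slave_config *config,
				enum dma_transfer_direction direction)
{
	/* Program address, bus width and maxburst for 'direction' here. */
}

static int mychan_slave_config(struct dma_chan *c,
			       struct dma_slave_config *config)
{
	/* Only cache the config; config->direction is no longer used. */
	memcpy(&to_mychan(c)->config, config, sizeof(*config));
	return 0;
}

static struct dma_async_tx_descriptor *mychan_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	/* Apply the cached config using this transfer's direction. */
	mychan_config_write(c, &to_mychan(c)->config, direction);
	/* ... descriptor construction elided ... */
	return NULL;
}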


@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -23,33 +24,35 @@
#include "dmaengine.h"
#include "virt-dma.h"
#define JZ_DMA_NR_CHANNELS 32
/* Global registers. */
#define JZ_DMA_REG_DMAC 0x1000
#define JZ_DMA_REG_DIRQP 0x1004
#define JZ_DMA_REG_DDR 0x1008
#define JZ_DMA_REG_DDRS 0x100c
#define JZ_DMA_REG_DMACP 0x101c
#define JZ_DMA_REG_DSIRQP 0x1020
#define JZ_DMA_REG_DSIRQM 0x1024
#define JZ_DMA_REG_DCIRQP 0x1028
#define JZ_DMA_REG_DCIRQM 0x102c
#define JZ_DMA_REG_DMAC 0x00
#define JZ_DMA_REG_DIRQP 0x04
#define JZ_DMA_REG_DDR 0x08
#define JZ_DMA_REG_DDRS 0x0c
#define JZ_DMA_REG_DCKE 0x10
#define JZ_DMA_REG_DCKES 0x14
#define JZ_DMA_REG_DCKEC 0x18
#define JZ_DMA_REG_DMACP 0x1c
#define JZ_DMA_REG_DSIRQP 0x20
#define JZ_DMA_REG_DSIRQM 0x24
#define JZ_DMA_REG_DCIRQP 0x28
#define JZ_DMA_REG_DCIRQM 0x2c
/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n) (n * 0x20)
#define JZ_DMA_REG_DSA(n) (0x00 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DTA(n) (0x04 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DTC(n) (0x08 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DRT(n) (0x0c + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DCS(n) (0x10 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DCM(n) (0x14 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DDA(n) (0x18 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DSD(n) (0x1c + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DSA 0x00
#define JZ_DMA_REG_DTA 0x04
#define JZ_DMA_REG_DTC 0x08
#define JZ_DMA_REG_DRT 0x0c
#define JZ_DMA_REG_DCS 0x10
#define JZ_DMA_REG_DCM 0x14
#define JZ_DMA_REG_DDA 0x18
#define JZ_DMA_REG_DSD 0x1c
#define JZ_DMA_DMAC_DMAE BIT(0)
#define JZ_DMA_DMAC_AR BIT(2)
#define JZ_DMA_DMAC_HLT BIT(3)
#define JZ_DMA_DMAC_FAIC BIT(27)
#define JZ_DMA_DMAC_FMSC BIT(31)
#define JZ_DMA_DRT_AUTO 0x8
@@ -86,6 +89,14 @@
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
#define JZ4780_DMA_CTRL_OFFSET 0x1000
/* macros for use with jz4780_dma_soc_data.flags */
#define JZ_SOC_DATA_ALLOW_LEGACY_DT BIT(0)
#define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
/**
* struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
* @dcm: value for the DCM (channel command) register
@@ -94,17 +105,12 @@
* @dtc: transfer count (number of blocks of the transfer size specified in DCM
* to transfer) in the low 24 bits, offset of the next descriptor from the
* descriptor base address in the upper 8 bits.
* @sd: target/source stride difference (in stride transfer mode).
* @drt: request type
*/
struct jz4780_dma_hwdesc {
uint32_t dcm;
uint32_t dsa;
uint32_t dta;
uint32_t dtc;
uint32_t sd;
uint32_t drt;
uint32_t reserved[2];
};
/* Size of allocations for hardware descriptor blocks. */
@@ -135,14 +141,22 @@ struct jz4780_dma_chan {
unsigned int curr_hwdesc;
};
struct jz4780_dma_soc_data {
unsigned int nb_channels;
unsigned int transfer_ord_max;
unsigned long flags;
};
struct jz4780_dma_dev {
struct dma_device dma_device;
void __iomem *base;
void __iomem *chn_base;
void __iomem *ctrl_base;
struct clk *clk;
unsigned int irq;
const struct jz4780_dma_soc_data *soc_data;
uint32_t chan_reserved;
struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
struct jz4780_dma_chan chan[];
};
struct jz4780_dma_filter_data {
@@ -169,16 +183,51 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
dma_device);
}
static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
unsigned int reg)
static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
unsigned int chn, unsigned int reg)
{
return readl(jzdma->base + reg);
return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}
static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
unsigned int chn, unsigned int reg, uint32_t val)
{
writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}
static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
unsigned int reg)
{
return readl(jzdma->ctrl_base + reg);
}
static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
unsigned int reg, uint32_t val)
{
writel(val, jzdma->base + reg);
writel(val, jzdma->ctrl_base + reg);
}
static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
unsigned int chn)
{
if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
unsigned int reg;
if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
reg = JZ_DMA_REG_DCKE;
else
reg = JZ_DMA_REG_DCKES;
jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
}
}
static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
unsigned int chn)
{
if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
!(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}
static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
@@ -215,8 +264,10 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(desc);
}
static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
unsigned long val, uint32_t *shift)
{
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
int ord = ffs(val) - 1;
/*
@@ -228,8 +279,8 @@ static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
*/
if (ord == 3)
ord = 2;
else if (ord > 7)
ord = 7;
else if (ord > jzdma->soc_data->transfer_ord_max)
ord = jzdma->soc_data->transfer_ord_max;
*shift = ord;
@@ -262,7 +313,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm = JZ_DMA_DCM_SAI;
desc->dsa = addr;
desc->dta = config->dst_addr;
desc->drt = jzchan->transfer_type;
width = config->dst_addr_width;
maxburst = config->dst_maxburst;
@@ -270,7 +320,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm = JZ_DMA_DCM_DAI;
desc->dsa = config->src_addr;
desc->dta = addr;
desc->drt = jzchan->transfer_type;
width = config->src_addr_width;
maxburst = config->src_maxburst;
@@ -283,7 +332,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
* divisible by the transfer size, and we must not use more than the
* maximum burst specified by the user.
*/
tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
&jzchan->transfer_shift);
switch (width) {
@@ -412,12 +461,13 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
if (!desc)
return NULL;
tsz = jz4780_dma_transfer_size(dest | src | len,
tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
&jzchan->transfer_shift);
jzchan->transfer_type = JZ_DMA_DRT_AUTO;
desc->desc[0].dsa = src;
desc->desc[0].dta = dest;
desc->desc[0].drt = JZ_DMA_DRT_AUTO;
desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
tsz << JZ_DMA_DCM_TSZ_SHIFT |
JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
@@ -472,18 +522,34 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
}
/* Use 8-word descriptors. */
jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);
/* Enable the channel's clock. */
jz4780_dma_chan_enable(jzdma, jzchan->id);
/* Use 4-word descriptors. */
jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
/* Set transfer type. */
jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
jzchan->transfer_type);
/*
* Set the transfer count. This is redundant for a descriptor-driven
* transfer. However, there can be a delay between the transfer start
* time and when DTCn reg contains the new transfer count. Setting
* it explicitly ensures residue is computed correctly at all times.
*/
jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
jzchan->desc->desc[jzchan->curr_hwdesc].dtc);
/* Write descriptor address and initiate descriptor fetch. */
desc_phys = jzchan->desc->desc_phys +
(jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
/* Enable the channel. */
jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
JZ_DMA_DCS_CTE);
}
static void jz4780_dma_issue_pending(struct dma_chan *chan)
@@ -509,12 +575,14 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&jzchan->vchan.lock, flags);
/* Clear the DMA status and stop the transfer. */
jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
if (jzchan->desc) {
vchan_terminate_vdesc(&jzchan->desc->vdesc);
jzchan->desc = NULL;
}
jz4780_dma_chan_disable(jzdma, jzchan->id);
vchan_get_all_descriptors(&jzchan->vchan, &head);
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
@@ -526,8 +594,10 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
static void jz4780_dma_synchronize(struct dma_chan *chan)
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
vchan_synchronize(&jzchan->vchan);
jz4780_dma_chan_disable(jzdma, jzchan->id);
}
static int jz4780_dma_config(struct dma_chan *chan,
@@ -549,21 +619,17 @@ static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
struct jz4780_dma_desc *desc, unsigned int next_sg)
{
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
unsigned int residue, count;
unsigned int count = 0;
unsigned int i;
residue = 0;
for (i = next_sg; i < desc->count; i++)
residue += desc->desc[i].dtc << jzchan->transfer_shift;
count += desc->desc[i].dtc & GENMASK(23, 0);
if (next_sg != 0) {
count = jz4780_dma_readl(jzdma,
JZ_DMA_REG_DTC(jzchan->id));
residue += count << jzchan->transfer_shift;
}
if (next_sg != 0)
count += jz4780_dma_chn_readl(jzdma, jzchan->id,
JZ_DMA_REG_DTC);
return residue;
return count << jzchan->transfer_shift;
}
static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
@@ -573,6 +639,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
struct virt_dma_desc *vdesc;
enum dma_status status;
unsigned long flags;
unsigned long residue = 0;
status = dma_cookie_status(chan, cookie, txstate);
if ((status == DMA_COMPLETE) || (txstate == NULL))
@@ -583,13 +650,13 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
vdesc = vchan_find_desc(&jzchan->vchan, cookie);
if (vdesc) {
/* On the issued list, so hasn't been processed yet */
txstate->residue = jz4780_dma_desc_residue(jzchan,
residue = jz4780_dma_desc_residue(jzchan,
to_jz4780_dma_desc(vdesc), 0);
} else if (cookie == jzchan->desc->vdesc.tx.cookie) {
txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
(jzchan->curr_hwdesc + 1) % jzchan->desc->count);
} else
txstate->residue = 0;
residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
jzchan->curr_hwdesc + 1);
}
dma_set_residue(txstate, residue);
if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
@@ -606,8 +673,8 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
spin_lock(&jzchan->vchan.lock);
dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
if (dcs & JZ_DMA_DCS_AR) {
dev_warn(&jzchan->vchan.chan.dev->device,
@@ -646,9 +713,9 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
uint32_t pending, dmac;
int i;
pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);
pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
if (!(pending & (1<<i)))
continue;
@@ -656,12 +723,12 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
}
/* Clear halt and address error status of all channels. */
dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
/* Clear interrupt pending status. */
jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
return IRQ_HANDLED;
}
@@ -728,7 +795,7 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
data.channel = dma_spec->args[1];
if (data.channel > -1) {
if (data.channel >= JZ_DMA_NR_CHANNELS) {
if (data.channel >= jzdma->soc_data->nb_channels) {
dev_err(jzdma->dma_device.dev,
"device requested non-existent channel %u\n",
data.channel);
@@ -755,16 +822,29 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
static int jz4780_dma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct jz4780_dma_soc_data *soc_data;
struct jz4780_dma_dev *jzdma;
struct jz4780_dma_chan *jzchan;
struct dma_device *dd;
struct resource *res;
int i, ret;
jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
if (!dev->of_node) {
dev_err(dev, "This driver must be probed from devicetree\n");
return -EINVAL;
}
soc_data = device_get_match_data(dev);
if (!soc_data)
return -EINVAL;
jzdma = devm_kzalloc(dev, sizeof(*jzdma)
+ sizeof(*jzdma->chan) * soc_data->nb_channels,
GFP_KERNEL);
if (!jzdma)
return -ENOMEM;
jzdma->soc_data = soc_data;
platform_set_drvdata(pdev, jzdma);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -773,9 +853,26 @@ static int jz4780_dma_probe(struct platform_device *pdev)
return -EINVAL;
}
jzdma->base = devm_ioremap_resource(dev, res);
if (IS_ERR(jzdma->base))
return PTR_ERR(jzdma->base);
jzdma->chn_base = devm_ioremap_resource(dev, res);
if (IS_ERR(jzdma->chn_base))
return PTR_ERR(jzdma->chn_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
jzdma->ctrl_base = devm_ioremap_resource(dev, res);
if (IS_ERR(jzdma->ctrl_base))
return PTR_ERR(jzdma->ctrl_base);
} else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
/*
* On JZ4780, if the second memory resource was not supplied,
* assume we're using an old devicetree, and calculate the
* offset to the control registers.
*/
jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
} else {
dev_err(dev, "failed to get I/O memory\n");
return -EINVAL;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
@@ -833,13 +930,15 @@ static int jz4780_dma_probe(struct platform_device *pdev)
* Also set the FMSC bit - it increases MSC performance, so it makes
* little sense not to enable it.
*/
jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);
if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);
INIT_LIST_HEAD(&dd->channels);
for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
for (i = 0; i < soc_data->nb_channels; i++) {
jzchan = &jzdma->chan[i];
jzchan->id = i;
@@ -847,7 +946,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzchan->vchan.desc_free = jz4780_dma_desc_free;
}
ret = dma_async_device_register(dd);
ret = dmaenginem_async_device_register(dd);
if (ret) {
dev_err(dev, "failed to register device\n");
goto err_disable_clk;
@@ -858,15 +957,12 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzdma);
if (ret) {
dev_err(dev, "failed to register OF DMA controller\n");
goto err_unregister_dev;
goto err_disable_clk;
}
dev_info(dev, "JZ4780 DMA controller initialised\n");
return 0;
err_unregister_dev:
dma_async_device_unregister(dd);
err_disable_clk:
clk_disable_unprepare(jzdma->clk);
@@ -884,15 +980,40 @@ static int jz4780_dma_remove(struct platform_device *pdev)
free_irq(jzdma->irq, jzdma);
for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
for (i = 0; i < jzdma->soc_data->nb_channels; i++)
tasklet_kill(&jzdma->chan[i].vchan.task);
dma_async_device_unregister(&jzdma->dma_device);
return 0;
}
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 5,
};
static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 5,
.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};
static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 6,
.flags = JZ_SOC_DATA_PER_CHAN_PM,
};
static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
.nb_channels = 32,
.transfer_ord_max = 7,
.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4780-dma", .data = NULL },
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);


@@ -934,7 +934,7 @@ static int dw_probe(struct platform_device *pdev)
pm_runtime_put(chip->dev);
ret = dma_async_device_register(&dw->dma);
ret = dmaenginem_async_device_register(&dw->dma);
if (ret)
goto err_pm_disable;
@@ -977,8 +977,6 @@ static int dw_remove(struct platform_device *pdev)
tasklet_kill(&chan->vc.task);
}
dma_async_device_unregister(&dw->dma);
return 0;
}


@@ -886,12 +886,7 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
*/
u32 s = dw->pdata->is_idma32 ? 1 : 2;
/* Check if chan will be configured for slave transfers */
if (!is_slave_direction(sconfig->direction))
return -EINVAL;
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
dwc->direction = sconfig->direction;
sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;


@@ -284,6 +284,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 },
{ "80862286", 0 },
{ "808622C0", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);


@@ -109,6 +109,9 @@
#define DMA_MAX_CHAN_DESCRIPTORS 32
struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *config);
/**
* struct ep93xx_dma_desc - EP93xx specific transaction descriptor
@@ -180,6 +183,7 @@ struct ep93xx_dma_chan {
struct list_head free_list;
u32 runtime_addr;
u32 runtime_ctrl;
struct dma_slave_config slave_config;
};
/**
@@ -1051,6 +1055,8 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
first = NULL;
for_each_sg(sgl, sg, sg_len, i) {
size_t len = sg_dma_len(sg);
@@ -1136,6 +1142,8 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
return NULL;
}
ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
/* Split the buffer into period size chunks */
first = NULL;
for (offset = 0; offset < buf_len; offset += period_len) {
@@ -1227,6 +1235,17 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
memcpy(&edmac->slave_config, config, sizeof(*config));
return 0;
}
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *config)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
enum dma_slave_buswidth width;
unsigned long flags;
u32 addr, ctrl;
@@ -1234,7 +1253,7 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
if (!edmac->edma->m2m)
return -EINVAL;
switch (config->direction) {
switch (dir) {
case DMA_DEV_TO_MEM:
width = config->src_addr_width;
addr = config->src_addr;


@@ -0,0 +1,626 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "fsl-edma-common.h"
#define EDMA_CR 0x00
#define EDMA_ES 0x04
#define EDMA_ERQ 0x0C
#define EDMA_EEI 0x14
#define EDMA_SERQ 0x1B
#define EDMA_CERQ 0x1A
#define EDMA_SEEI 0x19
#define EDMA_CEEI 0x18
#define EDMA_CINT 0x1F
#define EDMA_CERR 0x1E
#define EDMA_SSRT 0x1D
#define EDMA_CDNE 0x1C
#define EDMA_INTR 0x24
#define EDMA_ERR 0x2C
#define EDMA64_ERQH 0x08
#define EDMA64_EEIH 0x10
#define EDMA64_SERQ 0x18
#define EDMA64_CERQ 0x19
#define EDMA64_SEEI 0x1a
#define EDMA64_CEEI 0x1b
#define EDMA64_CINT 0x1c
#define EDMA64_CERR 0x1d
#define EDMA64_SSRT 0x1e
#define EDMA64_CDNE 0x1f
#define EDMA64_INTH 0x20
#define EDMA64_INTL 0x24
#define EDMA64_ERRH 0x28
#define EDMA64_ERRL 0x2c
#define EDMA_TCD 0x1000
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
if (fsl_chan->edma->version == v1) {
edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
edma_writeb(fsl_chan->edma, ch, regs->serq);
} else {
/* ColdFire is big endian, and accesses natively
* big endian I/O peripherals
*/
iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
iowrite8(ch, regs->serq);
}
}
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
if (fsl_chan->edma->version == v1) {
edma_writeb(fsl_chan->edma, ch, regs->cerq);
edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
} else {
/* ColdFire is big endian, and accesses natively
* big endian I/O peripherals
*/
iowrite8(ch, regs->cerq);
iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable)
{
u32 ch = fsl_chan->vchan.chan.chan_id;
void __iomem *muxaddr;
unsigned int chans_per_mux, ch_off;
chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
if (enable)
iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
else
iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
switch (addr_width) {
case 1:
return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
case 2:
return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
case 4:
return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
case 8:
return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
default:
return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
}
}
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
struct fsl_edma_desc *fsl_desc;
int i;
fsl_desc = to_fsl_edma_desc(vdesc);
for (i = 0; i < fsl_desc->n_tcds; i++)
dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
int fsl_edma_terminate_all(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
fsl_edma_disable_request(fsl_chan);
fsl_chan->edesc = NULL;
fsl_chan->idle = true;
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
int fsl_edma_pause(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
if (fsl_chan->edesc) {
fsl_edma_disable_request(fsl_chan);
fsl_chan->status = DMA_PAUSED;
fsl_chan->idle = true;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);
int fsl_edma_resume(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
if (fsl_chan->edesc) {
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
fsl_chan->idle = false;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);
int fsl_edma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
struct virt_dma_desc *vdesc, bool in_progress)
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
enum dma_transfer_direction dir = edesc->dirn;
dma_addr_t cur_addr, dma_addr;
size_t len, size;
int i;
/* calculate the total size in this desc */
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
* le16_to_cpu(edesc->tcd[i].vtcd->biter);
if (!in_progress)
return len;
if (dir == DMA_MEM_TO_DEV)
cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
else
cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
* le16_to_cpu(edesc->tcd[i].vtcd->biter);
if (dir == DMA_MEM_TO_DEV)
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
else
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
len -= size;
if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
len += dma_addr + size - cur_addr;
break;
}
}
return len;
}
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct virt_dma_desc *vdesc;
enum dma_status status;
unsigned long flags;
status = dma_cookie_status(chan, cookie, txstate);
if (status == DMA_COMPLETE)
return status;
if (!txstate)
return fsl_chan->status;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
txstate->residue =
fsl_edma_desc_residue(fsl_chan, vdesc, true);
else if (vdesc)
txstate->residue =
fsl_edma_desc_residue(fsl_chan, vdesc, false);
else
txstate->residue = 0;
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_hw_tcd *tcd)
{
struct fsl_edma_engine *edma = fsl_chan->edma;
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
/*
* TCD parameters are stored in struct fsl_edma_hw_tcd in little
* endian format. However, the TCD registers must be loaded in big- or
* little-endian order, as dictated by the eDMA engine's endian model.
*/
edma_writew(edma, 0, &regs->tcd[ch].csr);
edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
&regs->tcd[ch].dlast_sga);
edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
}
static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
u16 biter, u16 doff, u32 dlast_sga, bool major_int,
bool disable_req, bool enable_sg)
{
u16 csr = 0;
/*
* eDMA hardware SGs require the TCDs to be stored in little
* endian format irrespective of the register endian model.
* So we put the value in little endian in memory, and leave it to
* fsl_edma_set_tcd_regs() to do the swap.
*/
tcd->saddr = cpu_to_le32(src);
tcd->daddr = cpu_to_le32(dst);
tcd->attr = cpu_to_le16(attr);
tcd->soff = cpu_to_le16(soff);
tcd->nbytes = cpu_to_le32(nbytes);
tcd->slast = cpu_to_le32(slast);
tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
tcd->doff = cpu_to_le16(doff);
tcd->dlast_sga = cpu_to_le32(dlast_sga);
tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
if (major_int)
csr |= EDMA_TCD_CSR_INT_MAJOR;
if (disable_req)
csr |= EDMA_TCD_CSR_D_REQ;
if (enable_sg)
csr |= EDMA_TCD_CSR_E_SG;
tcd->csr = cpu_to_le16(csr);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
int sg_len)
{
struct fsl_edma_desc *fsl_desc;
int i;
fsl_desc = kzalloc(sizeof(*fsl_desc) +
sizeof(struct fsl_edma_sw_tcd) *
sg_len, GFP_NOWAIT);
if (!fsl_desc)
return NULL;
fsl_desc->echan = fsl_chan;
fsl_desc->n_tcds = sg_len;
for (i = 0; i < sg_len; i++) {
fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
if (!fsl_desc->tcd[i].vtcd)
goto err;
}
return fsl_desc;
err:
while (--i >= 0)
dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
return NULL;
}
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
dma_addr_t dma_buf_next;
int sg_len, i;
u32 src_addr, dst_addr, last_sg, nbytes;
u16 soff, doff, iter;
if (!is_slave_direction(direction))
return NULL;
sg_len = buf_len / period_len;
fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
if (!fsl_desc)
return NULL;
fsl_desc->iscyclic = true;
fsl_desc->dirn = direction;
dma_buf_next = dma_addr;
if (direction == DMA_MEM_TO_DEV) {
fsl_chan->attr =
fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.dst_addr_width *
fsl_chan->cfg.dst_maxburst;
} else {
fsl_chan->attr =
fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
nbytes = fsl_chan->cfg.src_addr_width *
fsl_chan->cfg.src_maxburst;
}
iter = period_len / nbytes;
for (i = 0; i < sg_len; i++) {
if (dma_buf_next >= dma_addr + buf_len)
dma_buf_next = dma_addr;
/* get next sg's physical address */
last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
if (direction == DMA_MEM_TO_DEV) {
src_addr = dma_buf_next;
dst_addr = fsl_chan->cfg.dst_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
} else {
src_addr = fsl_chan->cfg.src_addr;
dst_addr = dma_buf_next;
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
}
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
fsl_chan->attr, soff, nbytes, 0, iter,
iter, doff, last_sg, true, false, true);
dma_buf_next += period_len;
}
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
struct scatterlist *sg;
u32 src_addr, dst_addr, last_sg, nbytes;
u16 soff, doff, iter;
int i;
if (!is_slave_direction(direction))
return NULL;
fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
if (!fsl_desc)
return NULL;
fsl_desc->iscyclic = false;
fsl_desc->dirn = direction;
if (direction == DMA_MEM_TO_DEV) {
fsl_chan->attr =
fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.dst_addr_width *
fsl_chan->cfg.dst_maxburst;
} else {
fsl_chan->attr =
fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
nbytes = fsl_chan->cfg.src_addr_width *
fsl_chan->cfg.src_maxburst;
}
for_each_sg(sgl, sg, sg_len, i) {
/* get next sg's physical address */
last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = fsl_chan->cfg.dst_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
} else {
src_addr = fsl_chan->cfg.src_addr;
dst_addr = sg_dma_address(sg);
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
}
iter = sg_dma_len(sg) / nbytes;
if (i < sg_len - 1) {
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
false, false, true);
} else {
last_sg = 0;
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
true, true, false);
}
}
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
struct virt_dma_desc *vdesc;
vdesc = vchan_next_desc(&fsl_chan->vchan);
if (!vdesc)
return;
fsl_chan->edesc = to_fsl_edma_desc(vdesc);
fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
void fsl_edma_issue_pending(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
if (unlikely(fsl_chan->pm_state != RUNNING)) {
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
/* cannot submit due to suspend */
return;
}
if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
fsl_edma_xfer_desc(fsl_chan);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
sizeof(struct fsl_edma_hw_tcd),
32, 0);
return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
fsl_edma_disable_request(fsl_chan);
fsl_edma_chan_mux(fsl_chan, 0, false);
fsl_chan->edesc = NULL;
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
dma_pool_destroy(fsl_chan->tcd_pool);
fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
struct fsl_edma_chan *chan, *_chan;
list_for_each_entry_safe(chan, _chan,
&dmadev->channels, vchan.chan.device_node) {
list_del(&chan->vchan.chan.device_node);
tasklet_kill(&chan->vchan.task);
}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
/*
* On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
* register offsets differ from those of the 64-channel ColdFire mcf5441x
* eDMA (here called "v2").
*
* This function sets up the register offsets per the declared version, so
* it must be called from xxx_edma_probe() just after setting the edma
* "version" and "membase" fields.
*/
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
edma->regs.cr = edma->membase + EDMA_CR;
edma->regs.es = edma->membase + EDMA_ES;
edma->regs.erql = edma->membase + EDMA_ERQ;
edma->regs.eeil = edma->membase + EDMA_EEI;
edma->regs.serq = edma->membase + ((edma->version == v1) ?
EDMA_SERQ : EDMA64_SERQ);
edma->regs.cerq = edma->membase + ((edma->version == v1) ?
EDMA_CERQ : EDMA64_CERQ);
edma->regs.seei = edma->membase + ((edma->version == v1) ?
EDMA_SEEI : EDMA64_SEEI);
edma->regs.ceei = edma->membase + ((edma->version == v1) ?
EDMA_CEEI : EDMA64_CEEI);
edma->regs.cint = edma->membase + ((edma->version == v1) ?
EDMA_CINT : EDMA64_CINT);
edma->regs.cerr = edma->membase + ((edma->version == v1) ?
EDMA_CERR : EDMA64_CERR);
edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
EDMA_SSRT : EDMA64_SSRT);
edma->regs.cdne = edma->membase + ((edma->version == v1) ?
EDMA_CDNE : EDMA64_CDNE);
edma->regs.intl = edma->membase + ((edma->version == v1) ?
EDMA_INTR : EDMA64_INTL);
edma->regs.errl = edma->membase + ((edma->version == v1) ?
EDMA_ERR : EDMA64_ERRL);
if (edma->version == v2) {
edma->regs.erqh = edma->membase + EDMA64_ERQH;
edma->regs.eeih = edma->membase + EDMA64_EEIH;
edma->regs.errh = edma->membase + EDMA64_ERRH;
edma->regs.inth = edma->membase + EDMA64_INTH;
}
edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
MODULE_LICENSE("GPL v2");
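
As the comment above fsl_edma_setup_regs() spells out, a driver built on
this common core must set "version" and "membase" before computing the
register offsets. A hypothetical probe fragment showing that ordering (a
real driver would also size the allocation for its chans[] array):

#include <linux/io.h>
#include <linux/platform_device.h>
#include "fsl-edma-common.h"

static int my_edma_probe(struct platform_device *pdev)
{
	struct fsl_edma_engine *edma;
	struct resource *res;

	edma = devm_kzalloc(&pdev->dev, sizeof(*edma), GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	edma->version = v2;		/* 64-channel ColdFire flavour */
	edma->big_endian = true;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(edma->membase))
		return PTR_ERR(edma->membase);

	fsl_edma_setup_regs(edma);	/* regs.* pointers are now valid */
	return 0;
}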


@@ -0,0 +1,233 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2013-2014 Freescale Semiconductor, Inc.
* Copyright 2018 Angelo Dureghello <angelo@sysam.it>
*/
#ifndef _FSL_EDMA_COMMON_H_
#define _FSL_EDMA_COMMON_H_
#include "virt-dma.h"
#define EDMA_CR_EDBG BIT(1)
#define EDMA_CR_ERCA BIT(2)
#define EDMA_CR_ERGA BIT(3)
#define EDMA_CR_HOE BIT(4)
#define EDMA_CR_HALT BIT(5)
#define EDMA_CR_CLM BIT(6)
#define EDMA_CR_EMLM BIT(7)
#define EDMA_CR_ECX BIT(16)
#define EDMA_CR_CX BIT(17)
#define EDMA_SEEI_SEEI(x) ((x) & GENMASK(4, 0))
#define EDMA_CEEI_CEEI(x) ((x) & GENMASK(4, 0))
#define EDMA_CINT_CINT(x) ((x) & GENMASK(4, 0))
#define EDMA_CERR_CERR(x) ((x) & GENMASK(4, 0))
#define EDMA_TCD_ATTR_DSIZE(x) (((x) & GENMASK(2, 0)))
#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
#define EDMA_TCD_ATTR_DSIZE_8BIT 0
#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0))
#define EDMA_TCD_ATTR_SSIZE_8BIT 0
#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_64BIT (EDMA_TCD_ATTR_DSIZE_64BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_32BYTE (EDMA_TCD_ATTR_DSIZE_32BYTE << 8)
#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
#define EDMA_TCD_CSR_START BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
#define EDMA_TCD_CSR_INT_HALF BIT(2)
#define EDMA_TCD_CSR_D_REQ BIT(3)
#define EDMA_TCD_CSR_E_SG BIT(4)
#define EDMA_TCD_CSR_E_LINK BIT(5)
#define EDMA_TCD_CSR_ACTIVE BIT(6)
#define EDMA_TCD_CSR_DONE BIT(7)
#define EDMAMUX_CHCFG_DIS 0x0
#define EDMAMUX_CHCFG_ENBL 0x80
#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
#define DMAMUX_NR 2
#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
enum fsl_edma_pm_state {
RUNNING = 0,
SUSPENDED,
};
struct fsl_edma_hw_tcd {
__le32 saddr;
__le16 soff;
__le16 attr;
__le32 nbytes;
__le32 slast;
__le32 daddr;
__le16 doff;
__le16 citer;
__le32 dlast_sga;
__le16 csr;
__le16 biter;
};
/*
* These are iomem pointers, for both the v1 (32-channel) and v2
* (64-channel) layouts.
*/
struct edma_regs {
void __iomem *cr;
void __iomem *es;
void __iomem *erqh;
void __iomem *erql; /* aka erq on v32 */
void __iomem *eeih;
void __iomem *eeil; /* aka eei on v32 */
void __iomem *seei;
void __iomem *ceei;
void __iomem *serq;
void __iomem *cerq;
void __iomem *cint;
void __iomem *cerr;
void __iomem *ssrt;
void __iomem *cdne;
void __iomem *inth;
void __iomem *intl;
void __iomem *errh;
void __iomem *errl;
struct fsl_edma_hw_tcd __iomem *tcd;
};
struct fsl_edma_sw_tcd {
dma_addr_t ptcd;
struct fsl_edma_hw_tcd *vtcd;
};
struct fsl_edma_chan {
struct virt_dma_chan vchan;
enum dma_status status;
enum fsl_edma_pm_state pm_state;
bool idle;
u32 slave_id;
struct fsl_edma_engine *edma;
struct fsl_edma_desc *edesc;
struct dma_slave_config cfg;
u32 attr;
struct dma_pool *tcd_pool;
};
struct fsl_edma_desc {
struct virt_dma_desc vdesc;
struct fsl_edma_chan *echan;
bool iscyclic;
enum dma_transfer_direction dirn;
unsigned int n_tcds;
struct fsl_edma_sw_tcd tcd[];
};
enum edma_version {
v1, /* 32ch, Vybrid, mpc57x, etc */
v2, /* 64ch ColdFire */
};
struct fsl_edma_engine {
struct dma_device dma_dev;
void __iomem *membase;
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
struct mutex fsl_edma_mutex;
u32 n_chans;
int txirq;
int errirq;
bool big_endian;
enum edma_version version;
struct edma_regs regs;
struct fsl_edma_chan chans[];
};
/*
* R/W functions for big- or little-endian registers:
* The eDMA controller's endian is independent of the CPU core's endian.
* For the big-endian IP module, the offsets of 8-bit and 16-bit registers
* are additionally mirrored relative to the little-endian IP.
*/
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
if (edma->big_endian)
return ioread32be(addr);
else
return ioread32(addr);
}
static inline void edma_writeb(struct fsl_edma_engine *edma,
u8 val, void __iomem *addr)
{
/* swap the reg offset for these in big-endian mode */
if (edma->big_endian)
iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
else
iowrite8(val, addr);
}
static inline void edma_writew(struct fsl_edma_engine *edma,
u16 val, void __iomem *addr)
{
/* swap the reg offset for these in big-endian mode */
if (edma->big_endian)
iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
else
iowrite16(val, addr);
}
static inline void edma_writel(struct fsl_edma_engine *edma,
u32 val, void __iomem *addr)
{
if (edma->big_endian)
iowrite32be(val, addr);
else
iowrite32(val, addr);
}
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct fsl_edma_desc, vdesc);
}
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable);
void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
int fsl_edma_terminate_all(struct dma_chan *chan);
int fsl_edma_pause(struct dma_chan *chan);
int fsl_edma_resume(struct dma_chan *chan);
int fsl_edma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg);
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate);
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
void fsl_edma_issue_pending(struct dma_chan *chan);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
void fsl_edma_free_chan_resources(struct dma_chan *chan);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
void fsl_edma_setup_regs(struct fsl_edma_engine *edma);
#endif /* _FSL_EDMA_COMMON_H_ */


@ -13,671 +13,31 @@
* option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include "virt-dma.h"
#define EDMA_CR 0x00
#define EDMA_ES 0x04
#define EDMA_ERQ 0x0C
#define EDMA_EEI 0x14
#define EDMA_SERQ 0x1B
#define EDMA_CERQ 0x1A
#define EDMA_SEEI 0x19
#define EDMA_CEEI 0x18
#define EDMA_CINT 0x1F
#define EDMA_CERR 0x1E
#define EDMA_SSRT 0x1D
#define EDMA_CDNE 0x1C
#define EDMA_INTR 0x24
#define EDMA_ERR 0x2C
#define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x))
#define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x))
#define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x))
#define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x))
#define EDMA_TCD_SLAST(x) (0x100C + 32 * (x))
#define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x))
#define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x))
#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
#define EDMA_TCD_CITER(x) (0x1016 + 32 * (x))
#define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x))
#define EDMA_TCD_CSR(x) (0x101C + 32 * (x))
#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
#define EDMA_TCD_BITER(x) (0x101E + 32 * (x))
#define EDMA_CR_EDBG BIT(1)
#define EDMA_CR_ERCA BIT(2)
#define EDMA_CR_ERGA BIT(3)
#define EDMA_CR_HOE BIT(4)
#define EDMA_CR_HALT BIT(5)
#define EDMA_CR_CLM BIT(6)
#define EDMA_CR_EMLM BIT(7)
#define EDMA_CR_ECX BIT(16)
#define EDMA_CR_CX BIT(17)
#define EDMA_SEEI_SEEI(x) ((x) & 0x1F)
#define EDMA_CEEI_CEEI(x) ((x) & 0x1F)
#define EDMA_CINT_CINT(x) ((x) & 0x1F)
#define EDMA_CERR_CERR(x) ((x) & 0x1F)
#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
#define EDMA_TCD_SOFF_SOFF(x) (x)
#define EDMA_TCD_NBYTES_NBYTES(x) (x)
#define EDMA_TCD_SLAST_SLAST(x) (x)
#define EDMA_TCD_DADDR_DADDR(x) (x)
#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
#define EDMA_TCD_DOFF_DOFF(x) (x)
#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
#define EDMA_TCD_CSR_START BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
#define EDMA_TCD_CSR_INT_HALF BIT(2)
#define EDMA_TCD_CSR_D_REQ BIT(3)
#define EDMA_TCD_CSR_E_SG BIT(4)
#define EDMA_TCD_CSR_E_LINK BIT(5)
#define EDMA_TCD_CSR_ACTIVE BIT(6)
#define EDMA_TCD_CSR_DONE BIT(7)
#define EDMAMUX_CHCFG_DIS 0x0
#define EDMAMUX_CHCFG_ENBL 0x80
#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
#define DMAMUX_NR 2
#define FSL_EDMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
enum fsl_edma_pm_state {
RUNNING = 0,
SUSPENDED,
};
struct fsl_edma_hw_tcd {
__le32 saddr;
__le16 soff;
__le16 attr;
__le32 nbytes;
__le32 slast;
__le32 daddr;
__le16 doff;
__le16 citer;
__le32 dlast_sga;
__le16 csr;
__le16 biter;
};
struct fsl_edma_sw_tcd {
dma_addr_t ptcd;
struct fsl_edma_hw_tcd *vtcd;
};
struct fsl_edma_slave_config {
enum dma_transfer_direction dir;
enum dma_slave_buswidth addr_width;
u32 dev_addr;
u32 burst;
u32 attr;
};
struct fsl_edma_chan {
struct virt_dma_chan vchan;
enum dma_status status;
enum fsl_edma_pm_state pm_state;
bool idle;
u32 slave_id;
struct fsl_edma_engine *edma;
struct fsl_edma_desc *edesc;
struct fsl_edma_slave_config fsc;
struct dma_pool *tcd_pool;
};
struct fsl_edma_desc {
struct virt_dma_desc vdesc;
struct fsl_edma_chan *echan;
bool iscyclic;
unsigned int n_tcds;
struct fsl_edma_sw_tcd tcd[];
};
struct fsl_edma_engine {
struct dma_device dma_dev;
void __iomem *membase;
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
struct mutex fsl_edma_mutex;
u32 n_chans;
int txirq;
int errirq;
bool big_endian;
struct fsl_edma_chan chans[];
};
/*
* R/W functions for big- or little-endian registers:
* The eDMA controller's endian is independent of the CPU core's endian.
* For the big-endian IP module, the offset for 8-bit or 16-bit registers
* should also be swapped opposite to that in little-endian IP.
*/
static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
if (edma->big_endian)
return ioread32be(addr);
else
return ioread32(addr);
}
static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
{
/* swap the reg offset for these in big-endian mode */
if (edma->big_endian)
iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
else
iowrite8(val, addr);
}
static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
{
/* swap the reg offset for these in big-endian mode */
if (edma->big_endian)
iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
else
iowrite16(val, addr);
}
static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
{
if (edma->big_endian)
iowrite32be(val, addr);
else
iowrite32(val, addr);
}
static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct fsl_edma_desc, vdesc);
}
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
void __iomem *addr = fsl_chan->edma->membase;
u32 ch = fsl_chan->vchan.chan.chan_id;
edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
}
static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
void __iomem *addr = fsl_chan->edma->membase;
u32 ch = fsl_chan->vchan.chan.chan_id;
edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
}
static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable)
{
u32 ch = fsl_chan->vchan.chan.chan_id;
void __iomem *muxaddr;
unsigned chans_per_mux, ch_off;
chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
if (enable)
iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
else
iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
switch (addr_width) {
case 1:
return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
case 2:
return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
case 4:
return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
case 8:
return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
default:
return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
}
}
static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
struct fsl_edma_desc *fsl_desc;
int i;
fsl_desc = to_fsl_edma_desc(vdesc);
for (i = 0; i < fsl_desc->n_tcds; i++)
dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
}
static int fsl_edma_terminate_all(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
fsl_edma_disable_request(fsl_chan);
fsl_chan->edesc = NULL;
fsl_chan->idle = true;
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
return 0;
}
static int fsl_edma_pause(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
if (fsl_chan->edesc) {
fsl_edma_disable_request(fsl_chan);
fsl_chan->status = DMA_PAUSED;
fsl_chan->idle = true;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
static int fsl_edma_resume(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
if (fsl_chan->edesc) {
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
fsl_chan->idle = false;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
static int fsl_edma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
fsl_chan->fsc.dir = cfg->direction;
if (cfg->direction == DMA_DEV_TO_MEM) {
fsl_chan->fsc.dev_addr = cfg->src_addr;
fsl_chan->fsc.addr_width = cfg->src_addr_width;
fsl_chan->fsc.burst = cfg->src_maxburst;
fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
} else if (cfg->direction == DMA_MEM_TO_DEV) {
fsl_chan->fsc.dev_addr = cfg->dst_addr;
fsl_chan->fsc.addr_width = cfg->dst_addr_width;
fsl_chan->fsc.burst = cfg->dst_maxburst;
fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
} else {
return -EINVAL;
}
return 0;
}
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
struct virt_dma_desc *vdesc, bool in_progress)
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
void __iomem *addr = fsl_chan->edma->membase;
u32 ch = fsl_chan->vchan.chan.chan_id;
enum dma_transfer_direction dir = fsl_chan->fsc.dir;
dma_addr_t cur_addr, dma_addr;
size_t len, size;
int i;
/* calculate the total size in this desc */
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
* le16_to_cpu(edesc->tcd[i].vtcd->biter);
if (!in_progress)
return len;
if (dir == DMA_MEM_TO_DEV)
cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
else
cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
* le16_to_cpu(edesc->tcd[i].vtcd->biter);
if (dir == DMA_MEM_TO_DEV)
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
else
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
len -= size;
if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
len += dma_addr + size - cur_addr;
break;
}
}
return len;
}
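
As a concrete check of the residue arithmetic (numbers are illustrative only): take a descriptor with four TCDs, each programmed with nbytes = 16 and biter = 8, so the total is 4 * 128 = 512 bytes. If the in-flight address is found 64 bytes into the third TCD's buffer, the loop has reduced len to 128 by the iteration that matches, then adds back the unfinished tail (128 - 64 = 64), reporting a residue of 192 bytes: the remainder of the third TCD plus the untouched fourth.
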
static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct virt_dma_desc *vdesc;
enum dma_status status;
unsigned long flags;
status = dma_cookie_status(chan, cookie, txstate);
if (status == DMA_COMPLETE)
return status;
if (!txstate)
return fsl_chan->status;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
else if (vdesc)
txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
else
txstate->residue = 0;
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return fsl_chan->status;
}
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_hw_tcd *tcd)
{
struct fsl_edma_engine *edma = fsl_chan->edma;
void __iomem *addr = fsl_chan->edma->membase;
u32 ch = fsl_chan->vchan.chan.chan_id;
/*
* TCD parameters are stored in struct fsl_edma_hw_tcd in little
* endian format. However, we need to load the TCD registers in
* big- or little-endian obeying the eDMA engine model endian.
*/
edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
}
static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
u16 biter, u16 doff, u32 dlast_sga, bool major_int,
bool disable_req, bool enable_sg)
{
u16 csr = 0;
/*
* eDMA hardware SGs require the TCDs to be stored in little
* endian format irrespective of the register endian model.
* So we put the value in little endian in memory, waiting
* for fsl_edma_set_tcd_regs doing the swap.
*/
tcd->saddr = cpu_to_le32(src);
tcd->daddr = cpu_to_le32(dst);
tcd->attr = cpu_to_le16(attr);
tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
if (major_int)
csr |= EDMA_TCD_CSR_INT_MAJOR;
if (disable_req)
csr |= EDMA_TCD_CSR_D_REQ;
if (enable_sg)
csr |= EDMA_TCD_CSR_E_SG;
tcd->csr = cpu_to_le16(csr);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
int sg_len)
{
struct fsl_edma_desc *fsl_desc;
int i;
fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
GFP_NOWAIT);
if (!fsl_desc)
return NULL;
fsl_desc->echan = fsl_chan;
fsl_desc->n_tcds = sg_len;
for (i = 0; i < sg_len; i++) {
fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
if (!fsl_desc->tcd[i].vtcd)
goto err;
}
return fsl_desc;
err:
while (--i >= 0)
dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
return NULL;
}
static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
dma_addr_t dma_buf_next;
int sg_len, i;
u32 src_addr, dst_addr, last_sg, nbytes;
u16 soff, doff, iter;
if (!is_slave_direction(fsl_chan->fsc.dir))
return NULL;
sg_len = buf_len / period_len;
fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
if (!fsl_desc)
return NULL;
fsl_desc->iscyclic = true;
dma_buf_next = dma_addr;
nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
iter = period_len / nbytes;
for (i = 0; i < sg_len; i++) {
if (dma_buf_next >= dma_addr + buf_len)
dma_buf_next = dma_addr;
/* get next sg's physical address */
last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
src_addr = dma_buf_next;
dst_addr = fsl_chan->fsc.dev_addr;
soff = fsl_chan->fsc.addr_width;
doff = 0;
} else {
src_addr = fsl_chan->fsc.dev_addr;
dst_addr = dma_buf_next;
soff = 0;
doff = fsl_chan->fsc.addr_width;
}
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
fsl_chan->fsc.attr, soff, nbytes, 0, iter,
iter, doff, last_sg, true, false, true);
dma_buf_next += period_len;
}
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
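
To make the sizing above concrete (illustrative numbers, not from the source): with buf_len = 4096, period_len = 1024, an addr_width of 4 bytes and a burst of 8, the ring is built from sg_len = 4 TCDs; each minor loop moves nbytes = 32 bytes, each TCD runs iter = 32 major iterations, and last_sg makes the fourth TCD point back at the first so the cyclic transfer never terminates.
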
static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
struct scatterlist *sg;
u32 src_addr, dst_addr, last_sg, nbytes;
u16 soff, doff, iter;
int i;
if (!is_slave_direction(fsl_chan->fsc.dir))
return NULL;
fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
if (!fsl_desc)
return NULL;
fsl_desc->iscyclic = false;
nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
for_each_sg(sgl, sg, sg_len, i) {
/* get next sg's physical address */
last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = fsl_chan->fsc.dev_addr;
soff = fsl_chan->fsc.addr_width;
doff = 0;
} else {
src_addr = fsl_chan->fsc.dev_addr;
dst_addr = sg_dma_address(sg);
soff = 0;
doff = fsl_chan->fsc.addr_width;
}
iter = sg_dma_len(sg) / nbytes;
if (i < sg_len - 1) {
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->fsc.attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
false, false, true);
} else {
last_sg = 0;
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->fsc.attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
true, true, false);
}
}
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
struct virt_dma_desc *vdesc;
vdesc = vchan_next_desc(&fsl_chan->vchan);
if (!vdesc)
return;
fsl_chan->edesc = to_fsl_edma_desc(vdesc);
fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
fsl_chan->idle = false;
}
#include "fsl-edma-common.h"
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int intr, ch;
void __iomem *base_addr;
struct edma_regs *regs = &fsl_edma->regs;
struct fsl_edma_chan *fsl_chan;
base_addr = fsl_edma->membase;
intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
intr = edma_readl(fsl_edma, regs->intl);
if (!intr)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (intr & (0x1 << ch)) {
edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
base_addr + EDMA_CINT);
edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
fsl_chan = &fsl_edma->chans[ch];
@ -705,16 +65,16 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int err, ch;
struct edma_regs *regs = &fsl_edma->regs;
err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
err = edma_readl(fsl_edma, regs->errl);
if (!err)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (err & (0x1 << ch)) {
fsl_edma_disable_request(&fsl_edma->chans[ch]);
edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
fsl_edma->membase + EDMA_CERR);
edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
fsl_edma->chans[ch].status = DMA_ERROR;
fsl_edma->chans[ch].idle = true;
}
@ -730,25 +90,6 @@ static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
return fsl_edma_err_handler(irq, dev_id);
}
static void fsl_edma_issue_pending(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
if (unlikely(fsl_chan->pm_state != RUNNING)) {
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
/* cannot submit due to suspend */
return;
}
if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
fsl_edma_xfer_desc(fsl_chan);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
@ -781,34 +122,6 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
sizeof(struct fsl_edma_hw_tcd),
32, 0);
return 0;
}
static void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
fsl_edma_disable_request(fsl_chan);
fsl_edma_chan_mux(fsl_chan, 0, false);
fsl_chan->edesc = NULL;
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
dma_pool_destroy(fsl_chan->tcd_pool);
fsl_chan->tcd_pool = NULL;
}
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
@ -876,6 +189,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
struct fsl_edma_chan *fsl_chan;
struct edma_regs *regs;
struct resource *res;
int len, chans;
int ret, i;
@ -891,6 +205,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (!fsl_edma)
return -ENOMEM;
fsl_edma->version = v1;
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
@ -899,6 +214,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
fsl_edma_setup_regs(fsl_edma);
regs = &fsl_edma->regs;
for (i = 0; i < DMAMUX_NR; i++) {
char clkname[32];
@ -939,11 +257,11 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
edma_writel(fsl_edma, ~0, fsl_edma->membase + EDMA_INTR);
edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma_irq_init(pdev, fsl_edma);
if (ret)
return ret;
@ -990,22 +308,11 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
/* enable round robin arbitration */
edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
struct fsl_edma_chan *chan, *_chan;
list_for_each_entry_safe(chan, _chan,
&dmadev->channels, vchan.chan.device_node) {
list_del(&chan->vchan.chan.device_node);
tasklet_kill(&chan->vchan.task);
}
}
static int fsl_edma_remove(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@ -1048,18 +355,18 @@ static int fsl_edma_resume_early(struct device *dev)
{
struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
struct fsl_edma_chan *fsl_chan;
struct edma_regs *regs = &fsl_edma->regs;
int i;
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
fsl_chan->pm_state = RUNNING;
edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
fsl_edma->membase + EDMA_CR);
edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}


@ -987,7 +987,7 @@ static void dma_do_tasklet(unsigned long data)
chan_dbg(chan, "tasklet entry\n");
spin_lock_bh(&chan->desc_lock);
spin_lock(&chan->desc_lock);
/* the hardware is now idle and ready for more */
chan->idle = true;
@ -995,7 +995,7 @@ static void dma_do_tasklet(unsigned long data)
/* Run all cleanup for descriptors which have been completed */
fsldma_cleanup_descriptors(chan);
spin_unlock_bh(&chan->desc_lock);
spin_unlock(&chan->desc_lock);
chan_dbg(chan, "tasklet exit\n");
}


@ -348,10 +348,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan,
{
struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
/* Check if chan will be configured for slave transfers */
if (!is_slave_direction(config->direction))
return -EINVAL;
memcpy(&hsuc->config, config, sizeof(hsuc->config));
return 0;


@ -142,9 +142,8 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
{
struct idma64_chan *idma64c = &idma64->chan[c];
struct idma64_desc *desc;
unsigned long flags;
spin_lock_irqsave(&idma64c->vchan.lock, flags);
spin_lock(&idma64c->vchan.lock);
desc = idma64c->desc;
if (desc) {
if (status_err & (1 << c)) {
@ -161,7 +160,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
if (idma64c->desc == NULL || desc->status == DMA_ERROR)
idma64_stop_transfer(idma64c);
}
spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
spin_unlock(&idma64c->vchan.lock);
}
static irqreturn_t idma64_irq(int irq, void *dev)
@ -408,10 +407,6 @@ static int idma64_slave_config(struct dma_chan *chan,
{
struct idma64_chan *idma64c = to_idma64_chan(chan);
/* Check if chan will be configured for slave transfers */
if (!is_slave_direction(config->direction))
return -EINVAL;
memcpy(&idma64c->config, config, sizeof(idma64c->config));
convert_burst(&idma64c->config.src_maxburst);


@ -162,6 +162,7 @@ struct imxdma_channel {
bool enabled_2d;
int slot_2d;
unsigned int irq;
struct dma_slave_config config;
};
enum imx_dma_type {
@ -675,14 +676,15 @@ static int imxdma_terminate_all(struct dma_chan *chan)
return 0;
}
static int imxdma_config(struct dma_chan *chan,
struct dma_slave_config *dmaengine_cfg)
static int imxdma_config_write(struct dma_chan *chan,
struct dma_slave_config *dmaengine_cfg,
enum dma_transfer_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
unsigned int mode = 0;
if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
if (direction == DMA_DEV_TO_MEM) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
@ -723,6 +725,16 @@ static int imxdma_config(struct dma_chan *chan,
return 0;
}
static int imxdma_config(struct dma_chan *chan,
struct dma_slave_config *dmaengine_cfg)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));
return 0;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
@ -905,6 +917,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
desc->desc.callback = NULL;
desc->desc.callback_param = NULL;
imxdma_config_write(chan, &imxdmac->config, direction);
return &desc->desc;
}
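
The split above (cache the whole dma_slave_config at config time, apply it with an explicit direction when a transfer is prepared) is the pattern that lets the deprecated direction field disappear across these drivers. The consumer side is unchanged; a hedged sketch, where chan, buf and len are assumed to be set up elsewhere and the FIFO address is hypothetical:

/* Sketch only: direction now travels with the prep call, not the config. */
struct dma_slave_config cfg = {
	.src_addr	= 0x40001000,	/* hypothetical peripheral FIFO */
	.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.src_maxburst	= 8,
};
struct dma_async_tx_descriptor *desc;

dmaengine_slave_config(chan, &cfg);
desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
				   DMA_PREP_INTERRUPT);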


@ -129,7 +129,7 @@ static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
@ -575,7 +575,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
* ioat_enumerate_channels - find and initialize the device's channels
* @ioat_dma: the ioat dma device to be enumerated
*/
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
struct ioatdma_chan *ioat_chan;
struct device *dev = &ioat_dma->pdev->dev;
@ -594,7 +594,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
if (xfercap_log == 0)
return 0;
return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
@ -611,7 +611,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
}
}
dma->chancnt = i;
return i;
}
/**
@ -1205,8 +1204,15 @@ static void ioat_shutdown(struct pci_dev *pdev)
spin_lock_bh(&ioat_chan->prep_lock);
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
del_timer_sync(&ioat_chan->timer);
spin_unlock_bh(&ioat_chan->prep_lock);
/*
* Synchronization rule for del_timer_sync():
* - The caller must not hold locks which would prevent
* completion of the timer's handler.
* So prep_lock cannot be held before calling it.
*/
del_timer_sync(&ioat_chan->timer);
/* this should quiesce then reset */
ioat_reset_hw(ioat_chan);
}


@ -87,10 +87,10 @@ struct k3_dma_chan {
struct virt_dma_chan vc;
struct k3_dma_phy *phy;
struct list_head node;
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
enum dma_status status;
bool cyclic;
struct dma_slave_config slave_config;
};
struct k3_dma_phy {
@ -118,6 +118,10 @@ struct k3_dma_dev {
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
static int k3_dma_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *cfg);
static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
return container_of(chan, struct k3_dma_chan, vc.chan);
@ -501,14 +505,8 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
copy = min_t(size_t, len, DMA_MAX_SIZE);
k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
if (c->dir == DMA_MEM_TO_DEV) {
src += copy;
} else if (c->dir == DMA_DEV_TO_MEM) {
dst += copy;
} else {
src += copy;
dst += copy;
}
src += copy;
dst += copy;
len -= copy;
} while (len);
@ -542,6 +540,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
if (!ds)
return NULL;
num = 0;
k3_dma_config_write(chan, dir, &c->slave_config);
for_each_sg(sgl, sg, sglen, i) {
addr = sg_dma_address(sg);
@ -602,6 +601,7 @@ k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
avail = buf_len;
total = avail;
num = 0;
k3_dma_config_write(chan, dir, &c->slave_config);
if (period_len < modulo)
modulo = period_len;
@ -642,18 +642,26 @@ static int k3_dma_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct k3_dma_chan *c = to_k3_chan(chan);
memcpy(&c->slave_config, cfg, sizeof(*cfg));
return 0;
}
static int k3_dma_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *cfg)
{
struct k3_dma_chan *c = to_k3_chan(chan);
u32 maxburst = 0, val = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
if (cfg == NULL)
return -EINVAL;
c->dir = cfg->direction;
if (c->dir == DMA_DEV_TO_MEM) {
if (dir == DMA_DEV_TO_MEM) {
c->ccfg = CX_CFG_DSTINCR;
c->dev_addr = cfg->src_addr;
maxburst = cfg->src_maxburst;
width = cfg->src_addr_width;
} else if (c->dir == DMA_MEM_TO_DEV) {
} else if (dir == DMA_MEM_TO_DEV) {
c->ccfg = CX_CFG_SRCINCR;
c->dev_addr = cfg->dst_addr;
maxburst = cfg->dst_maxburst;

drivers/dma/mcf-edma.c (new file)

@ -0,0 +1,317 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-mcf-edma.h>
#include "fsl-edma-common.h"
#define EDMA_CHANNELS 64
#define EDMA_MASK_CH(x) ((x) & GENMASK(5, 0))
static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *mcf_edma = dev_id;
struct edma_regs *regs = &mcf_edma->regs;
unsigned int ch;
struct fsl_edma_chan *mcf_chan;
u64 intmap;
intmap = ioread32(regs->inth);
intmap <<= 32;
intmap |= ioread32(regs->intl);
if (!intmap)
return IRQ_NONE;
for (ch = 0; ch < mcf_edma->n_chans; ch++) {
if (intmap & BIT(ch)) {
iowrite8(EDMA_MASK_CH(ch), regs->cint);
mcf_chan = &mcf_edma->chans[ch];
spin_lock(&mcf_chan->vchan.lock);
if (!mcf_chan->edesc->iscyclic) {
list_del(&mcf_chan->edesc->vdesc.node);
vchan_cookie_complete(&mcf_chan->edesc->vdesc);
mcf_chan->edesc = NULL;
mcf_chan->status = DMA_COMPLETE;
mcf_chan->idle = true;
} else {
vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
}
if (!mcf_chan->edesc)
fsl_edma_xfer_desc(mcf_chan);
spin_unlock(&mcf_chan->vchan.lock);
}
}
return IRQ_HANDLED;
}
static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *mcf_edma = dev_id;
struct edma_regs *regs = &mcf_edma->regs;
unsigned int err, ch;
err = ioread32(regs->errl);
if (!err)
return IRQ_NONE;
for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
if (err & BIT(ch)) {
fsl_edma_disable_request(&mcf_edma->chans[ch]);
iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
mcf_edma->chans[ch].status = DMA_ERROR;
mcf_edma->chans[ch].idle = true;
}
}
err = ioread32(regs->errh);
if (!err)
return IRQ_NONE;
for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
fsl_edma_disable_request(&mcf_edma->chans[ch]);
iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
mcf_edma->chans[ch].status = DMA_ERROR;
mcf_edma->chans[ch].idle = true;
}
}
return IRQ_HANDLED;
}
static int mcf_edma_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *mcf_edma)
{
int ret = 0, i;
struct resource *res;
res = platform_get_resource_byname(pdev,
IORESOURCE_IRQ, "edma-tx-00-15");
if (!res)
return -1;
for (ret = 0, i = res->start; i <= res->end; ++i)
ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
if (ret)
return ret;
res = platform_get_resource_byname(pdev,
IORESOURCE_IRQ, "edma-tx-16-55");
if (!res)
return -1;
for (ret = 0, i = res->start; i <= res->end; ++i)
ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
if (ret)
return ret;
ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
if (ret != -ENXIO) {
ret = request_irq(ret, mcf_edma_tx_handler,
0, "eDMA", mcf_edma);
if (ret)
return ret;
}
ret = platform_get_irq_byname(pdev, "edma-err");
if (ret != -ENXIO) {
ret = request_irq(ret, mcf_edma_err_handler,
0, "eDMA", mcf_edma);
if (ret)
return ret;
}
return 0;
}
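
mcf_edma_irq_init() expects the board code to publish the controller interrupts as named IRQ resources, with the two low groups expressed as start..end spans that the loops above walk one vector at a time. A hypothetical wiring sketch (the vector numbers are invented for illustration; real mcf5441x numbers differ):

/* Hypothetical platform data; names must match what the driver requests. */
static struct resource mcf_edma_resources[] = {
	{ .name = "edma-tx-00-15", .start = 64,  .end = 79,  .flags = IORESOURCE_IRQ },
	{ .name = "edma-tx-16-55", .start = 80,  .end = 119, .flags = IORESOURCE_IRQ },
	{ .name = "edma-tx-56-63", .start = 120, .end = 120, .flags = IORESOURCE_IRQ },
	{ .name = "edma-err",      .start = 121, .end = 121, .flags = IORESOURCE_IRQ },
};
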
static void mcf_edma_irq_free(struct platform_device *pdev,
struct fsl_edma_engine *mcf_edma)
{
int irq;
struct resource *res;
res = platform_get_resource_byname(pdev,
IORESOURCE_IRQ, "edma-tx-00-15");
if (res) {
for (irq = res->start; irq <= res->end; irq++)
free_irq(irq, mcf_edma);
}
res = platform_get_resource_byname(pdev,
IORESOURCE_IRQ, "edma-tx-16-55");
if (res) {
for (irq = res->start; irq <= res->end; irq++)
free_irq(irq, mcf_edma);
}
irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
if (irq != -ENXIO)
free_irq(irq, mcf_edma);
irq = platform_get_irq_byname(pdev, "edma-err");
if (irq != -ENXIO)
free_irq(irq, mcf_edma);
}
static int mcf_edma_probe(struct platform_device *pdev)
{
struct mcf_edma_platform_data *pdata;
struct fsl_edma_engine *mcf_edma;
struct fsl_edma_chan *mcf_chan;
struct edma_regs *regs;
struct resource *res;
int ret, i, len, chans;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -EINVAL;
}
chans = pdata->dma_channels;
len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
if (!mcf_edma)
return -ENOMEM;
mcf_edma->n_chans = chans;
/* Set up version for ColdFire edma */
mcf_edma->version = v2;
mcf_edma->big_endian = 1;
if (!mcf_edma->n_chans) {
dev_info(&pdev->dev, "setting default channel number to 64");
mcf_edma->n_chans = 64;
}
mutex_init(&mcf_edma->fsl_edma_mutex);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mcf_edma->membase))
return PTR_ERR(mcf_edma->membase);
fsl_edma_setup_regs(mcf_edma);
regs = &mcf_edma->regs;
INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
for (i = 0; i < mcf_edma->n_chans; i++) {
struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
mcf_chan->edma = mcf_edma;
mcf_chan->slave_id = i;
mcf_chan->idle = true;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
iowrite32(0x0, &regs->tcd[i].csr);
}
iowrite32(~0, regs->inth);
iowrite32(~0, regs->intl);
ret = mcf_edma_irq_init(pdev, mcf_edma);
if (ret)
return ret;
dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);
mcf_edma->dma_dev.dev = &pdev->dev;
mcf_edma->dma_dev.device_alloc_chan_resources =
fsl_edma_alloc_chan_resources;
mcf_edma->dma_dev.device_free_chan_resources =
fsl_edma_free_chan_resources;
mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
mcf_edma->dma_dev.device_prep_dma_cyclic =
fsl_edma_prep_dma_cyclic;
mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
mcf_edma->dma_dev.device_pause = fsl_edma_pause;
mcf_edma->dma_dev.device_resume = fsl_edma_resume;
mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
mcf_edma->dma_dev.directions =
BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
mcf_edma->dma_dev.filter.map = pdata->slave_map;
mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;
platform_set_drvdata(pdev, mcf_edma);
ret = dma_async_device_register(&mcf_edma->dma_dev);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
return ret;
}
/* Enable round robin arbitration */
iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
static int mcf_edma_remove(struct platform_device *pdev)
{
struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);
mcf_edma_irq_free(pdev, mcf_edma);
fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
dma_async_device_unregister(&mcf_edma->dma_dev);
return 0;
}
static struct platform_driver mcf_edma_driver = {
.driver = {
.name = "mcf-edma",
},
.probe = mcf_edma_probe,
.remove = mcf_edma_remove,
};
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
{
if (chan->device->dev->driver == &mcf_edma_driver.driver) {
struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
return (mcf_chan->slave_id == (uintptr_t)param);
}
return false;
}
EXPORT_SYMBOL(mcf_edma_filter_fn);
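
Most consumers will be routed through the slave map installed in the probe above, but the exported filter can also be used directly with the classic request path. A sketch, with an arbitrary slave id:

/* Illustrative only; the slave id 17 is arbitrary. */
dma_cap_mask_t mask;
struct dma_chan *chan;

dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_channel(mask, mcf_edma_filter_fn, (void *)(uintptr_t)17);
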
static int __init mcf_edma_init(void)
{
return platform_driver_register(&mcf_edma_driver);
}
subsys_initcall(mcf_edma_init);
static void __exit mcf_edma_exit(void)
{
platform_driver_unregister(&mcf_edma_driver);
}
module_exit(mcf_edma_exit);
MODULE_ALIAS("platform:mcf-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
MODULE_LICENSE("GPL v2");


@ -116,6 +116,7 @@ struct mmp_tdma_chan {
u32 burst_sz;
enum dma_slave_buswidth buswidth;
enum dma_status status;
struct dma_slave_config slave_config;
int idx;
enum mmp_tdma_type type;
@ -139,6 +140,10 @@ struct mmp_tdma_device {
#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
static int mmp_tdma_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *dmaengine_cfg);
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{
writel(phys, tdmac->reg_base + TDNDPR);
@ -442,6 +447,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (!desc)
goto err_out;
mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
while (buf < buf_len) {
desc = &tdmac->desc_arr[i];
@ -495,7 +502,18 @@ static int mmp_tdma_config(struct dma_chan *chan,
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
return 0;
}
static int mmp_tdma_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *dmaengine_cfg)
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
if (dir == DMA_DEV_TO_MEM) {
tdmac->dev_addr = dmaengine_cfg->src_addr;
tdmac->burst_sz = dmaengine_cfg->src_maxburst;
tdmac->buswidth = dmaengine_cfg->src_addr_width;
@ -504,7 +522,7 @@ static int mmp_tdma_config(struct dma_chan *chan,
tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
tdmac->buswidth = dmaengine_cfg->dst_addr_width;
}
tdmac->dir = dmaengine_cfg->direction;
tdmac->dir = dir;
return mmp_tdma_config_chan(chan);
}
@ -530,9 +548,6 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static int mmp_tdma_remove(struct platform_device *pdev)
{
struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
dma_async_device_unregister(&tdev->device);
return 0;
}
@ -696,7 +711,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
platform_set_drvdata(pdev, tdev);
ret = dma_async_device_register(&tdev->device);
ret = dmaenginem_async_device_register(&tdev->device);
if (ret) {
dev_err(tdev->device.dev, "unable to register\n");
return ret;
@ -708,7 +723,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (ret) {
dev_err(tdev->device.dev,
"failed to register controller\n");
dma_async_device_unregister(&tdev->device);
return ret;
}
}


@ -348,9 +348,9 @@ static void mv_xor_tasklet(unsigned long data)
{
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
spin_lock_bh(&chan->lock);
spin_lock(&chan->lock);
mv_chan_slot_cleanup(chan);
spin_unlock_bh(&chan->lock);
spin_unlock(&chan->lock);
}
static struct mv_xor_desc_slot *


@ -847,7 +847,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
ret = dma_async_device_register(&mxs_dma->dma_device);
ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
if (ret) {
dev_err(mxs_dma->dma_device.dev, "unable to register\n");
return ret;
@ -857,7 +857,6 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
if (ret) {
dev_err(mxs_dma->dma_device.dev,
"failed to register controller\n");
dma_async_device_unregister(&mxs_dma->dma_device);
}
dev_info(mxs_dma->dma_device.dev, "initialized\n");


@ -1,10 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/
#include <linux/bitmap.h>
@ -1095,8 +1092,8 @@ static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
if (!dchan)
return NULL;
dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__,
dma_spec->np->name);
dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__,
dma_spec->np);
chan = nbpf_to_chan(dchan);


@ -21,6 +21,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/slab.h>
#include "virt-dma.h"
@ -161,10 +162,12 @@ struct owl_dma_lli {
* struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
* @vd: virtual DMA descriptor
* @lli_list: link list of lli nodes
* @cyclic: flag to indicate cyclic transfers
*/
struct owl_dma_txd {
struct virt_dma_desc vd;
struct list_head lli_list;
bool cyclic;
};
/**
@ -186,11 +189,15 @@ struct owl_dma_pchan {
 * @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
* @cfg: slave configuration for this channel
* @drq: physical DMA request ID for this channel
*/
struct owl_dma_vchan {
struct virt_dma_chan vc;
struct owl_dma_pchan *pchan;
struct owl_dma_txd *txd;
struct dma_slave_config cfg;
u8 drq;
};
/**
@ -200,6 +207,7 @@ struct owl_dma_vchan {
* @clk: clock for the DMA controller
* @lock: a lock to use when change DMA controller global register
* @lli_pool: a pool for the LLI descriptors
* @irq: interrupt ID for the DMA controller
* @nr_pchans: the number of physical channels
* @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
@ -336,9 +344,11 @@ static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
struct owl_dma_lli *prev,
struct owl_dma_lli *next)
struct owl_dma_lli *next,
bool is_cyclic)
{
list_add_tail(&next->node, &txd->lli_list);
if (!is_cyclic)
list_add_tail(&next->node, &txd->lli_list);
if (prev) {
prev->hw.next_lli = next->phys;
@ -351,7 +361,9 @@ static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
struct owl_dma_lli *lli,
dma_addr_t src, dma_addr_t dst,
u32 len, enum dma_transfer_direction dir)
u32 len, enum dma_transfer_direction dir,
struct dma_slave_config *sconfig,
bool is_cyclic)
{
struct owl_dma_lli_hw *hw = &lli->hw;
u32 mode;
@ -364,6 +376,32 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
OWL_DMA_MODE_DAM_INC;
break;
case DMA_MEM_TO_DEV:
mode |= OWL_DMA_MODE_TS(vchan->drq)
| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;
/*
* Hardware only supports 32bit and 8bit buswidth. Since the
* default is 32bit, select 8bit only when requested.
*/
if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
mode |= OWL_DMA_MODE_NDDBW_8BIT;
break;
case DMA_DEV_TO_MEM:
mode |= OWL_DMA_MODE_TS(vchan->drq)
| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;
/*
* Hardware only supports 32bit and 8bit buswidth. Since the
* default is 32bit, select 8bit only when requested.
*/
if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
mode |= OWL_DMA_MODE_NDDBW_8BIT;
break;
default:
return -EINVAL;
@ -381,7 +419,10 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
OWL_DMA_LLC_SAV_LOAD_NEXT |
OWL_DMA_LLC_DAV_LOAD_NEXT);
hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
if (is_cyclic)
hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
else
hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
return 0;
}
@ -443,6 +484,16 @@ static void owl_dma_terminate_pchan(struct owl_dma *od,
spin_unlock_irqrestore(&od->lock, flags);
}
static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
{
pchan_writel(pchan, 1, OWL_DMAX_PAUSE);
}
static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
{
pchan_writel(pchan, 0, OWL_DMAX_PAUSE);
}
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
@ -464,7 +515,10 @@ static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
lli = list_first_entry(&txd->lli_list,
struct owl_dma_lli, node);
int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
if (txd->cyclic)
int_ctl = OWL_DMA_INTCTL_BLOCK;
else
int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
@ -627,6 +681,54 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
return 0;
}
static int owl_dma_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct owl_dma_vchan *vchan = to_owl_vchan(chan);
/* Reject definitely invalid configurations */
if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));
return 0;
}
static int owl_dma_pause(struct dma_chan *chan)
{
struct owl_dma_vchan *vchan = to_owl_vchan(chan);
unsigned long flags;
spin_lock_irqsave(&vchan->vc.lock, flags);
owl_dma_pause_pchan(vchan->pchan);
spin_unlock_irqrestore(&vchan->vc.lock, flags);
return 0;
}
static int owl_dma_resume(struct dma_chan *chan)
{
struct owl_dma_vchan *vchan = to_owl_vchan(chan);
unsigned long flags;
if (!vchan->pchan && !vchan->txd)
return 0;
dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
spin_lock_irqsave(&vchan->vc.lock, flags);
owl_dma_resume_pchan(vchan->pchan);
spin_unlock_irqrestore(&vchan->vc.lock, flags);
return 0;
}
static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
struct owl_dma_pchan *pchan;
@ -754,13 +856,14 @@ static struct dma_async_tx_descriptor
bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
bytes, DMA_MEM_TO_MEM);
bytes, DMA_MEM_TO_MEM,
&vchan->cfg, txd->cyclic);
if (ret) {
dev_warn(chan2dev(chan), "failed to config lli\n");
goto err_txd_free;
}
prev = owl_dma_add_lli(txd, prev, lli);
prev = owl_dma_add_lli(txd, prev, lli, false);
}
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
@ -770,6 +873,133 @@ err_txd_free:
return NULL;
}
static struct dma_async_tx_descriptor
*owl_dma_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl,
unsigned int sg_len,
enum dma_transfer_direction dir,
unsigned long flags, void *context)
{
struct owl_dma *od = to_owl_dma(chan->device);
struct owl_dma_vchan *vchan = to_owl_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct owl_dma_txd *txd;
struct owl_dma_lli *lli, *prev = NULL;
struct scatterlist *sg;
dma_addr_t addr, src = 0, dst = 0;
size_t len;
int ret, i;
txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
if (!txd)
return NULL;
INIT_LIST_HEAD(&txd->lli_list);
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
if (len > OWL_DMA_FRAME_MAX_LENGTH) {
dev_err(od->dma.dev,
"frame length exceeds max supported length");
goto err_txd_free;
}
lli = owl_dma_alloc_lli(od);
if (!lli) {
dev_err(chan2dev(chan), "failed to allocate lli");
goto err_txd_free;
}
if (dir == DMA_MEM_TO_DEV) {
src = addr;
dst = sconfig->dst_addr;
} else {
src = sconfig->src_addr;
dst = addr;
}
ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
txd->cyclic);
if (ret) {
dev_warn(chan2dev(chan), "failed to config lli");
goto err_txd_free;
}
prev = owl_dma_add_lli(txd, prev, lli, false);
}
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
err_txd_free:
owl_dma_free_txd(od, txd);
return NULL;
}
static struct dma_async_tx_descriptor
*owl_prep_dma_cyclic(struct dma_chan *chan,
dma_addr_t buf_addr, size_t buf_len,
size_t period_len,
enum dma_transfer_direction dir,
unsigned long flags)
{
struct owl_dma *od = to_owl_dma(chan->device);
struct owl_dma_vchan *vchan = to_owl_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct owl_dma_txd *txd;
struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
dma_addr_t src = 0, dst = 0;
unsigned int periods = buf_len / period_len;
int ret, i;
txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
if (!txd)
return NULL;
INIT_LIST_HEAD(&txd->lli_list);
txd->cyclic = true;
for (i = 0; i < periods; i++) {
lli = owl_dma_alloc_lli(od);
if (!lli) {
dev_warn(chan2dev(chan), "failed to allocate lli");
goto err_txd_free;
}
if (dir == DMA_MEM_TO_DEV) {
src = buf_addr + (period_len * i);
dst = sconfig->dst_addr;
} else if (dir == DMA_DEV_TO_MEM) {
src = sconfig->src_addr;
dst = buf_addr + (period_len * i);
}
ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
dir, sconfig, txd->cyclic);
if (ret) {
dev_warn(chan2dev(chan), "failed to config lli");
goto err_txd_free;
}
if (!first)
first = lli;
prev = owl_dma_add_lli(txd, prev, lli, false);
}
/* close the cyclic list */
owl_dma_add_lli(txd, prev, first, true);
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
err_txd_free:
owl_dma_free_txd(od, txd);
return NULL;
}
static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
struct owl_dma_vchan *vchan = to_owl_vchan(chan);
@ -790,6 +1020,27 @@ static inline void owl_dma_free(struct owl_dma *od)
}
}
static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct owl_dma *od = ofdma->of_dma_data;
struct owl_dma_vchan *vchan;
struct dma_chan *chan;
u8 drq = dma_spec->args[0];
if (drq > od->nr_vchans)
return NULL;
chan = dma_get_any_slave_channel(&od->dma);
if (!chan)
return NULL;
vchan = to_owl_vchan(chan);
vchan->drq = drq;
return chan;
}
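
With this translate hook registered against the controller node, a single-cell specifier selects the DRQ line and a client can then request its channel by name. A hedged sketch; the "tx" name and the DRQ value are illustrative and must match the client's dmas/dma-names properties:

/* Sketch: client side, assuming dmas = <&dma 5>; dma-names = "tx"; */
struct dma_chan *chan = dma_request_chan(&pdev->dev, "tx");

if (IS_ERR(chan))
	return PTR_ERR(chan);
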
static int owl_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@ -833,12 +1084,19 @@ static int owl_dma_probe(struct platform_device *pdev)
spin_lock_init(&od->lock);
dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
od->dma.dev = &pdev->dev;
od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
od->dma.device_tx_status = owl_dma_tx_status;
od->dma.device_issue_pending = owl_dma_issue_pending;
od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
od->dma.device_config = owl_dma_config;
od->dma.device_pause = owl_dma_pause;
od->dma.device_resume = owl_dma_resume;
od->dma.device_terminate_all = owl_dma_terminate_all;
od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@ -910,8 +1168,18 @@ static int owl_dma_probe(struct platform_device *pdev)
goto err_pool_free;
}
/* Device-tree DMA controller registration */
ret = of_dma_controller_register(pdev->dev.of_node,
owl_dma_of_xlate, od);
if (ret) {
dev_err(&pdev->dev, "of_dma_controller_register failed\n");
goto err_dma_unregister;
}
return 0;
err_dma_unregister:
dma_async_device_unregister(&od->dma);
err_pool_free:
clk_disable_unprepare(od->clk);
dma_pool_destroy(od->lli_pool);
@ -923,6 +1191,7 @@ static int owl_dma_remove(struct platform_device *pdev)
{
struct owl_dma *od = platform_get_drvdata(pdev);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&od->dma);
/* Mask all interrupts for this execution environment */


@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
}
static DRIVER_ATTR_RW(enable);
static ssize_t poly_store(struct device_driver *dev, char *buf)
static ssize_t poly_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
u32 reg;


@ -1285,7 +1285,6 @@ static int pxad_remove(struct platform_device *op)
pxad_cleanup_debugfs(pdev);
pxad_free_channels(&pdev->slave);
dma_async_device_unregister(&pdev->slave);
return 0;
}
@ -1396,7 +1395,7 @@ static int pxad_init_dmadev(struct platform_device *op,
init_waitqueue_head(&c->wq_state);
}
return dma_async_device_register(&pdev->slave);
return dmaenginem_async_device_register(&pdev->slave);
}
static int pxad_probe(struct platform_device *op)
@ -1433,7 +1432,7 @@ static int pxad_probe(struct platform_device *op)
"#dma-requests set to default 32 as missing in OF: %d",
ret);
nb_requestors = 32;
};
}
} else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
nb_requestors = pdata->nb_requestors;

View file

@ -198,6 +198,7 @@ struct rcar_dmac {
struct dma_device engine;
struct device *dev;
void __iomem *iomem;
struct device_dma_parameters parms;
unsigned int n_channels;
struct rcar_dmac_chan *channels;
@ -1792,6 +1793,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
dmac->dev->dma_parms = &dmac->parms;
dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
ret = rcar_dmac_parse_of(&pdev->dev, dmac);


@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Renesas SuperH DMA Engine support
*
* Copyright (C) 2013 Renesas Electronics, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of version 2 the GNU General Public License as published by the Free
* Software Foundation.
*/
#ifndef SHDMA_ARM_H


@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs
*
@ -7,10 +8,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>


@ -1,12 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SHDMA Device Tree glue
*
* Copyright (C) 2013 Renesas Electronics Inc.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>

View file

@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
*
* Copyright (C) 2013 Renesas Electronics, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of version 2 the GNU General Public License as published by the Free
* Software Foundation.
*/
#include <linux/sh_dma.h>

View file

@@ -1,14 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Renesas SuperH DMA Engine support
*
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H

View file

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas SuperH DMA Engine support
*
@@ -8,11 +9,6 @@
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* - DMA of SuperH does not have Hardware DMA chain mode.
* - MAX DMA size is 16MB.
*

View file

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SUDMAC support
*
@@ -8,10 +9,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>

View file

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB DMA Controller Driver
*
@@ -6,10 +7,6 @@
* based on rcar-dmac.c
* Copyright (C) 2014 Renesas Electronics Inc.
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>

View file

@@ -68,6 +68,7 @@
/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN BIT(0)
#define SPRD_DMA_LINKLIST_EN BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET 24
#define SPRD_DMA_DONOT_WAIT_BDONE 1
@@ -103,7 +104,7 @@
#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET 21
#define SPRD_DMA_FIX_EN_OFFSET 20
#define SPRD_DMA_LLIST_END_OFFSET 19
#define SPRD_DMA_LLIST_END BIT(19)
#define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0)
/* SPRD_DMA_CHN_BLK_LEN register definition */
@@ -164,6 +165,7 @@ struct sprd_dma_desc {
struct sprd_dma_chn {
struct virt_dma_chan vc;
void __iomem *chn_base;
struct sprd_dma_linklist linklist;
struct dma_slave_config slave_cfg;
u32 chn_num;
u32 dev_id;
@@ -582,7 +584,8 @@ static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
}
static int sprd_dma_fill_desc(struct dma_chan *chan,
struct sprd_dma_desc *sdesc,
struct sprd_dma_chn_hw *hw,
unsigned int sglen, int sg_index,
dma_addr_t src, dma_addr_t dst, u32 len,
enum dma_transfer_direction dir,
unsigned long flags,
@@ -590,7 +593,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
{
struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
u32 int_mode = flags & SPRD_DMA_INT_MASK;
int src_datawidth, dst_datawidth, src_step, dst_step;
@@ -670,12 +672,52 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
hw->trsf_step = temp;
/* link-list configuration */
if (schan->linklist.phy_addr) {
if (sg_index == sglen - 1)
hw->frg_len |= SPRD_DMA_LLIST_END;
hw->cfg |= SPRD_DMA_LINKLIST_EN;
/* link-list index */
temp = (sg_index + 1) % sglen;
/* Next link-list configuration's physical address offset */
temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;
/*
 * Set the link-list pointer to point to the next link-list
 * configuration's physical address.
 */
hw->llist_ptr = schan->linklist.phy_addr + temp;
} else {
hw->llist_ptr = 0;
}
hw->frg_step = 0;
hw->src_blk_step = 0;
hw->des_blk_step = 0;
return 0;
}
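The modulo arithmetic above is what closes the ring: node i links to node (i + 1) % sglen, so the last node points back to node 0, matching the note in the sprd-dma header that the final link-list pointer must return to the first configuration. A standalone sketch of the address computation (the struct size and register offset below are illustrative stand-ins for the driver's real values):

#include <stdio.h>

#define HW_SIZE			48	/* illustrative sizeof(struct sprd_dma_chn_hw) */
#define SPRD_DMA_CHN_SRC_ADDR	0x40	/* illustrative register offset */

int main(void)
{
	unsigned long phy_addr = 0x80000000UL;	/* linklist.phy_addr */
	unsigned int sglen = 3;

	for (unsigned int i = 0; i < sglen; i++) {
		/* node i links to node (i + 1) % sglen; the last wraps to 0 */
		unsigned int next = (i + 1) % sglen;
		unsigned long llist_ptr =
			phy_addr + next * HW_SIZE + SPRD_DMA_CHN_SRC_ADDR;
		printf("node %u -> node %u at 0x%lx\n", i, next, llist_ptr);
	}
	return 0;
}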
static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
unsigned int sglen, int sg_index,
dma_addr_t src, dma_addr_t dst, u32 len,
enum dma_transfer_direction dir,
unsigned long flags,
struct dma_slave_config *slave_cfg)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct sprd_dma_chn_hw *hw;
if (!schan->linklist.virt_addr)
return -EINVAL;
hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
sg_index * sizeof(*hw));
return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
dir, flags, slave_cfg);
}
static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
@@ -744,10 +786,20 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 len = 0;
int ret, i;
/* TODO: now we only support one sg for each DMA configuration. */
if (!is_slave_direction(dir) || sglen > 1)
if (!is_slave_direction(dir))
return NULL;
if (context) {
struct sprd_dma_linklist *ll_cfg =
(struct sprd_dma_linklist *)context;
schan->linklist.phy_addr = ll_cfg->phy_addr;
schan->linklist.virt_addr = ll_cfg->virt_addr;
} else {
schan->linklist.phy_addr = 0;
schan->linklist.virt_addr = 0;
}
sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
if (!sdesc)
return NULL;
@@ -762,10 +814,25 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
src = slave_cfg->src_addr;
dst = sg_dma_address(sg);
}
/*
 * The link-list mode needs at least 2 link-list
 * configurations. If there is only one sg, there is no
 * need to fill in the link-list configuration.
 */
if (sglen < 2)
break;
ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
}
}
ret = sprd_dma_fill_desc(chan, sdesc, src, dst, len, dir, flags,
slave_cfg);
ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;

View file

@@ -833,7 +833,7 @@ static int st_fdma_probe(struct platform_device *pdev)
fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
ret = dma_async_device_register(&fdev->dma_device);
ret = dmaenginem_async_device_register(&fdev->dma_device);
if (ret) {
dev_err(&pdev->dev,
"Failed to register DMA device (%d)\n", ret);
@@ -844,15 +844,13 @@ static int st_fdma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to register controller (%d)\n", ret);
goto err_dma_dev;
goto err_rproc;
}
dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
return 0;
err_dma_dev:
dma_async_device_unregister(&fdev->dma_device);
err_rproc:
st_fdma_free(fdev);
st_slim_rproc_put(fdev->slim_rproc);
@@ -867,7 +865,6 @@ static int st_fdma_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, fdev->irq, fdev);
st_slim_rproc_put(fdev->slim_rproc);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&fdev->dma_device);
return 0;
}
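st_fdma is one of several drivers in this series converted to dmaenginem_async_device_register(), the device-managed variant: the dmaengine device is unregistered automatically when the driver is unbound, which is why the unwind label and the explicit dma_async_device_unregister() calls above disappear. A minimal probe sketch of the pattern ("foo" names are placeholders, not a real driver):

#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

struct foo_dma {
	struct dma_device ddev;
};

static int foo_dma_probe(struct platform_device *pdev)
{
	struct foo_dma *fd;
	int ret;

	fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;

	/* ... fill in fd->ddev capabilities and callbacks here ... */

	/* Unregisters itself when the device is unbound: no unwind
	 * label, and no dma_async_device_unregister() in remove(). */
	ret = dmaenginem_async_device_register(&fd->ddev);
	if (ret)
		return ret;

	/* The OF side is not device-managed, so remove() still needs
	 * of_dma_controller_free(), as st_fdma_remove() above shows. */
	return of_dma_controller_register(pdev->dev.of_node,
					  of_dma_xlate_by_chan_id, &fd->ddev);
}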

View file

@@ -2839,7 +2839,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_slave);
err = dma_async_device_register(&base->dma_slave);
err = dmaenginem_async_device_register(&base->dma_slave);
if (err) {
d40_err(base->dev, "Failed to register slave channels\n");
@@ -2854,12 +2854,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_memcpy);
err = dma_async_device_register(&base->dma_memcpy);
err = dmaenginem_async_device_register(&base->dma_memcpy);
if (err) {
d40_err(base->dev,
"Failed to register memcpy only channels\n");
goto unregister_slave;
goto exit;
}
d40_chan_init(base, &base->dma_both, base->phy_chans,
@@ -2871,18 +2871,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
d40_ops_init(base, &base->dma_both);
err = dma_async_device_register(&base->dma_both);
err = dmaenginem_async_device_register(&base->dma_both);
if (err) {
d40_err(base->dev,
"Failed to register logical and physical capable channels\n");
goto unregister_memcpy;
goto exit;
}
return 0;
unregister_memcpy:
dma_async_device_unregister(&base->dma_memcpy);
unregister_slave:
dma_async_device_unregister(&base->dma_slave);
exit:
return err;
}

View file

@@ -308,20 +308,12 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
switch (threshold) {
case STM32_DMA_FIFO_THRESHOLD_FULL:
if (buf_len >= STM32_DMA_MAX_BURST)
return true;
else
return false;
case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
if (buf_len >= STM32_DMA_MAX_BURST / 2)
return true;
else
return false;
default:
return false;
}
/*
 * The buffer or period length has to be a multiple of the FIFO
 * threshold depth, otherwise bytes may be left stuck in the FIFO
 * at the end of a buffer or period.
 */
return ((buf_len % ((threshold + 1) * 4)) == 0);
}
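A quick worked example of the new check: the STM32 DMA FIFO threshold encodings 0..3 select 1/4, 1/2, 3/4 and full of a 16-byte FIFO, so the usable depth is (threshold + 1) * 4 bytes, and bursts are only safe when the buffer or period length is a multiple of that depth. A standalone sketch mirroring the expression above:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the driver's expression: FIFO depth is (threshold + 1) * 4 bytes. */
static bool burst_possible(unsigned int buf_len, unsigned int threshold)
{
	return (buf_len % ((threshold + 1) * 4)) == 0;
}

int main(void)
{
	/* threshold 3 (FULL): depth is (3 + 1) * 4 = 16 bytes */
	printf("%d\n", burst_possible(32, 3));	/* 1: 32 is a multiple of 16 */
	printf("%d\n", burst_possible(24, 3));	/* 0: 8 bytes would be left stuck */
	return 0;
}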
static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,

View file

@@ -1656,7 +1656,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return ret;
}
ret = dma_async_device_register(dd);
ret = dmaenginem_async_device_register(dd);
if (ret)
return ret;
@@ -1674,8 +1674,6 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return 0;
err_unregister:
dma_async_device_unregister(dd);
return ret;
}

View file

@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
td_desc->desc_list_len, DMA_MEM_TO_DEV);
td_desc->desc_list_len, DMA_TO_DEVICE);
return &td_desc->txd;
}
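The timb_dma one-liner above is a type-correctness fix: dma_sync_single_for_device() takes an enum dma_data_direction, while DMA_MEM_TO_DEV belongs to enum dma_transfer_direction. The ep93xx DMA_NONE to DMA_TRANS_NONE change further down is the same class of fix, and there the numeric values genuinely differ. The two enums, as defined in the kernel headers:

/* <linux/dma-direction.h> -- direction of a memory mapping/sync */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

/* <linux/dmaengine.h> -- direction of a dmaengine transfer */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,	/* value 4; DMA_NONE above is 3 == DMA_DEV_TO_DEV here */
};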

View file

@@ -58,4 +58,73 @@
SPRD_DMA_CFGERR_INT,
};
/*
* struct sprd_dma_linklist - DMA link-list address structure
* @virt_addr: link-list virtual address to configure link-list node
* @phy_addr: link-list physical address to link DMA transfer
*
* The Spreadtrum DMA controller supports a link-list mode, which means slaves
* can supply several groups of configurations (each configuration represents
* one DMA transfer) saved in memory, and the DMA controller will link these
* configurations by writing the physical address of each configuration into
* the link-list register.
*
* As shown below, the link-list pointer register points to the physical
* address of 'configuration 1', the link-list pointer of 'configuration 1'
* points to 'configuration 2', and so on.
* Once the DMA transfer is triggered, the DMA controller loads
* 'configuration 1' into its registers automatically; after the
* 'configuration 1' transaction is done, it loads 'configuration 2'
* automatically, and so on until all DMA transactions are done.
*
* Note: the last link-list pointer should point back to the physical address
* of 'configuration 1', so that the DMA controller does not load an incorrect
* configuration once the last configuration's transaction is done.
*
*      DMA controller                   linklist memory
*     ======================            -----------------------
*    |                      |          |    configuration 1    |<---
*    |   DMA controller     | -------->|                       |   |
*    |                      | |        |                       |   |
*    |                      | |        |                       |   |
*    |                      | |        |                       |   |
*    | linklist pointer reg |--    ----| linklist pointer      |   |
*     ======================       |    -----------------------    |
*                                  |                               |
*                                  |    -----------------------    |
*                                  |   |    configuration 2    |   |
*                                  --->|                       |   |
*                                      |                       |   |
*                                      |                       |   |
*                                      |                       |   |
*                                  ----| linklist pointer      |   |
*                                  |    -----------------------    |
*                                  |                               |
*                                  |    -----------------------    |
*                                  |   |    configuration 3    |   |
*                                  --->|                       |   |
*                                      |                       |   |
*                                      |           .           |   |
*                                                  .               |
*                                                  .               |
*                                                  .               |
*                                      |           .           |   |
*                                       -----------------------    |
*                                      |    configuration n    |   |
*                                  --->|                       |   |
*                                      |                       |   |
*                                      |                       |   |
*                                      |                       |   |
*                                      | linklist pointer      |----
*                                       -----------------------
*
* To support link-list mode, DMA slaves should allocate one segment of memory,
* either from always-on IRAM or from DMA coherent memory, to store these
* groups of DMA configurations, and pass its virtual and physical addresses
* to the DMA controller.
*/
struct sprd_dma_linklist {
unsigned long virt_addr;
phys_addr_t phy_addr;
};
#endif
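To make the above concrete, here is a hedged client-side sketch (not taken from an in-tree user): it allocates one DMA-coherent block for the per-sg hardware configurations, fills a struct sprd_dma_linklist with its addresses, and hands it to the driver through the context argument. The generic dmaengine_prep_slave_sg() helper passes a NULL context, so a link-list client would call the device op directly; the block is sized at one page here since the per-node descriptor layout is driver-private, and the flags encoding (SPRD_DMA_REQ_SHIFT and friends) is driver-specific and elided.

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/sprd-dma.h>	/* assumed include path for this header */

static struct dma_async_tx_descriptor *
foo_prep_linklist(struct device *dev, struct dma_chan *chan,
		  struct scatterlist *sgl, unsigned int nents)
{
	struct sprd_dma_linklist ll_cfg;
	dma_addr_t phy;
	void *virt;

	virt = dma_alloc_coherent(dev, PAGE_SIZE, &phy, GFP_KERNEL);
	if (!virt)
		return NULL;

	/* The driver copies these into its channel state during prep,
	 * so a stack-local structure is fine here. */
	ll_cfg.virt_addr = (unsigned long)virt;
	ll_cfg.phy_addr = phy;

	/* Flags are driver-specific; 0 keeps the sketch neutral. */
	return chan->device->device_prep_slave_sg(chan, sgl, nents,
						  DMA_MEM_TO_DEV, 0, &ll_cfg);
}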

View file

@@ -85,7 +85,7 @@ static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
if (!ep93xx_dma_chan_is_m2p(chan))
return DMA_NONE;
return DMA_TRANS_NONE;
/* even channels are for TX, odd for RX */
return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

View file

@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Freescale eDMA platform data, for the ColdFire family of SoCs.
*
* Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __LINUX_PLATFORM_DATA_MCF_EDMA_H__
#define __LINUX_PLATFORM_DATA_MCF_EDMA_H__
struct dma_slave_map;
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
#define MCF_EDMA_FILTER_PARAM(ch) ((void *)ch)
/**
* struct mcf_edma_platform_data - platform specific data for eDMA engine
*
* @dma_channels: number of eDMA channels
* @slave_map: array of DMA slave map entries for channel request routing
* @slavecnt: number of entries in @slave_map
*/
struct mcf_edma_platform_data {
int dma_channels;
const struct dma_slave_map *slave_map;
int slavecnt;
};
#endif /* __LINUX_PLATFORM_DATA_MCF_EDMA_H__ */
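As a consumer-side illustration, a hypothetical board file might wire this platform data up as follows (device names, channel numbers and the header path are illustrative, not taken from a real ColdFire board file):

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-mcf-edma.h>	/* assumed include path */

/* Route two hypothetical UART request lines to eDMA channels 2 and 3. */
static const struct dma_slave_map mcf_edma_map[] = {
	{ "mcfuart.0", "rx", MCF_EDMA_FILTER_PARAM(2) },
	{ "mcfuart.0", "tx", MCF_EDMA_FILTER_PARAM(3) },
};

static struct mcf_edma_platform_data mcf_edma_data = {
	.dma_channels	= 64,
	.slave_map	= mcf_edma_map,
	.slavecnt	= ARRAY_SIZE(mcf_edma_map),
};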