From 9a0f780958bbcb85604636fa340e2a1efaa4f432 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Mon, 13 May 2019 13:39:51 +0200 Subject: [PATCH 01/68] dmaengine: sudmac: remove unused driver SUDMAC driver was introduced in v3.10 but was never integrated for use by any platform. As it is unused remove it. Signed-off-by: Simon Horman Acked-by: Yoshihiro Shimoda Signed-off-by: Vinod Koul --- drivers/dma/sh/Kconfig | 6 - drivers/dma/sh/Makefile | 1 - drivers/dma/sh/sudmac.c | 414 ---------------------------------------- include/linux/sudmac.h | 52 ----- 4 files changed, 473 deletions(-) delete mode 100644 drivers/dma/sh/sudmac.c delete mode 100644 include/linux/sudmac.h diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 4d6b02b3b1f1..54d5d0369d3c 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -47,9 +47,3 @@ config RENESAS_USB_DMAC help This driver supports the USB-DMA controller found in the Renesas SoCs. - -config SUDMAC - tristate "Renesas SUDMAC support" - depends on SH_DMAE_BASE - help - Enable support for the Renesas SUDMAC controllers. diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 42110dd57a56..112fbd22bb3f 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile @@ -15,4 +15,3 @@ obj-$(CONFIG_SH_DMAE) += shdma.o obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o -obj-$(CONFIG_SUDMAC) += sudmac.o diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c deleted file mode 100644 index 30cc3553cb8b..000000000000 --- a/drivers/dma/sh/sudmac.c +++ /dev/null @@ -1,414 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Renesas SUDMAC support - * - * Copyright (C) 2013 Renesas Solutions Corp. - * - * based on drivers/dma/sh/shdma.c: - * Copyright (C) 2011-2012 Guennadi Liakhovetski - * Copyright (C) 2009 Nobuhiro Iwamatsu - * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. - * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -struct sudmac_chan { - struct shdma_chan shdma_chan; - void __iomem *base; - char dev_id[16]; /* unique name per DMAC of channel */ - - u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */ - u32 cfg; - u32 dint_end_bit; -}; - -struct sudmac_device { - struct shdma_dev shdma_dev; - struct sudmac_pdata *pdata; - void __iomem *chan_reg; -}; - -struct sudmac_regs { - u32 base_addr; - u32 base_byte_count; -}; - -struct sudmac_desc { - struct sudmac_regs hw; - struct shdma_desc shdma_desc; -}; - -#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan) -#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc) -#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \ - struct sudmac_device, shdma_dev.dma_dev) - -/* SUDMAC register */ -#define SUDMAC_CH0CFG 0x00 -#define SUDMAC_CH0BA 0x10 -#define SUDMAC_CH0BBC 0x18 -#define SUDMAC_CH0CA 0x20 -#define SUDMAC_CH0CBC 0x28 -#define SUDMAC_CH0DEN 0x30 -#define SUDMAC_DSTSCLR 0x38 -#define SUDMAC_DBUFCTRL 0x3C -#define SUDMAC_DINTCTRL 0x40 -#define SUDMAC_DINTSTS 0x44 -#define SUDMAC_DINTSTSCLR 0x48 -#define SUDMAC_CH0SHCTRL 0x50 - -/* Definitions for the sudmac_channel.config */ -#define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ -#define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ -#define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ - -/* Definitions for the sudmac_channel.dint_end_bit */ -#define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ -#define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ - -#define SUDMAC_DRV_NAME "sudmac" - -static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg) -{ - iowrite32(data, sc->base + reg); -} - -static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg) -{ - return ioread32(sc->base + reg); -} - -static bool sudmac_is_busy(struct sudmac_chan *sc) -{ - u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset); - - if (den) - return true; /* working */ - - return false; /* waiting */ -} - -static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw, - struct shdma_desc *sdesc) -{ - sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset); - sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset); - sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset); -} - -static void sudmac_start(struct sudmac_chan *sc) -{ - u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); - - sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL); - sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset); -} - -static void sudmac_start_xfer(struct shdma_chan *schan, - struct shdma_desc *sdesc) -{ - struct sudmac_chan *sc = to_chan(schan); - struct sudmac_desc *sd = to_desc(sdesc); - - sudmac_set_reg(sc, &sd->hw, sdesc); - sudmac_start(sc); -} - -static bool sudmac_channel_busy(struct shdma_chan *schan) -{ - struct sudmac_chan *sc = to_chan(schan); - - return sudmac_is_busy(sc); -} - -static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id) -{ -} - -static const struct sudmac_slave_config *sudmac_find_slave( - struct sudmac_chan *sc, int slave_id) -{ - struct sudmac_device *sdev = to_sdev(sc); - struct sudmac_pdata *pdata = sdev->pdata; - const struct sudmac_slave_config *cfg; - int i; - - for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) - if (cfg->slave_id == slave_id) - return cfg; - - return NULL; -} - -static int sudmac_set_slave(struct shdma_chan *schan, int 
slave_id, - dma_addr_t slave_addr, bool try) -{ - struct sudmac_chan *sc = to_chan(schan); - const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); - - if (!cfg) - return -ENODEV; - - return 0; -} - -static inline void sudmac_dma_halt(struct sudmac_chan *sc) -{ - u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); - - sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset); - sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL); - sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR); -} - -static int sudmac_desc_setup(struct shdma_chan *schan, - struct shdma_desc *sdesc, - dma_addr_t src, dma_addr_t dst, size_t *len) -{ - struct sudmac_chan *sc = to_chan(schan); - struct sudmac_desc *sd = to_desc(sdesc); - - dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n", - __func__, &src, &dst, *len); - - if (*len > schan->max_xfer_len) - *len = schan->max_xfer_len; - - if (dst) - sd->hw.base_addr = dst; - else if (src) - sd->hw.base_addr = src; - sd->hw.base_byte_count = *len; - - return 0; -} - -static void sudmac_halt(struct shdma_chan *schan) -{ - struct sudmac_chan *sc = to_chan(schan); - - sudmac_dma_halt(sc); -} - -static bool sudmac_chan_irq(struct shdma_chan *schan, int irq) -{ - struct sudmac_chan *sc = to_chan(schan); - u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS); - - if (!(dintsts & sc->dint_end_bit)) - return false; - - /* DMA stop */ - sudmac_dma_halt(sc); - - return true; -} - -static size_t sudmac_get_partial(struct shdma_chan *schan, - struct shdma_desc *sdesc) -{ - struct sudmac_chan *sc = to_chan(schan); - struct sudmac_desc *sd = to_desc(sdesc); - u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset); - - return sd->hw.base_byte_count - current_byte_count; -} - -static bool sudmac_desc_completed(struct shdma_chan *schan, - struct shdma_desc *sdesc) -{ - struct sudmac_chan *sc = to_chan(schan); - struct sudmac_desc *sd = to_desc(sdesc); - u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset); - - return sd->hw.base_addr + sd->hw.base_byte_count == current_addr; -} - -static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, - unsigned long flags) -{ - struct shdma_dev *sdev = &su_dev->shdma_dev; - struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); - struct sudmac_chan *sc; - struct shdma_chan *schan; - int err; - - sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); - if (!sc) - return -ENOMEM; - - schan = &sc->shdma_chan; - schan->max_xfer_len = 64 * 1024 * 1024 - 1; - - shdma_chan_probe(sdev, schan, id); - - sc->base = su_dev->chan_reg; - - /* get platform_data */ - sc->offset = su_dev->pdata->channel->offset; - if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE) - sc->cfg |= SUDMAC_SENDBUFM; - if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE) - sc->cfg |= SUDMAC_RCVENDM; - sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT; - - if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0) - sc->dint_end_bit |= SUDMAC_CH0ENDE; - if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1) - sc->dint_end_bit |= SUDMAC_CH1ENDE; - - /* set up channel irq */ - if (pdev->id >= 0) - snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d", - pdev->id, id); - else - snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id); - - err = shdma_request_irq(schan, irq, flags, sc->dev_id); - if (err) { - dev_err(sdev->dma_dev.dev, - "DMA channel %d request_irq failed %d\n", id, err); - goto err_no_irq; - } - - return 0; - -err_no_irq: - /* remove 
from dmaengine device node */ - shdma_chan_remove(schan); - return err; -} - -static void sudmac_chan_remove(struct sudmac_device *su_dev) -{ - struct shdma_chan *schan; - int i; - - shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { - BUG_ON(!schan); - - shdma_chan_remove(schan); - } -} - -static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) -{ - /* SUDMAC doesn't need the address */ - return 0; -} - -static struct shdma_desc *sudmac_embedded_desc(void *buf, int i) -{ - return &((struct sudmac_desc *)buf)[i].shdma_desc; -} - -static const struct shdma_ops sudmac_shdma_ops = { - .desc_completed = sudmac_desc_completed, - .halt_channel = sudmac_halt, - .channel_busy = sudmac_channel_busy, - .slave_addr = sudmac_slave_addr, - .desc_setup = sudmac_desc_setup, - .set_slave = sudmac_set_slave, - .setup_xfer = sudmac_setup_xfer, - .start_xfer = sudmac_start_xfer, - .embedded_desc = sudmac_embedded_desc, - .chan_irq = sudmac_chan_irq, - .get_partial = sudmac_get_partial, -}; - -static int sudmac_probe(struct platform_device *pdev) -{ - struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev); - int err, i; - struct sudmac_device *su_dev; - struct dma_device *dma_dev; - struct resource *chan, *irq_res; - - /* get platform data */ - if (!pdata) - return -ENODEV; - - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq_res) - return -ENODEV; - - err = -ENOMEM; - su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), - GFP_KERNEL); - if (!su_dev) - return err; - - dma_dev = &su_dev->shdma_dev.dma_dev; - - chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); - su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); - if (IS_ERR(su_dev->chan_reg)) - return PTR_ERR(su_dev->chan_reg); - - dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); - - su_dev->shdma_dev.ops = &sudmac_shdma_ops; - su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc); - err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num); - if (err < 0) - return err; - - /* platform data */ - su_dev->pdata = dev_get_platdata(&pdev->dev); - - platform_set_drvdata(pdev, su_dev); - - /* Create DMA Channel */ - for (i = 0; i < pdata->channel_num; i++) { - err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED); - if (err) - goto chan_probe_err; - } - - err = dma_async_device_register(&su_dev->shdma_dev.dma_dev); - if (err < 0) - goto chan_probe_err; - - return err; - -chan_probe_err: - sudmac_chan_remove(su_dev); - - shdma_cleanup(&su_dev->shdma_dev); - - return err; -} - -static int sudmac_remove(struct platform_device *pdev) -{ - struct sudmac_device *su_dev = platform_get_drvdata(pdev); - struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; - - dma_async_device_unregister(dma_dev); - sudmac_chan_remove(su_dev); - shdma_cleanup(&su_dev->shdma_dev); - - return 0; -} - -static struct platform_driver sudmac_driver = { - .driver = { - .name = SUDMAC_DRV_NAME, - }, - .probe = sudmac_probe, - .remove = sudmac_remove, -}; -module_platform_driver(sudmac_driver); - -MODULE_AUTHOR("Yoshihiro Shimoda"); -MODULE_DESCRIPTION("Renesas SUDMAC driver"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:" SUDMAC_DRV_NAME); diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h deleted file mode 100644 index 377b8a5788fa..000000000000 --- a/include/linux/sudmac.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Header for the SUDMAC driver - * - * Copyright (C) 2013 Renesas Solutions Corp. 
- * - * This is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - */ -#ifndef SUDMAC_H -#define SUDMAC_H - -#include -#include -#include - -/* Used by slave DMA clients to request DMA to/from a specific peripheral */ -struct sudmac_slave { - struct shdma_slave shdma_slave; /* Set by the platform */ -}; - -/* - * Supplied by platforms to specify, how a DMA channel has to be configured for - * a certain peripheral - */ -struct sudmac_slave_config { - int slave_id; -}; - -struct sudmac_channel { - unsigned long offset; - unsigned long config; - unsigned long wait; /* The configuable range is 0 to 3 */ - unsigned long dint_end_bit; -}; - -struct sudmac_pdata { - const struct sudmac_slave_config *slave; - int slave_num; - const struct sudmac_channel *channel; - int channel_num; -}; - -/* Definitions for the sudmac_channel.config */ -#define SUDMAC_TX_BUFFER_MODE BIT(0) -#define SUDMAC_RX_END_MODE BIT(1) - -/* Definitions for the sudmac_channel.dint_end_bit */ -#define SUDMAC_DMA_BIT_CH0 BIT(0) -#define SUDMAC_DMA_BIT_CH1 BIT(1) - -#endif From d8b9626af54605c30f0ff4eebf6f7360e2212a89 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Sat, 4 May 2019 23:34:32 +0200 Subject: [PATCH 02/68] dmaengine: jz4780: Use SPDX license notifier Use SPDX license notifier instead of plain text in the header. Signed-off-by: Paul Cercueil Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4780.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 9ce0a386225b..02075417c69f 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Ingenic JZ4780 DMA controller * * Copyright (c) 2015 Imagination Technologies * Author: Alex Smith - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. */ #include From e40543931fe34d7808bf87398a0daca44c919d25 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Tue, 7 May 2019 09:54:41 +0200 Subject: [PATCH 03/68] dmaengine: stm32-dma: Fix redundant call to platform_get_irq Commit c6504be53972 ("dmaengine: stm32-dma: Fix unsigned variable compared with zero") duplicated the call to platform_get_irq. So remove the first call to platform_get_irq. Fixes: c6504be53972 ("dmaengine: stm32-dma: Fix unsigned variable compared with zero") Signed-off-by: Amelie Delaunay Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 88d9c6c4389f..67fdd02f9845 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1366,7 +1366,6 @@ static int stm32_dma_probe(struct platform_device *pdev) for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { chan = &dmadev->chan[i]; - chan->irq = platform_get_irq(pdev, i); ret = platform_get_irq(pdev, i); if (ret < 0) { if (ret != -EPROBE_DEFER) From 8add6cce98482da67e971addd7eae8f22f8e6c7a Mon Sep 17 00:00:00 2001 From: Dragos Bogdan Date: Thu, 16 May 2019 10:04:43 +0300 Subject: [PATCH 04/68] dmaengine: axi-dmac: Add support for interleaved cyclic transfers The DMAC HDL core supports interleaved & cyclic transfers. 
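As an illustration, a client could request such a transfer along the following lines; this is a minimal sketch keyed off the DMA_CYCLIC bit in `flags` that this patch tests, and `chan`, `buf_phys`, `lines`, `line_bytes` and `stride` are placeholder assumptions, not part of this patch:

	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;
	xt->dst_start = buf_phys;		/* capture into memory */
	xt->dir = DMA_DEV_TO_MEM;
	xt->dst_inc = true;
	xt->numf = lines;			/* one DMAengine "frame" per video line */
	xt->frame_size = 1;			/* a single chunk per frame */
	xt->sgl[0].size = line_bytes;		/* bytes per line */
	xt->sgl[0].icg = stride - line_bytes;	/* gap up to the next line */

	/* the DMA_CYCLIC bit in flags is what marks the descriptor cyclic */
	desc = dmaengine_prep_interleaved_dma(chan, xt,
					      DMA_CYCLIC | DMA_PREP_INTERRUPT);
	kfree(xt);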
An example use-case for this mode is when the controller is used as a video DMA. This change sets the `cyclic` field to true, so that when the IRQ comes and the `axi_dmac_transfer_done()` callback is called (from the interrupt handler) the proper `vchan_cyclic_callback()` is called. This way the DMAEngine framework will process data correctly for interleaved + cyclic transfers. This doesn't fix anything. It's an enhancement to the driver. Signed-off-by: Dragos Bogdan Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index f32fdf21edbd..4d2cae0bebb5 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -562,6 +562,9 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved( desc->sg[0].y_len = 1; } + if (flags & DMA_CYCLIC) + desc->cyclic = true; + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); } From a3ee0bf23eaec2ae46fc6b1266bdaa5995b55c1e Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Thu, 16 May 2019 12:44:30 +0300 Subject: [PATCH 05/68] dmaengine: axi-dmac: Enable TLAST handling The TLAST flag is used by the DMAC HDL controller to signal to the controller that the following segment (to be submitted) is the last one (in a series of segments). A receiver DMA (typically another DMAC) can read this parameter (from the transfer), and terminate the transfer earlier. A typical use-case for this, is when the receiver expects a certain amount of segments, but for some reason (e.g. an ADC capture which can have an unknown number of digital samples) the number of actual segments is smaller. The receiver would read this flag, and then the DMAC would finish. Signed-off-by: Michael Hennerich Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 4d2cae0bebb5..8e64b81c2d3c 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -71,6 +71,7 @@ #define AXI_DMAC_IRQ_EOT BIT(1) #define AXI_DMAC_FLAG_CYCLIC BIT(0) +#define AXI_DMAC_FLAG_LAST BIT(1) /* The maximum ID allocated by the hardware is 31 */ #define AXI_DMAC_SG_UNUSED 32U @@ -216,6 +217,7 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) desc->num_submitted = 0; /* Start again */ else chan->next_desc = NULL; + flags |= AXI_DMAC_FLAG_LAST; } else { chan->next_desc = desc; } From b5d89905d0391de2f0e04b709ab60eee6cf76a6e Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Thu, 16 May 2019 11:31:34 +0300 Subject: [PATCH 06/68] dmaengine: axi-dmac: Sanity check memory mapped interface support The AXI-DMAC supports different types of interface for the data source and destination ports. Typically one of those ports is a memory-mapped interface while the other is some kind of streaming interface. The information about which kind of interface is used for each port is encoded in the devicetree. It is also possible in the driver to detect whether a port supports memory-mapped transfers or not. For streaming interfaces the address register is read-only and will always return 0. So in order to check if a port supports memory-mapped transfers write a non-zero value to the corresponding address register and check that the value read-back is still non zero. This allows to detect mismatches between the devicetree description and the actual hardware configuration. 
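The check reduces to a small write/read-back probe, condensed here into a helper for clarity; the helper itself is illustrative, only the accessors and register names are the driver's own:

	static bool axi_dmac_port_is_mm(struct axi_dmac *dmac,
					unsigned int addr_reg)
	{
		/* A streaming port implements the address register as
		 * read-only 0, so a non-zero read-back proves the port
		 * is memory-mapped. */
		axi_dmac_write(dmac, addr_reg, 0xffffffff);
		return axi_dmac_read(dmac, addr_reg) != 0;
	}

With addr_reg being AXI_DMAC_REG_SRC_ADDRESS or AXI_DMAC_REG_DEST_ADDRESS, a false result for a port that the devicetree declares as memory-mapped means the description and the hardware disagree.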
Unfortunately it is not possible to autodetect the interface types since there is no method to distinguish between the different streaming ports. So the best thing that can be done is to error out when a memory mapped port is described in the devicetree but none is detected in the hardware. Signed-off-by: Lars-Peter Clausen Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 8e64b81c2d3c..0984ae6eb155 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -637,7 +637,7 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, return 0; } -static void axi_dmac_detect_caps(struct axi_dmac *dmac) +static int axi_dmac_detect_caps(struct axi_dmac *dmac) { struct axi_dmac_chan *chan = &dmac->chan; @@ -653,6 +653,24 @@ static void axi_dmac_detect_caps(struct axi_dmac *dmac) chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); if (chan->max_length != UINT_MAX) chan->max_length++; + + axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff); + if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 && + chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) { + dev_err(dmac->dma_dev.dev, + "Destination memory-mapped interface not supported."); + return -ENODEV; + } + + axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff); + if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 && + chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) { + dev_err(dmac->dma_dev.dev, + "Source memory-mapped interface not supported."); + return -ENODEV; + } + + return 0; } static int axi_dmac_probe(struct platform_device *pdev) @@ -728,7 +746,9 @@ static int axi_dmac_probe(struct platform_device *pdev) if (ret < 0) return ret; - axi_dmac_detect_caps(dmac); + ret = axi_dmac_detect_caps(dmac); + if (ret) + goto err_clk_disable; axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00); From f935d7dc8125e231e3e1620d97b2367b008889f4 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 21 May 2019 19:36:44 +0530 Subject: [PATCH 07/68] =?UTF-8?q?dmaengine:=20xilinx=5Fdma:=20Remove=20set?= =?UTF-8?q?=20but=20unused=20=E2=80=98tail=5Fdesc=E2=80=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We get a compiler warn about variable ‘tail_desc’ set but not used drivers/dma/xilinx/xilinx_dma.c:1102:42: warning: variable ‘tail_desc’ set but not used [-Wunused-but-set-variable] struct xilinx_dma_tx_descriptor *desc, *tail_desc; So remove it. 
Reviewed-by: Radhey Shyam Pandey Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index c43c1a154604..34564224e675 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -1099,7 +1099,7 @@ static void xilinx_dma_start(struct xilinx_dma_chan *chan) static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) { struct xilinx_vdma_config *config = &chan->config; - struct xilinx_dma_tx_descriptor *desc, *tail_desc; + struct xilinx_dma_tx_descriptor *desc; u32 reg, j; struct xilinx_vdma_tx_segment *segment, *last = NULL; int i = 0; @@ -1116,8 +1116,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) desc = list_first_entry(&chan->pending_list, struct xilinx_dma_tx_descriptor, node); - tail_desc = list_last_entry(&chan->pending_list, - struct xilinx_dma_tx_descriptor, node); /* Configure the hardware using info in the config structure */ if (chan->has_vflip) { From f5151311c3f37f6edc85b2253ccf6d3e2a4c4c26 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 20 May 2019 19:32:14 +0800 Subject: [PATCH 08/68] dmaengine: Add matching device node validation in __dma_request_channel() When a user tries to request a DMA channel via __dma_request_channel(), there is no check that the channel actually belongs to the intended DMA device, so each DMA engine driver has to validate the device node in its filter function where necessary. Thus we can add the matching device-node validation to the DMA engine core and remove all of the device-node validation from the drivers. Tested-by: Peter Ujfalusi Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 10 ++++++++-- drivers/dma/of-dma.c | 4 ++-- include/linux/dmaengine.h | 12 ++++++++---- 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3a11b1092e80..610080c629bb 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -641,11 +641,13 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); * @mask: capabilities that the channel must satisfy * @fn: optional callback to disposition available channels * @fn_param: opaque parameter to pass to dma_filter_fn + * @np: device node to look for DMA channels * * Returns pointer to appropriate DMA channel on success or NULL.
*/ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param) + dma_filter_fn fn, void *fn_param, + struct device_node *np) { struct dma_device *device, *_d; struct dma_chan *chan = NULL; @@ -653,6 +655,10 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, /* Find a channel */ mutex_lock(&dma_list_mutex); list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { + /* Finds a DMA controller with matching device node */ + if (np && device->dev->of_node && np != device->dev->of_node) + continue; + chan = find_candidate(device, mask, fn, fn_param); if (!IS_ERR(chan)) break; @@ -769,7 +775,7 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) if (!mask) return ERR_PTR(-ENODEV); - chan = __dma_request_channel(mask, NULL, NULL); + chan = __dma_request_channel(mask, NULL, NULL, NULL); if (!chan) { mutex_lock(&dma_list_mutex); if (list_empty(&dma_device_list)) diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 91fd395c90c4..6b43d04da05d 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -316,8 +316,8 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, if (count != 1) return NULL; - return dma_request_channel(info->dma_cap, info->filter_fn, - &dma_spec->args[0]); + return __dma_request_channel(&info->dma_cap, info->filter_fn, + &dma_spec->args[0], dma_spec->np); } EXPORT_SYMBOL_GPL(of_dma_simple_xlate); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d49ec5c31944..504085b2bf21 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1314,7 +1314,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); void dma_issue_pending_all(void); struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param); + dma_filter_fn fn, void *fn_param, + struct device_node *np); struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); struct dma_chan *dma_request_chan(struct device *dev, const char *name); @@ -1339,7 +1340,9 @@ static inline void dma_issue_pending_all(void) { } static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param) + dma_filter_fn fn, + void *fn_param, + struct device_node *np) { return NULL; } @@ -1411,7 +1414,8 @@ void dma_async_device_unregister(struct dma_device *device); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); -#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) +#define dma_request_channel(mask, x, y) \ + __dma_request_channel(&(mask), x, y, NULL) #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ __dma_request_slave_channel_compat(&(mask), x, y, dev, name) @@ -1429,6 +1433,6 @@ static inline struct dma_chan if (!fn || !fn_param) return NULL; - return __dma_request_channel(mask, fn, fn_param); + return __dma_request_channel(mask, fn, fn_param, NULL); } #endif /* DMAENGINE_H */ From 37c0afeb41f1c6061b8c5fec1eeec36117bd1193 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 20 May 2019 19:32:15 +0800 Subject: [PATCH 09/68] soc: tegra: fuse: Use dma_request_channel instead of __dma_request_channel() The __dma_request_channel() prototype has been changed to help to do device node validation, thus we 
can use dma_request_channel() instead of __dma_request_channel() to keep kernel bisectable. Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/soc/tegra/fuse/fuse-tegra20.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c index 49ff017f3ded..f40a06f8423f 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra20.c +++ b/drivers/soc/tegra/fuse/fuse-tegra20.c @@ -110,7 +110,7 @@ static int tegra20_fuse_probe(struct tegra_fuse *fuse) dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - fuse->apbdma.chan = __dma_request_channel(&mask, dma_filter, NULL); + fuse->apbdma.chan = dma_request_channel(mask, dma_filter, NULL); if (!fuse->apbdma.chan) return -EPROBE_DEFER; From 990c0b53bf6599a9c9c7df1529dde681dee6cf64 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 20 May 2019 19:32:16 +0800 Subject: [PATCH 10/68] dmaengine: imx-sdma: Let the core do the device node validation Let the DMA engine core do the device node validation instead of drivers. Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 9 ++------- include/linux/platform_data/dma-imx.h | 1 - 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 99d9f431ae2c..ca296f0849ef 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1934,16 +1934,11 @@ disable_clk_ipg: static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) { struct sdma_channel *sdmac = to_sdma_chan(chan); - struct sdma_engine *sdma = sdmac->sdma; struct imx_dma_data *data = fn_param; if (!imx_dma_is_general_purpose(chan)) return false; - /* return false if it's not the right device */ - if (sdma->dev->of_node != data->of_node) - return false; - sdmac->data = *data; chan->private = &sdmac->data; @@ -1971,9 +1966,9 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, * be set to sdmac->event_id1. */ data.dma_request2 = 0; - data.of_node = ofdma->of_node; - return dma_request_channel(mask, sdma_filter_fn, &data); + return __dma_request_channel(&mask, sdma_filter_fn, &data, + ofdma->of_node); } static int sdma_probe(struct platform_device *pdev) diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 9daea8d42a10..7d964e787299 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -55,7 +55,6 @@ struct imx_dma_data { int dma_request2; /* secondary DMA request line */ enum sdma_peripheral_type peripheral_type; int priority; - struct device_node *of_node; }; static inline int imx_dma_is_ipu(struct dma_chan *chan) From c88ba7b940f8ec9b50216a69db4ddfa1af58a98c Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 20 May 2019 19:32:17 +0800 Subject: [PATCH 11/68] dmaengine: dma-jz4780: Let the core do the device node validation Let the DMA engine core do the device node validation instead of drivers. 
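The conversion recipe is the same here as in the other drivers in this series; a schematic sketch, with the foo_* names as placeholders:

	/* before: each filter function open-coded the device-node check */
	static bool foo_filter_fn(struct dma_chan *chan, void *param)
	{
		struct foo_filter_data *data = param;

		if (chan->device->dev->of_node != data->of_node)
			return false;
		/* ... remaining per-channel checks ... */
		return true;
	}

	/* after: drop the check and let the xlate callback hand the
	 * node to the core instead */
	return __dma_request_channel(&mask, foo_filter_fn, &data,
				     ofdma->of_node);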
Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4780.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 02075417c69f..4064391ccc13 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -156,7 +156,6 @@ struct jz4780_dma_dev { }; struct jz4780_dma_filter_data { - struct device_node *of_node; uint32_t transfer_type; int channel; }; @@ -761,8 +760,6 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param) struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); struct jz4780_dma_filter_data *data = param; - if (jzdma->dma_device.dev->of_node != data->of_node) - return false; if (data->channel > -1) { if (data->channel != jzchan->id) @@ -786,7 +783,6 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, if (dma_spec->args_count != 2) return NULL; - data.of_node = ofdma->of_node; data.transfer_type = dma_spec->args[0]; data.channel = dma_spec->args[1]; @@ -811,7 +807,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, return dma_get_slave_channel( &jzdma->chan[data.channel].vchan.chan); } else { - return dma_request_channel(mask, jz4780_dma_filter_fn, &data); + return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data, + ofdma->of_node); } } From 1d967195fd456ae0e5425ee39611455de32dc9de Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 20 May 2019 19:32:18 +0800 Subject: [PATCH 12/68] dmaengine: mmp_tdma: Let the core do the device node validation Let the DMA engine core do the device node validation instead of drivers. Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 0c56faa03e9a..e76858b6b334 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -586,18 +586,12 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, } struct mmp_tdma_filter_param { - struct device_node *of_node; unsigned int chan_id; }; static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param) { struct mmp_tdma_filter_param *param = fn_param; - struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); - struct dma_device *pdma_device = tdmac->chan.device; - - if (pdma_device->dev->of_node != param->of_node) - return false; if (chan->chan_id != param->chan_id) return false; @@ -615,13 +609,13 @@ static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec, if (dma_spec->args_count != 1) return NULL; - param.of_node = ofdma->of_node; param.chan_id = dma_spec->args[0]; if (param.chan_id >= TDMA_CHANNEL_NUM) return NULL; - return dma_request_channel(mask, mmp_tdma_filter_fn, ¶m); + return __dma_request_channel(&mask, mmp_tdma_filter_fn, ¶m, + ofdma->of_node); } static const struct of_device_id mmp_tdma_dt_ids[] = { From caf5e3e6e14d0b7df5c404683ff5d4fce4733a99 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 20 May 2019 19:32:19 +0800 Subject: [PATCH 13/68] dmaengine: mxs-dma: Let the core do the device node validation Let the DMA engine core do the device node validation instead of drivers. 
Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/mxs-dma.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 22cc7f68ef6e..8ce5e790352f 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -716,7 +716,6 @@ err_out: } struct mxs_dma_filter_param { - struct device_node *of_node; unsigned int chan_id; }; @@ -727,9 +726,6 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_irq; - if (mxs_dma->dma_device.dev->of_node != param->of_node) - return false; - if (chan->chan_id != param->chan_id) return false; @@ -752,13 +748,13 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, if (dma_spec->args_count != 1) return NULL; - param.of_node = ofdma->of_node; param.chan_id = dma_spec->args[0]; if (param.chan_id >= mxs_dma->nr_channels) return NULL; - return dma_request_channel(mask, mxs_dma_filter_fn, ¶m); + return __dma_request_channel(&mask, mxs_dma_filter_fn, ¶m, + ofdma->of_node); } static int __init mxs_dma_probe(struct platform_device *pdev) From 1dc1b29aa15ead508142bb109ef52686213d07f4 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 20 May 2019 19:32:20 +0800 Subject: [PATCH 14/68] dmaengine: sh: rcar-dmac: Let the core do the device node validation Let the DMA engine core do the device node validation instead of drivers. Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 33ab1b607e2b..67df54ac3294 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1654,8 +1654,7 @@ static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg) * Forcing it to call dma_request_channel() and iterate through all * channels from all controllers is just pointless. */ - if (chan->device->device_config != rcar_dmac_device_config || - dma_spec->np != chan->device->dev->of_node) + if (chan->device->device_config != rcar_dmac_device_config) return false; return !test_and_set_bit(dma_spec->args[0], dmac->modules); @@ -1675,7 +1674,8 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec, dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec); + chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec, + ofdma->of_node); if (!chan) return NULL; From c54d86641fe774154b7742b765e3e2f8affcb7e6 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 20 May 2019 19:32:21 +0800 Subject: [PATCH 15/68] dmaengine: sh: usb-dmac: Let the core do the device node validation Let the DMA engine core do the device node validation instead of drivers. 
Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sh/usb-dmac.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 59403f6d008a..0afabf395930 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -636,9 +636,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg) struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); struct of_phandle_args *dma_spec = arg; - if (dma_spec->np != chan->device->dev->of_node) - return false; - /* USB-DMAC should be used with fixed usb controller's FIFO */ if (uchan->index != dma_spec->args[0]) return false; @@ -659,7 +656,8 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec); + chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec, + ofdma->of_node); if (!chan) return NULL; From 7b11ef9653d2520540df708f92949f06ed8a42e2 Mon Sep 17 00:00:00 2001 From: Weitao Hou Date: Sun, 26 May 2019 15:13:24 +0800 Subject: [PATCH 16/68] dmaengine: stm32: use to_platform_device() Use to_platform_device() instead of open-coding it. Signed-off-by: Weitao Hou Signed-off-by: Vinod Koul --- drivers/dma/stm32-dmamux.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index a67119199c45..63af24d4c834 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -306,8 +306,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) #ifdef CONFIG_PM static int stm32_dmamux_runtime_suspend(struct device *dev) { - struct platform_device *pdev = - container_of(dev, struct platform_device, dev); + struct platform_device *pdev = to_platform_device(dev); struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); clk_disable_unprepare(stm32_dmamux->clk); @@ -317,8 +316,7 @@ static int stm32_dmamux_runtime_suspend(struct device *dev) static int stm32_dmamux_runtime_resume(struct device *dev) { - struct platform_device *pdev = - container_of(dev, struct platform_device, dev); + struct platform_device *pdev = to_platform_device(dev); struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); int ret; From d27ac2e02bf256d4e824e7c1e1e1afa2b96cefcc Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Mon, 27 May 2019 09:55:16 +0300 Subject: [PATCH 17/68] include: fpga: adi-axi-common.h: add common regs & defs header The AXI HDL cores provided for Analog Devices reference designs all share some common base registers (e.g. version register at address 0x00). To reduce duplication for this, a common header is added to define these registers as well as bitfields & macros to work with these registers. Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- include/linux/fpga/adi-axi-common.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 include/linux/fpga/adi-axi-common.h diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h new file mode 100644 index 000000000000..7fc95d5c95bb --- /dev/null +++ b/include/linux/fpga/adi-axi-common.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Analog Devices AXI common registers & definitions + * + * Copyright 2019 Analog Devices Inc. 
+ * + * https://wiki.analog.com/resources/fpga/docs/axi_ip + * https://wiki.analog.com/resources/fpga/docs/hdl/regmap + */ + +#ifndef ADI_AXI_COMMON_H_ +#define ADI_AXI_COMMON_H_ + +#define ADI_AXI_REG_VERSION 0x0000 + +#define ADI_AXI_PCORE_VER(major, minor, patch) \ + (((major) << 16) | ((minor) << 8) | (patch)) + +#endif /* ADI_AXI_COMMON_H_ */ From a5b20600a67a9b78a63e8d5b0c59327ddf636064 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 27 May 2019 09:55:17 +0300 Subject: [PATCH 18/68] dmaengine: axi-dmac: Discover length alignment requirement Starting with version 4.1.a the AXI-DMAC is capable of reporting the required length alignment. The LSBs that are required to be set for alignment will always read back as set from the transfer length register. It is not possible to clear them by writing a 0. This means the driver can discover the length alignment requirement by writing 0 to that register and reading back the value. Since the DMA will support length alignment requirements that are different from the address alignment requirement track both of them independently. For older versions of the peripheral assume that the length alignment requirement is equal to the address alignment requirement. Signed-off-by: Lars-Peter Clausen Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 0984ae6eb155..74ae6246d9a5 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -20,6 +20,7 @@ #include #include #include +#include #include @@ -110,7 +111,8 @@ struct axi_dmac_chan { unsigned int dest_type; unsigned int max_length; - unsigned int align_mask; + unsigned int address_align_mask; + unsigned int length_align_mask; bool hw_cyclic; bool hw_2d; @@ -169,14 +171,14 @@ static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len) { if (len == 0) return false; - if ((len & chan->align_mask) != 0) /* Not aligned */ + if ((len & chan->length_align_mask) != 0) /* Not aligned */ return false; return true; } static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr) { - if ((addr & chan->align_mask) != 0) /* Not aligned */ + if ((addr & chan->address_align_mask) != 0) /* Not aligned */ return false; return true; } @@ -394,7 +396,7 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan, num_segments = DIV_ROUND_UP(period_len, chan->max_length); segment_size = DIV_ROUND_UP(period_len, num_segments); /* Take care of alignment */ - segment_size = ((segment_size - 1) | chan->align_mask) + 1; + segment_size = ((segment_size - 1) | chan->length_align_mask) + 1; for (i = 0; i < num_periods; i++) { len = period_len; @@ -623,7 +625,7 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, return ret; chan->dest_width = val / 8; - chan->align_mask = max(chan->dest_width, chan->src_width) - 1; + chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1; if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan)) chan->direction = DMA_MEM_TO_MEM; @@ -640,6 +642,9 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, static int axi_dmac_detect_caps(struct axi_dmac *dmac) { struct axi_dmac_chan *chan = &dmac->chan; + unsigned int version; + + version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION); axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC); if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == 
AXI_DMAC_FLAG_CYCLIC) @@ -670,6 +675,14 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac) return -ENODEV; } + if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) { + axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00); + chan->length_align_mask = + axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); + } else { + chan->length_align_mask = chan->address_align_mask; + } + return 0; } From 5b969bd1d9cdfc8200f3dccf07a0270eaa2f2bbd Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Mon, 27 May 2019 09:55:18 +0300 Subject: [PATCH 19/68] dmaengine: axi-dmac: assign `copy_align` property The `copy_align` property is a generic property that describes alignment for DMA memcpy & sg ops. It serves mostly an informational purpose, and can be used in DMA tests, to pass the info to know what alignment to expect. Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 74ae6246d9a5..d5e29bbc3d43 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -763,6 +763,8 @@ static int axi_dmac_probe(struct platform_device *pdev) if (ret) goto err_clk_disable; + dma_dev->copy_align = (dmac->chan.address_align_mask + 1); + axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00); ret = dma_async_device_register(dma_dev); From 8f95adcf3a5aabb7a416d3c03c561acd580df213 Mon Sep 17 00:00:00 2001 From: Peng Ma Date: Wed, 22 May 2019 03:21:02 +0000 Subject: [PATCH 20/68] dmaengine: fsl-qdma: fixed the source/destination descriptor format CMD of Source/Destination descriptor format should be lower of struct fsl_qdma_engine number data address. Signed-off-by: Peng Ma Signed-off-by: Vinod Koul --- drivers/dma/fsl-qdma.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index aa1d0ae3d207..da8fdf5dca74 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -113,6 +113,7 @@ /* Field definition for Descriptor offset */ #define QDMA_CCDF_STATUS 20 #define QDMA_CCDF_OFFSET 20 +#define QDMA_SDDF_CMD(x) (((u64)(x)) << 32) /* Field definition for safe loop count*/ #define FSL_QDMA_HALT_COUNT 1500 @@ -341,6 +342,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan) static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, dma_addr_t dst, dma_addr_t src, u32 len) { + u32 cmd; struct fsl_qdma_format *sdf, *ddf; struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest; @@ -369,14 +371,14 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, /* This entry is the last entry. 
*/ qdma_csgf_set_f(csgf_dest, len); /* Descriptor Buffer */ - sdf->data = - cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << - FSL_QDMA_CMD_RWTTYPE_OFFSET); - ddf->data = - cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << - FSL_QDMA_CMD_RWTTYPE_OFFSET); - ddf->data |= - cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); + cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << + FSL_QDMA_CMD_RWTTYPE_OFFSET); + sdf->data = QDMA_SDDF_CMD(cmd); + + cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << + FSL_QDMA_CMD_RWTTYPE_OFFSET); + cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); + ddf->data = QDMA_SDDF_CMD(cmd); } /* From c983d805a7bb253b9015bc2c03bc8827031ce590 Mon Sep 17 00:00:00 2001 From: Peng Ma Date: Wed, 22 May 2019 03:21:03 +0000 Subject: [PATCH 21/68] dmaengine: fsl-qdma: Continue to clear register on error When an error occurs, we should clear the error register and then return. Signed-off-by: Peng Ma [vkoul: change patch title] Signed-off-by: Vinod Koul --- drivers/dma/fsl-qdma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index da8fdf5dca74..8e341c0c13bc 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -703,10 +703,8 @@ static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id) intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR); - if (intr) { + if (intr) dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n"); - return IRQ_NONE; - } qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR); return IRQ_HANDLED; From dc161064beb83c668e0f85766b92b1e7ed186e58 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 30 May 2019 00:43:55 +0300 Subject: [PATCH 22/68] dmaengine: tegra-apb: Error out if DMA_PREP_INTERRUPT flag is unset Apparently the driver was never tested with the DMA_PREP_INTERRUPT flag unset, since that completely disables interrupt handling instead of merely skipping the callback invocations, putting the channel into an unusable state. The flag is always set by all kernel drivers that use the APB DMA, so let's error out otherwise for consistency. It won't be difficult to support that case properly if it is ever needed.
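For reference, a well-formed request from a client's point of view looks roughly like this; a sketch only, where `chan`, `sgl`, `nents` and the foo_* names are placeholder assumptions:

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)	/* a NULL return now also covers the missing flag */
		return -EIO;

	desc->callback = foo_xfer_done;
	desc->callback_param = foo;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);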
Signed-off-by: Dmitry Osipenko Acked-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra20-apb-dma.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index cf462b1abc0b..2c84a660ba36 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -988,8 +988,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; } - if (flags & DMA_PREP_INTERRUPT) + if (flags & DMA_PREP_INTERRUPT) { csr |= TEGRA_APBDMA_CSR_IE_EOC; + } else { + WARN_ON_ONCE(1); + return NULL; + } apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; @@ -1131,8 +1135,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; } - if (flags & DMA_PREP_INTERRUPT) + if (flags & DMA_PREP_INTERRUPT) { csr |= TEGRA_APBDMA_CSR_IE_EOC; + } else { + WARN_ON_ONCE(1); + return NULL; + } apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; From f48d14c218ccaaf7c22f84582d4bdb5fa2234c76 Mon Sep 17 00:00:00 2001 From: Jernej Skrabec Date: Mon, 27 May 2019 22:14:53 +0200 Subject: [PATCH 23/68] dt-bindings: arm64: allwinner: h6: Add binding for DMA controller MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The DMA controller in the H6 is similar to the other supported DMA controllers, except that it is the first to support more than 32 request sources, and it has 16 channels. It also needs an additional clock to be enabled. Signed-off-by: Jernej Skrabec Reviewed-by: Rob Herring Signed-off-by: Clément Péron Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/sun6i-dma.txt | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt index 7fccc20d8331..cae31f4e77ba 100644 --- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt +++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt @@ -28,12 +28,17 @@ Example: }; ------------------------------------------------------------------------------ -For A64 DMA controller: +For A64 and H6 DMA controller: Required properties: -- compatible: "allwinner,sun50i-a64-dma" +- compatible: Must be one of + "allwinner,sun50i-a64-dma" + "allwinner,sun50i-h6-dma" - dma-channels: Number of DMA channels supported by the controller. Refer to Documentation/devicetree/bindings/dma/dma.txt +- clocks: In addition to parent AHB clock, it should also contain mbus + clock (H6 only) +- clock-names: Should contain "bus" and "mbus" (H6 only) - all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells Optional properties: From 43a90fc76a3ebe0ce3315725c7f0fa832df50c8e Mon Sep 17 00:00:00 2001 From: Jernej Skrabec Date: Mon, 27 May 2019 22:14:54 +0200 Subject: [PATCH 24/68] dmaengine: sun6i: Add a quirk for additional mbus clock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The H6 DMA controller needs an additional mbus clock to be enabled. Add a quirk for it and handle it accordingly.
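The quirk is meant to be consumed by a per-SoC config entry roughly like the following; a sketch only, since the real H6 entry is added later in the series and its name and remaining fields are assumptions here:

	static struct sun6i_dma_config sun50i_h6_dma_cfg = {
		/* ... burst lengths, widths and callbacks as for A64/H3 ... */
		.has_mbus_clk = true,
	};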
Signed-off-by: Jernej Skrabec Signed-off-by: Clément Péron Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 0cd13f17fc11..7d9606997251 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -129,6 +129,7 @@ struct sun6i_dma_config { u32 dst_burst_lengths; u32 src_addr_widths; u32 dst_addr_widths; + bool has_mbus_clk; }; /* @@ -182,6 +183,7 @@ struct sun6i_dma_dev { struct dma_device slave; void __iomem *base; struct clk *clk; + struct clk *clk_mbus; int irq; spinlock_t lock; struct reset_control *rstc; @@ -1208,6 +1210,14 @@ static int sun6i_dma_probe(struct platform_device *pdev) return PTR_ERR(sdc->clk); } + if (sdc->cfg->has_mbus_clk) { + sdc->clk_mbus = devm_clk_get(&pdev->dev, "mbus"); + if (IS_ERR(sdc->clk_mbus)) { + dev_err(&pdev->dev, "No mbus clock specified\n"); + return PTR_ERR(sdc->clk_mbus); + } + } + sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(sdc->rstc)) { dev_err(&pdev->dev, "No reset controller specified\n"); @@ -1312,11 +1322,19 @@ static int sun6i_dma_probe(struct platform_device *pdev) goto err_reset_assert; } + if (sdc->cfg->has_mbus_clk) { + ret = clk_prepare_enable(sdc->clk_mbus); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable mbus clock\n"); + goto err_clk_disable; + } + } + ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0, dev_name(&pdev->dev), sdc); if (ret) { dev_err(&pdev->dev, "Cannot request IRQ\n"); - goto err_clk_disable; + goto err_mbus_clk_disable; } ret = dma_async_device_register(&sdc->slave); @@ -1341,6 +1359,8 @@ err_dma_unregister: dma_async_device_unregister(&sdc->slave); err_irq_disable: sun6i_kill_tasklet(sdc); +err_mbus_clk_disable: + clk_disable_unprepare(sdc->clk_mbus); err_clk_disable: clk_disable_unprepare(sdc->clk); err_reset_assert: @@ -1359,6 +1379,7 @@ static int sun6i_dma_remove(struct platform_device *pdev) sun6i_kill_tasklet(sdc); + clk_disable_unprepare(sdc->clk_mbus); clk_disable_unprepare(sdc->clk); reset_control_assert(sdc->rstc); From 67f34055118cb6dcdfeea9e1980309afa80b2b7c Mon Sep 17 00:00:00 2001 From: Jernej Skrabec Date: Mon, 27 May 2019 22:14:55 +0200 Subject: [PATCH 25/68] dmaengine: sun6i: Add a quirk for setting DRQ fields MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit H6 DMA has more than 32 possible DRQs. That means that current maximum of 31 DRQs is not enough anymore. Add a quirk which will set source and destination DRQ number. 
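For example, an H6-style implementation of the new callback might look like this; a hedged sketch that assumes 6-bit DRQ fields so that request numbers above 31 fit, while the actual H6 field layout is only introduced later in the series:

	static void sun6i_set_drq_h6(u32 *p_cfg, s8 src_drq, s8 dst_drq)
	{
		/* assumed layout: source DRQ in bits 5:0, destination
		 * DRQ in bits 21:16 */
		*p_cfg |= ((u32)src_drq & 0x3f) |
			  (((u32)dst_drq & 0x3f) << 16);
	}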
Signed-off-by: Jernej Skrabec Signed-off-by: Clément Péron Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 48 ++++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 7d9606997251..f725b93fd21a 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -68,15 +68,15 @@ #define DMA_CHAN_LLI_ADDR 0x08 #define DMA_CHAN_CUR_CFG 0x0c -#define DMA_CHAN_MAX_DRQ 0x1f -#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & DMA_CHAN_MAX_DRQ) +#define DMA_CHAN_MAX_DRQ_A31 0x1f +#define DMA_CHAN_CFG_SRC_DRQ_A31(x) ((x) & DMA_CHAN_MAX_DRQ_A31) #define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) #define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) #define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) -#define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) +#define DMA_CHAN_CFG_DST_DRQ_A31(x) (DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16) #define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) #define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) #define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) @@ -125,6 +125,7 @@ struct sun6i_dma_config { */ void (*clock_autogate_enable)(struct sun6i_dma_dev *); void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); + void (*set_drq)(u32 *p_cfg, s8 src_drq, s8 dst_drq); u32 src_burst_lengths; u32 dst_burst_lengths; u32 src_addr_widths; @@ -311,6 +312,12 @@ static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst) DMA_CHAN_CFG_DST_BURST_H3(dst_burst); } +static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_A31(src_drq) | + DMA_CHAN_CFG_DST_DRQ_A31(dst_drq); +} + static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) { struct sun6i_desc *txd = pchan->desc; @@ -634,14 +641,13 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( burst = convert_burst(8); width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); - v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_DST_LINEAR_MODE | + v_lli->cfg = DMA_CHAN_CFG_DST_LINEAR_MODE | DMA_CHAN_CFG_SRC_LINEAR_MODE | DMA_CHAN_CFG_SRC_WIDTH(width) | DMA_CHAN_CFG_DST_WIDTH(width); sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); + sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, DRQ_SDRAM); sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); @@ -695,9 +701,8 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( v_lli->dst = sconfig->dst_addr; v_lli->cfg = lli_cfg | DMA_CHAN_CFG_DST_IO_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE | - DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_DST_DRQ(vchan->port); + DMA_CHAN_CFG_SRC_LINEAR_MODE; + sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); dev_dbg(chan2dev(chan), "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", @@ -710,9 +715,8 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( v_lli->dst = sg_dma_address(sg); v_lli->cfg = lli_cfg | DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_IO_MODE | - DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_SRC_DRQ(vchan->port); + DMA_CHAN_CFG_SRC_IO_MODE; + sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); dev_dbg(chan2dev(chan), "%s; chan: %d, dest: %pad, src: %pad, len: %u. 
flags: 0x%08lx\n", @@ -780,17 +784,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( v_lli->dst = sconfig->dst_addr; v_lli->cfg = lli_cfg | DMA_CHAN_CFG_DST_IO_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE | - DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_DST_DRQ(vchan->port); + DMA_CHAN_CFG_SRC_LINEAR_MODE; + sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); } else { v_lli->src = sconfig->src_addr; v_lli->dst = buf_addr + period_len * i; v_lli->cfg = lli_cfg | DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_IO_MODE | - DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_SRC_DRQ(vchan->port); + DMA_CHAN_CFG_SRC_IO_MODE; + sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); } prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); @@ -1055,6 +1057,7 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = { .nr_max_requests = 30, .nr_max_vchans = 53, .set_burst_length = sun6i_set_burst_length_a31, + .set_drq = sun6i_set_drq_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1076,6 +1079,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .nr_max_vchans = 37, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, + .set_drq = sun6i_set_drq_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1092,6 +1096,7 @@ static struct sun6i_dma_config sun8i_a83t_dma_cfg = { .nr_max_vchans = 39, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, + .set_drq = sun6i_set_drq_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1115,6 +1120,7 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { .nr_max_vchans = 34, .clock_autogate_enable = sun6i_enable_clock_autogate_h3, .set_burst_length = sun6i_set_burst_length_h3, + .set_drq = sun6i_set_drq_a31, .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1134,6 +1140,7 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { static struct sun6i_dma_config sun50i_a64_dma_cfg = { .clock_autogate_enable = sun6i_enable_clock_autogate_h3, .set_burst_length = sun6i_set_burst_length_h3, + .set_drq = sun6i_set_drq_a31, .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1157,6 +1164,7 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = { .nr_max_vchans = 24, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, + .set_drq = sun6i_set_drq_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1272,8 +1280,8 @@ static int sun6i_dma_probe(struct platform_device *pdev) ret = of_property_read_u32(np, "dma-requests", &sdc->max_request); if (ret && !sdc->max_request) { dev_info(&pdev->dev, "Missing dma-requests, using %u.\n", - DMA_CHAN_MAX_DRQ); - sdc->max_request = DMA_CHAN_MAX_DRQ; + DMA_CHAN_MAX_DRQ_A31); + sdc->max_request = DMA_CHAN_MAX_DRQ_A31; } /* From 802440bdf3b78721402f12495dffbb25522119bf Mon Sep 17 00:00:00 2001 From: Jernej Skrabec Date: Mon, 27 May 2019 22:14:56 +0200 Subject: [PATCH 26/68] dmaengine: sun6i: Add a 
quirk for setting mode fields MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit H6 DMA has mode fields in a different position than any other currently supported DMA controller. Add a quirk for that. Signed-off-by: Jernej Skrabec Signed-off-by: Clément Péron Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 46 ++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index f725b93fd21a..f5cb5e89bf7b 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -70,15 +70,13 @@ #define DMA_CHAN_CUR_CFG 0x0c #define DMA_CHAN_MAX_DRQ_A31 0x1f #define DMA_CHAN_CFG_SRC_DRQ_A31(x) ((x) & DMA_CHAN_MAX_DRQ_A31) -#define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) -#define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) +#define DMA_CHAN_CFG_SRC_MODE_A31(x) (((x) & 0x1) << 5) #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) #define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) #define DMA_CHAN_CFG_DST_DRQ_A31(x) (DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16) -#define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) -#define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) +#define DMA_CHAN_CFG_DST_MODE_A31(x) (DMA_CHAN_CFG_SRC_MODE_A31(x) << 16) #define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) #define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16) #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) @@ -98,6 +96,8 @@ #define LLI_LAST_ITEM 0xfffff800 #define NORMAL_WAIT 8 #define DRQ_SDRAM 1 +#define LINEAR_MODE 0 +#define IO_MODE 1 /* forward declaration */ struct sun6i_dma_dev; @@ -126,6 +126,7 @@ struct sun6i_dma_config { void (*clock_autogate_enable)(struct sun6i_dma_dev *); void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); void (*set_drq)(u32 *p_cfg, s8 src_drq, s8 dst_drq); + void (*set_mode)(u32 *p_cfg, s8 src_mode, s8 dst_mode); u32 src_burst_lengths; u32 dst_burst_lengths; u32 src_addr_widths; @@ -318,6 +319,12 @@ static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq) DMA_CHAN_CFG_DST_DRQ_A31(dst_drq); } +static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_MODE_A31(src_mode) | + DMA_CHAN_CFG_DST_MODE_A31(dst_mode); +} + static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) { struct sun6i_desc *txd = pchan->desc; @@ -641,13 +648,12 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( burst = convert_burst(8); width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); - v_lli->cfg = DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE | - DMA_CHAN_CFG_SRC_WIDTH(width) | + v_lli->cfg = DMA_CHAN_CFG_SRC_WIDTH(width) | DMA_CHAN_CFG_DST_WIDTH(width); sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, DRQ_SDRAM); + sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, LINEAR_MODE); sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); @@ -699,10 +705,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( if (dir == DMA_MEM_TO_DEV) { v_lli->src = sg_dma_address(sg); v_lli->dst = sconfig->dst_addr; - v_lli->cfg = lli_cfg | - DMA_CHAN_CFG_DST_IO_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE; + v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); + sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); dev_dbg(chan2dev(chan), "%s; chan: %d, dest: %pad,
src: %pad, len: %u. flags: 0x%08lx\n", @@ -713,10 +718,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( } else { v_lli->src = sconfig->src_addr; v_lli->dst = sg_dma_address(sg); - v_lli->cfg = lli_cfg | - DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_IO_MODE; + v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); + sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); dev_dbg(chan2dev(chan), "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", @@ -782,17 +786,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( if (dir == DMA_MEM_TO_DEV) { v_lli->src = buf_addr + period_len * i; v_lli->dst = sconfig->dst_addr; - v_lli->cfg = lli_cfg | - DMA_CHAN_CFG_DST_IO_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE; + v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); + sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); } else { v_lli->src = sconfig->src_addr; v_lli->dst = buf_addr + period_len * i; - v_lli->cfg = lli_cfg | - DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_IO_MODE; + v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); + sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); } prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); @@ -1058,6 +1060,7 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = { .nr_max_vchans = 53, .set_burst_length = sun6i_set_burst_length_a31, .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1080,6 +1083,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1097,6 +1101,7 @@ static struct sun6i_dma_config sun8i_a83t_dma_cfg = { .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1121,6 +1126,7 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { .clock_autogate_enable = sun6i_enable_clock_autogate_h3, .set_burst_length = sun6i_set_burst_length_h3, .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1141,6 +1147,7 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = { .clock_autogate_enable = sun6i_enable_clock_autogate_h3, .set_burst_length = sun6i_set_burst_length_h3, .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1165,6 +1172,7 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = { .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = 
BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | From 2fe5575f36cacaab860ed9822eb6b2ea7b6a52ba Mon Sep 17 00:00:00 2001 From: Jernej Skrabec Date: Mon, 27 May 2019 22:14:57 +0200 Subject: [PATCH 27/68] dmaengine: sun6i: Add support for H6 DMA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit H6 DMA has more than 32 supported DRQs, which means that the configuration register is slightly rearranged. It also needs an additional clock to be enabled. Add support for it. Signed-off-by: Jernej Skrabec Signed-off-by: Clément Péron Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index f5cb5e89bf7b..ddef87ebdfdb 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -69,14 +69,19 @@ #define DMA_CHAN_CUR_CFG 0x0c #define DMA_CHAN_MAX_DRQ_A31 0x1f +#define DMA_CHAN_MAX_DRQ_H6 0x3f #define DMA_CHAN_CFG_SRC_DRQ_A31(x) ((x) & DMA_CHAN_MAX_DRQ_A31) +#define DMA_CHAN_CFG_SRC_DRQ_H6(x) ((x) & DMA_CHAN_MAX_DRQ_H6) #define DMA_CHAN_CFG_SRC_MODE_A31(x) (((x) & 0x1) << 5) +#define DMA_CHAN_CFG_SRC_MODE_H6(x) (((x) & 0x1) << 8) #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) #define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) #define DMA_CHAN_CFG_DST_DRQ_A31(x) (DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16) +#define DMA_CHAN_CFG_DST_DRQ_H6(x) (DMA_CHAN_CFG_SRC_DRQ_H6(x) << 16) #define DMA_CHAN_CFG_DST_MODE_A31(x) (DMA_CHAN_CFG_SRC_MODE_A31(x) << 16) +#define DMA_CHAN_CFG_DST_MODE_H6(x) (DMA_CHAN_CFG_SRC_MODE_H6(x) << 16) #define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) #define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16) #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) @@ -319,12 +324,24 @@ static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq) DMA_CHAN_CFG_DST_DRQ_A31(dst_drq); } +static void sun6i_set_drq_h6(u32 *p_cfg, s8 src_drq, s8 dst_drq) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_H6(src_drq) | + DMA_CHAN_CFG_DST_DRQ_H6(dst_drq); +} + static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode) { *p_cfg |= DMA_CHAN_CFG_SRC_MODE_A31(src_mode) | DMA_CHAN_CFG_DST_MODE_A31(dst_mode); } +static void sun6i_set_mode_h6(u32 *p_cfg, s8 src_mode, s8 dst_mode) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_MODE_H6(src_mode) | + DMA_CHAN_CFG_DST_MODE_H6(dst_mode); +} + static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) { struct sun6i_desc *txd = pchan->desc; @@ -1160,6 +1177,28 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = { BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), }; +/* + * The H6 binding uses the number of dma channels from the + * device tree node.
+ */ +static struct sun6i_dma_config sun50i_h6_dma_cfg = { + .clock_autogate_enable = sun6i_enable_clock_autogate_h3, + .set_burst_length = sun6i_set_burst_length_h3, + .set_drq = sun6i_set_drq_h6, + .set_mode = sun6i_set_mode_h6, + .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), + .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), + .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), + .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), + .has_mbus_clk = true, +}; + /* * The V3s have only 8 physical channels, a maximum DRQ port id of 23, * and a total of 24 usable source and destination endpoints. @@ -1190,6 +1229,7 @@ static const struct of_device_id sun6i_dma_match[] = { { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, + { .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sun6i_dma_match); From 9135408c3ace4f7201407b1ef79079c30340743e Mon Sep 17 00:00:00 2001 From: Long Cheng Date: Sat, 27 Apr 2019 11:36:30 +0800 Subject: [PATCH 28/68] dmaengine: mediatek: Add MediaTek UART APDMA support Add 8250 UART APDMA support for the MediaTek UART. If the MediaTek UART is enabled by SERIAL_8250_MT6577, this driver can be enabled to offload the byte moving from the UART device. Signed-off-by: Long Cheng Signed-off-by: Sean Wang Signed-off-by: Vinod Koul --- drivers/dma/mediatek/Kconfig | 11 + drivers/dma/mediatek/Makefile | 1 + drivers/dma/mediatek/mtk-uart-apdma.c | 666 ++++++++++++++++++++++++++ 3 files changed, 678 insertions(+) create mode 100644 drivers/dma/mediatek/mtk-uart-apdma.c diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig index 680fc0572d87..ac49eb6c235e 100644 --- a/drivers/dma/mediatek/Kconfig +++ b/drivers/dma/mediatek/Kconfig @@ -24,3 +24,14 @@ config MTK_CQDMA This controller provides the channels which is dedicated to memory-to-memory transfer to offload from CPU. + +config MTK_UART_APDMA + tristate "MediaTek SoCs APDMA support for UART" + depends on OF && SERIAL_8250_MT6577 + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support for the UART DMA engine found on MediaTek MTK SoCs. + When SERIAL_8250_MT6577 is enabled, and if you want to use DMA, + you can enable the config. The DMA engine can only be used + with MediaTek SoCs. diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile index 41bb3815f636..61a6d29c8e8c 100644 --- a/drivers/dma/mediatek/Makefile +++ b/drivers/dma/mediatek/Makefile @@ -1,2 +1,3 @@ +obj-$(CONFIG_MTK_UART_APDMA) += mtk-uart-apdma.o obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c new file mode 100644 index 000000000000..546995c20876 --- /dev/null +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek UART APDMA driver. + * + * Copyright (c) 2019 MediaTek Inc.
+ * Author: Long Cheng + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../virt-dma.h" + +/* The default number of virtual channel */ +#define MTK_UART_APDMA_NR_VCHANS 8 + +#define VFF_EN_B BIT(0) +#define VFF_STOP_B BIT(0) +#define VFF_FLUSH_B BIT(0) +#define VFF_4G_EN_B BIT(0) +/* rx valid size >= vff thre */ +#define VFF_RX_INT_EN_B (BIT(0) | BIT(1)) +/* tx left size >= vff thre */ +#define VFF_TX_INT_EN_B BIT(0) +#define VFF_WARM_RST_B BIT(0) +#define VFF_RX_INT_CLR_B (BIT(0) | BIT(1)) +#define VFF_TX_INT_CLR_B 0 +#define VFF_STOP_CLR_B 0 +#define VFF_EN_CLR_B 0 +#define VFF_INT_EN_CLR_B 0 +#define VFF_4G_SUPPORT_CLR_B 0 + +/* + * interrupt trigger level for tx + * if threshold is n, no polling is required to start tx. + * otherwise need polling VFF_FLUSH. + */ +#define VFF_TX_THRE(n) (n) +/* interrupt trigger level for rx */ +#define VFF_RX_THRE(n) ((n) * 3 / 4) + +#define VFF_RING_SIZE 0xffff +/* invert this bit when wrap ring head again */ +#define VFF_RING_WRAP 0x10000 + +#define VFF_INT_FLAG 0x00 +#define VFF_INT_EN 0x04 +#define VFF_EN 0x08 +#define VFF_RST 0x0c +#define VFF_STOP 0x10 +#define VFF_FLUSH 0x14 +#define VFF_ADDR 0x1c +#define VFF_LEN 0x24 +#define VFF_THRE 0x28 +#define VFF_WPT 0x2c +#define VFF_RPT 0x30 +/* TX: the buffer size HW can read. RX: the buffer size SW can read. */ +#define VFF_VALID_SIZE 0x3c +/* TX: the buffer size SW can write. RX: the buffer size HW can write. */ +#define VFF_LEFT_SIZE 0x40 +#define VFF_DEBUG_STATUS 0x50 +#define VFF_4G_SUPPORT 0x54 + +struct mtk_uart_apdmadev { + struct dma_device ddev; + struct clk *clk; + bool support_33bits; + unsigned int dma_requests; +}; + +struct mtk_uart_apdma_desc { + struct virt_dma_desc vd; + + dma_addr_t addr; + unsigned int avail_len; +}; + +struct mtk_chan { + struct virt_dma_chan vc; + struct dma_slave_config cfg; + struct mtk_uart_apdma_desc *desc; + enum dma_transfer_direction dir; + + void __iomem *base; + unsigned int irq; + + unsigned int rx_status; +}; + +static inline struct mtk_uart_apdmadev * +to_mtk_uart_apdma_dev(struct dma_device *d) +{ + return container_of(d, struct mtk_uart_apdmadev, ddev); +} + +static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c) +{ + return container_of(c, struct mtk_chan, vc.chan); +} + +static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc + (struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct mtk_uart_apdma_desc, vd.tx); +} + +static void mtk_uart_apdma_write(struct mtk_chan *c, + unsigned int reg, unsigned int val) +{ + writel(val, c->base + reg); +} + +static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg) +{ + return readl(c->base + reg); +} + +static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd) +{ + struct dma_chan *chan = vd->tx.chan; + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + + kfree(c->desc); +} + +static void mtk_uart_apdma_start_tx(struct mtk_chan *c) +{ + struct mtk_uart_apdmadev *mtkd = + to_mtk_uart_apdma_dev(c->vc.chan.device); + struct mtk_uart_apdma_desc *d = c->desc; + unsigned int wpt, vff_sz; + + vff_sz = c->cfg.dst_port_window_size; + if (!mtk_uart_apdma_read(c, VFF_LEN)) { + mtk_uart_apdma_write(c, VFF_ADDR, d->addr); + mtk_uart_apdma_write(c, VFF_LEN, vff_sz); + mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz)); + mtk_uart_apdma_write(c, VFF_WPT, 0); + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); + + if 
(mtkd->support_33bits) + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); + } + + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); + if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) + dev_err(c->vc.chan.device->dev, "Enable TX fail\n"); + + if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) { + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); + return; + } + + wpt = mtk_uart_apdma_read(c, VFF_WPT); + + wpt += c->desc->avail_len; + if ((wpt & VFF_RING_SIZE) == vff_sz) + wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP; + + /* Let DMA start moving data */ + mtk_uart_apdma_write(c, VFF_WPT, wpt); + + /* HW auto set to 0 when left size >= threshold */ + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); + if (!mtk_uart_apdma_read(c, VFF_FLUSH)) + mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); +} + +static void mtk_uart_apdma_start_rx(struct mtk_chan *c) +{ + struct mtk_uart_apdmadev *mtkd = + to_mtk_uart_apdma_dev(c->vc.chan.device); + struct mtk_uart_apdma_desc *d = c->desc; + unsigned int vff_sz; + + vff_sz = c->cfg.src_port_window_size; + if (!mtk_uart_apdma_read(c, VFF_LEN)) { + mtk_uart_apdma_write(c, VFF_ADDR, d->addr); + mtk_uart_apdma_write(c, VFF_LEN, vff_sz); + mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz)); + mtk_uart_apdma_write(c, VFF_RPT, 0); + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); + + if (mtkd->support_33bits) + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); + } + + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B); + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); + if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) + dev_err(c->vc.chan.device->dev, "Enable RX fail\n"); +} + +static void mtk_uart_apdma_tx_handler(struct mtk_chan *c) +{ + struct mtk_uart_apdma_desc *d = c->desc; + + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); + + list_del(&d->vd.node); + vchan_cookie_complete(&d->vd); +} + +static void mtk_uart_apdma_rx_handler(struct mtk_chan *c) +{ + struct mtk_uart_apdma_desc *d = c->desc; + unsigned int len, wg, rg; + int cnt; + + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); + + if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE)) + return; + + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); + + len = c->cfg.src_port_window_size; + rg = mtk_uart_apdma_read(c, VFF_RPT); + wg = mtk_uart_apdma_read(c, VFF_WPT); + cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE); + + /* + * The buffer is ring buffer. 
If wrap bit different, + * represents the start of the next cycle for WPT + */ + if ((rg ^ wg) & VFF_RING_WRAP) + cnt += len; + + c->rx_status = d->avail_len - cnt; + mtk_uart_apdma_write(c, VFF_RPT, wg); + + list_del(&d->vd.node); + vchan_cookie_complete(&d->vd); +} + +static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id) +{ + struct dma_chan *chan = (struct dma_chan *)dev_id; + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (c->dir == DMA_DEV_TO_MEM) + mtk_uart_apdma_rx_handler(c); + else if (c->dir == DMA_MEM_TO_DEV) + mtk_uart_apdma_tx_handler(c); + spin_unlock_irqrestore(&c->vc.lock, flags); + + return IRQ_HANDLED; +} + +static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) +{ + struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + unsigned int status; + int ret; + + ret = pm_runtime_get_sync(mtkd->ddev.dev); + if (ret < 0) { + pm_runtime_put_noidle(chan->device->dev); + return ret; + } + + mtk_uart_apdma_write(c, VFF_ADDR, 0); + mtk_uart_apdma_write(c, VFF_THRE, 0); + mtk_uart_apdma_write(c, VFF_LEN, 0); + mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B); + + ret = readx_poll_timeout(readl, c->base + VFF_EN, + status, !status, 10, 100); + if (ret) + return ret; + + ret = request_irq(c->irq, mtk_uart_apdma_irq_handler, + IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan); + if (ret < 0) { + dev_err(chan->device->dev, "Can't request dma IRQ\n"); + return -EINVAL; + } + + if (mtkd->support_33bits) + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); + + return ret; +} + +static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan) +{ + struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + + free_irq(c->irq, chan); + + tasklet_kill(&c->vc.task); + + vchan_free_chan_resources(&c->vc); + + pm_runtime_put_sync(mtkd->ddev.dev); +} + +static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (!txstate) + return ret; + + dma_set_residue(txstate, c->rx_status); + + return ret; +} + +/* + * dmaengine_prep_slave_single will call the function. and sglen is 1. + * 8250 uart using one ring buffer, and deal with one sg. 
+ */ +static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg + (struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + struct mtk_uart_apdma_desc *d; + + if (!is_slave_direction(dir) || sglen != 1) + return NULL; + + /* Now allocate and setup the descriptor */ + d = kzalloc(sizeof(*d), GFP_ATOMIC); + if (!d) + return NULL; + + d->avail_len = sg_dma_len(sgl); + d->addr = sg_dma_address(sgl); + c->dir = dir; + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + +static void mtk_uart_apdma_issue_pending(struct dma_chan *chan) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc)) { + vd = vchan_next_desc(&c->vc); + c->desc = to_mtk_uart_apdma_desc(&vd->tx); + + if (c->dir == DMA_DEV_TO_MEM) + mtk_uart_apdma_start_rx(c); + else if (c->dir == DMA_MEM_TO_DEV) + mtk_uart_apdma_start_tx(c); + } + + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static int mtk_uart_apdma_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + + memcpy(&c->cfg, config, sizeof(*config)); + + return 0; +} + +static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + unsigned long flags; + unsigned int status; + LIST_HEAD(head); + int ret; + + mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); + + ret = readx_poll_timeout(readl, c->base + VFF_FLUSH, + status, status != VFF_FLUSH_B, 10, 100); + if (ret) + dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n", + mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); + + /* + * Stop need 3 steps. + * 1. set stop to 1 + * 2. wait en to 0 + * 3. 
set stop as 0 + */ + mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B); + ret = readx_poll_timeout(readl, c->base + VFF_EN, + status, !status, 10, 100); + if (ret) + dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n", + mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); + + mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B); + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); + + if (c->dir == DMA_DEV_TO_MEM) + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); + else if (c->dir == DMA_MEM_TO_DEV) + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); + + synchronize_irq(c->irq); + + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); + vchan_dma_desc_free_list(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + + return 0; +} + +static int mtk_uart_apdma_device_pause(struct dma_chan *chan) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); + + synchronize_irq(c->irq); + + spin_unlock_irqrestore(&c->vc.lock, flags); + + return 0; +} + +static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd) +{ + while (!list_empty(&mtkd->ddev.channels)) { + struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels, + struct mtk_chan, vc.chan.device_node); + + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + } +} + +static const struct of_device_id mtk_uart_apdma_match[] = { + { .compatible = "mediatek,mt6577-uart-dma", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match); + +static int mtk_uart_apdma_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mtk_uart_apdmadev *mtkd; + int bit_mask = 32, rc; + struct resource *res; + struct mtk_chan *c; + unsigned int i; + + mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL); + if (!mtkd) + return -ENOMEM; + + mtkd->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(mtkd->clk)) { + dev_err(&pdev->dev, "No clock specified\n"); + rc = PTR_ERR(mtkd->clk); + return rc; + } + + if (of_property_read_bool(np, "mediatek,dma-33bits")) + mtkd->support_33bits = true; + + if (mtkd->support_33bits) + bit_mask = 33; + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask)); + if (rc) + return rc; + + dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask); + mtkd->ddev.device_alloc_chan_resources = + mtk_uart_apdma_alloc_chan_resources; + mtkd->ddev.device_free_chan_resources = + mtk_uart_apdma_free_chan_resources; + mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status; + mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending; + mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg; + mtkd->ddev.device_config = mtk_uart_apdma_slave_config; + mtkd->ddev.device_pause = mtk_uart_apdma_device_pause; + mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all; + mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); + mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); + mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + mtkd->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&mtkd->ddev.channels); + + mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS; + if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) { + dev_info(&pdev->dev, + "Using %u as missing dma-requests property\n", + MTK_UART_APDMA_NR_VCHANS); + } + + for (i = 0; i < 
mtkd->dma_requests; i++) { + c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL); + if (!c) { + rc = -ENODEV; + goto err_no_dma; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) { + rc = -ENODEV; + goto err_no_dma; + } + + c->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(c->base)) { + rc = PTR_ERR(c->base); + goto err_no_dma; + } + c->vc.desc_free = mtk_uart_apdma_desc_free; + vchan_init(&c->vc, &mtkd->ddev); + + rc = platform_get_irq(pdev, i); + if (rc < 0) { + dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i); + goto err_no_dma; + } + c->irq = rc; + } + + pm_runtime_enable(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + + rc = dma_async_device_register(&mtkd->ddev); + if (rc) + goto rpm_disable; + + platform_set_drvdata(pdev, mtkd); + + /* Device-tree DMA controller registration */ + rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd); + if (rc) + goto dma_remove; + + return rc; + +dma_remove: + dma_async_device_unregister(&mtkd->ddev); +rpm_disable: + pm_runtime_disable(&pdev->dev); +err_no_dma: + mtk_uart_apdma_free(mtkd); + return rc; +} + +static int mtk_uart_apdma_remove(struct platform_device *pdev) +{ + struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + + mtk_uart_apdma_free(mtkd); + + dma_async_device_unregister(&mtkd->ddev); + + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mtk_uart_apdma_suspend(struct device *dev) +{ + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); + + if (!pm_runtime_suspended(dev)) + clk_disable_unprepare(mtkd->clk); + + return 0; +} + +static int mtk_uart_apdma_resume(struct device *dev) +{ + int ret; + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); + + if (!pm_runtime_suspended(dev)) { + ret = clk_prepare_enable(mtkd->clk); + if (ret) + return ret; + } + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int mtk_uart_apdma_runtime_suspend(struct device *dev) +{ + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); + + clk_disable_unprepare(mtkd->clk); + + return 0; +} + +static int mtk_uart_apdma_runtime_resume(struct device *dev) +{ + int ret; + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); + + ret = clk_prepare_enable(mtkd->clk); + if (ret) + return ret; + + return 0; +} +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops mtk_uart_apdma_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume) + SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend, + mtk_uart_apdma_runtime_resume, NULL) +}; + +static struct platform_driver mtk_uart_apdma_driver = { + .probe = mtk_uart_apdma_probe, + .remove = mtk_uart_apdma_remove, + .driver = { + .name = KBUILD_MODNAME, + .pm = &mtk_uart_apdma_pm_ops, + .of_match_table = of_match_ptr(mtk_uart_apdma_match), + }, +}; + +module_platform_driver(mtk_uart_apdma_driver); + +MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver"); +MODULE_AUTHOR("Long Cheng "); +MODULE_LICENSE("GPL v2"); From fb6dda8349ea0a6a68a743b6cb636261f4489983 Mon Sep 17 00:00:00 2001 From: Long Cheng Date: Sat, 27 Apr 2019 11:36:32 +0800 Subject: [PATCH 29/68] dt-bindings: dma: uart: rename binding The driver filename is mtk-uart-apdma.c, so "mtk-uart-apdma.txt" is a better name for the binding document. Also, document some additional properties.
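For reference, a consumer then names the controller with a single DMA cell, which the driver translates to a channel index through of_dma_xlate_by_chan_id. A hypothetical client node could look like this (node name, unit address and channel indices are illustrative assumptions, not taken from the binding):

	serial@11002000 {
		/* ... usual UART properties ... */
		dmas = <&apdma 0>, <&apdma 1>;
		dma-names = "tx", "rx";
	};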
Signed-off-by: Long Cheng Reviewed-by: Rob Herring Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/8250_mtk_dma.txt | 33 ------------ .../bindings/dma/mtk-uart-apdma.txt | 54 +++++++++++++++++++ 2 files changed, 54 insertions(+), 33 deletions(-) delete mode 100644 Documentation/devicetree/bindings/dma/8250_mtk_dma.txt create mode 100644 Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt diff --git a/Documentation/devicetree/bindings/dma/8250_mtk_dma.txt b/Documentation/devicetree/bindings/dma/8250_mtk_dma.txt deleted file mode 100644 index 3fe0961bcf64..000000000000 --- a/Documentation/devicetree/bindings/dma/8250_mtk_dma.txt +++ /dev/null @@ -1,33 +0,0 @@ -* Mediatek UART APDMA Controller - -Required properties: -- compatible should contain: - * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA - * "mediatek,mt6577-uart-dma" for MT6577 and all of the above - -- reg: The base address of the APDMA register bank. - -- interrupts: A single interrupt specifier. - -- clocks : Must contain an entry for each entry in clock-names. - See ../clocks/clock-bindings.txt for details. -- clock-names: The APDMA clock for register accesses - -Examples: - - apdma: dma-controller@11000380 { - compatible = "mediatek,mt2712-uart-dma"; - reg = <0 0x11000380 0 0x400>; - interrupts = , - , - , - , - , - , - , - ; - clocks = <&pericfg CLK_PERI_AP_DMA>; - clock-names = "apdma"; - #dma-cells = <1>; - }; - diff --git a/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt b/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt new file mode 100644 index 000000000000..5d6f98c43e3d --- /dev/null +++ b/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt @@ -0,0 +1,54 @@ +* Mediatek UART APDMA Controller + +Required properties: +- compatible should contain: + * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA + * "mediatek,mt6577-uart-dma" for MT6577 and all of the above + +- reg: The base address of the APDMA register bank. + +- interrupts: A single interrupt specifier. + One interrupt per dma-requests, or 8 if no dma-requests property is present + +- dma-requests: The number of DMA channels + +- clocks : Must contain an entry for each entry in clock-names. + See ../clocks/clock-bindings.txt for details. +- clock-names: The APDMA clock for register accesses + +- mediatek,dma-33bits: Present if the DMA requires support + +Examples: + + apdma: dma-controller@11000400 { + compatible = "mediatek,mt2712-uart-dma"; + reg = <0 0x11000400 0 0x80>, + <0 0x11000480 0 0x80>, + <0 0x11000500 0 0x80>, + <0 0x11000580 0 0x80>, + <0 0x11000600 0 0x80>, + <0 0x11000680 0 0x80>, + <0 0x11000700 0 0x80>, + <0 0x11000780 0 0x80>, + <0 0x11000800 0 0x80>, + <0 0x11000880 0 0x80>, + <0 0x11000900 0 0x80>, + <0 0x11000980 0 0x80>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + ; + dma-requests = <12>; + clocks = <&pericfg CLK_PERI_AP_DMA>; + clock-names = "apdma"; + mediatek,dma-33bits; + #dma-cells = <1>; + }; From e63d79d1ffcd2201a2dbff1d7a1184b8f3ec74cf Mon Sep 17 00:00:00 2001 From: Gustavo Pimentel Date: Tue, 4 Jun 2019 15:29:22 +0200 Subject: [PATCH 30/68] dmaengine: Add Synopsys eDMA IP core driver Add the Synopsys PCIe Endpoint eDMA IP core driver to the kernel. This IP is generally distributed with the Synopsys PCIe Endpoint IP (depending on the use case and licensing agreement). This core driver initializes and configures the eDMA IP using vma-helpers functions and the dma-engine subsystem. This driver can be compiled as a built-in or as an external module in the kernel.
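For instance, either of these .config fragments selects it (a sketch based on the DW_EDMA Kconfig symbol introduced below):

	CONFIG_DW_EDMA=y   # built-in
	CONFIG_DW_EDMA=m   # loadable module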
To enable this driver, just select the DW_EDMA option in the kernel configuration; note that it requires, and automatically selects, the DMA_ENGINE and DMA_VIRTUAL_CHANNELS options too. In order to transfer data from point A to B as fast as possible this IP requires a dedicated memory space containing a linked list of elements. All elements of this linked list are contiguous and each one describes a data transfer (source and destination addresses, length and a control variable). For the sake of simplicity, let's assume a memory space for channel write 0 which allows about 42 elements.

 +---------+
 | Desc #0 |-+
 +---------+ |
             V
      +----------+
      | Chunk #0 |-+
      |  CB = 1  | |  +----------+  +-----+  +-----------+  +-----+
      +----------+ +->| Burst #0 |->| ... |->| Burst #41 |->| llp |
           |          +----------+  +-----+  +-----------+  +-----+
           V
      +----------+
      | Chunk #1 |-+
      |  CB = 0  | |  +-----------+  +-----+  +-----------+  +-----+
      +----------+ +->| Burst #42 |->| ... |->| Burst #83 |->| llp |
           |          +-----------+  +-----+  +-----------+  +-----+
           V
      +----------+
      | Chunk #2 |-+
      |  CB = 1  | |  +-----------+  +-----+  +------------+  +-----+
      +----------+ +->| Burst #84 |->| ... |->| Burst #125 |->| llp |
           |          +-----------+  +-----+  +------------+  +-----+
           V
      +----------+
      | Chunk #3 |-+
      |  CB = 0  | |  +------------+  +-----+  +------------+  +-----+
      +----------+ +->| Burst #126 |->| ... |->| Burst #129 |->| llp |
                      +------------+  +-----+  +------------+  +-----+

Legend:
 - Linked list, also known as Chunk
 - Linked list element, also known as Burst
 - CB, also known as Change Bit, is a control bit (typically toggled) that allows to easily identify and differentiate between the current linked list and the previous or the next one
 - LLP, a special element that indicates the end of the linked list element stream and also informs that the next CB should be toggled

On the last Burst of each Chunk (Burst #41, Burst #83, Burst #125 and also Burst #129) some flags are set in its control variable (the RIE and LIE bits) that trigger the "done" interrupt. In the interrupt callback it is decided whether to recycle the linked list memory space by writing a new set of Burst elements (if there are still Chunks to transfer) or to consider the transfer completed (if there are no Chunks left to transfer). In scatter-gather transfer mode, the client submits a scatter-gather list of n (in this case 130) elements, which is divided into multiple Chunks; each Chunk holds a limited number of Bursts (in this case 42), and after all of its Bursts have been transferred an interrupt is triggered, which allows the dedicated linked list memory to be recycled with the new information relative to the next Chunk and its associated Bursts, repeating the whole cycle. In cyclic transfer mode, the client submits a buffer pointer, its length and a number of repetitions; in this case each Burst corresponds directly to one repetition. Each Burst describes a data transfer from point A (source) to point B (destination) with a length that can be from 1 byte up to 4 GB. Since the dedicated memory space where the linked list resides is limited, the whole set of n Burst elements is organized in several Chunks, which are used later to recycle the dedicated memory space and initiate a new sequence of data transfers. The whole transfer is considered complete when all Bursts have been transferred. Currently this IP has a well-known register map, which includes support for legacy and unroll modes.
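To make the Chunk/Burst organization above concrete, here is a conceptual C sketch. The field names follow their use in the dw-edma-core.c code added below, but this is illustrative only; the real definitions live in dw-edma-core.h and the exact types are assumptions:

	/* One linked list element ("Burst"): a single src -> dst copy. */
	struct dw_edma_burst {
		struct list_head list;		/* links bursts of a chunk */
		u64 sar;			/* source address */
		u64 dar;			/* destination address */
		u32 sz;				/* bytes to transfer */
	};

	/* One linked list ("Chunk"): a recyclable window of bursts. */
	struct dw_edma_chunk {
		struct list_head list;		/* links chunks of a descriptor */
		struct dw_edma_burst *burst;	/* head of the burst list */
		u32 bursts_alloc;		/* bursts queued in this chunk */
		u8 cb;				/* change bit, toggled per chunk */
		struct dw_edma_region ll_region; /* backing linked list memory */
	};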
Legacy mode is the version of this register map that has a multiplexer register which allows switching the registers between all write and read channels, while unroll mode repeats all write and read channel registers with an offset between them. This register map is called v0. The IP team is creating a new register map more suitable to the latest PCIe features, which will very likely change the register map; that version will be called v1. As soon as this new version is released by the IP team, support for it will be included in this driver. Logically, patches 1, 2 and 3 should be squashed into a single patch, but for ease of review they were divided into these 3 patches. Signed-off-by: Gustavo Pimentel Cc: Vinod Koul Cc: Dan Williams Cc: Andy Shevchenko Cc: Russell King Cc: Joao Pinto Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 + drivers/dma/Makefile | 1 + drivers/dma/dw-edma/Kconfig | 9 + drivers/dma/dw-edma/Makefile | 4 + drivers/dma/dw-edma/dw-edma-core.c | 936 +++++++++++++++++++++++++++++ drivers/dma/dw-edma/dw-edma-core.h | 165 +++++ include/linux/dma/edma.h | 47 ++ 7 files changed, 1164 insertions(+) create mode 100644 drivers/dma/dw-edma/Kconfig create mode 100644 drivers/dma/dw-edma/Makefile create mode 100644 drivers/dma/dw-edma/dw-edma-core.c create mode 100644 drivers/dma/dw-edma/dw-edma-core.h create mode 100644 include/linux/dma/edma.h diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index eaf78f4e07ce..76859aa2688c 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -665,6 +665,8 @@ source "drivers/dma/qcom/Kconfig" source "drivers/dma/dw/Kconfig" +source "drivers/dma/dw-edma/Kconfig" + source "drivers/dma/hsu/Kconfig" source "drivers/dma/sh/Kconfig" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 6126e1c3a875..5bddf6f8790f 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ obj-$(CONFIG_DW_DMAC_CORE) += dw/ +obj-$(CONFIG_DW_EDMA) += dw-edma/ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig new file mode 100644 index 000000000000..3016bed63589 --- /dev/null +++ b/drivers/dma/dw-edma/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 + +config DW_EDMA + tristate "Synopsys DesignWare eDMA controller driver" + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support the Synopsys DesignWare eDMA controller, normally + implemented on endpoints SoCs. diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile new file mode 100644 index 000000000000..322401089891 --- /dev/null +++ b/drivers/dma/dw-edma/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_DW_EDMA) += dw-edma.o +dw-edma-objs := dw-edma-core.o diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c new file mode 100644 index 000000000000..c9d032f49dc3 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -0,0 +1,936 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw-edma-core.h" +#include "../dmaengine.h" +#include "../virt-dma.h" + +static inline +struct device *dchan2dev(struct dma_chan *dchan) +{ + return &dchan->dev->device; +} + +static inline +struct device *chan2dev(struct dw_edma_chan *chan) +{ + return &chan->vc.chan.dev->device; +} + +static inline +struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct dw_edma_desc, vd); +} + +static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *burst; + + burst = kzalloc(sizeof(*burst), GFP_NOWAIT); + if (unlikely(!burst)) + return NULL; + + INIT_LIST_HEAD(&burst->list); + if (chunk->burst) { + /* Create and add new element into the linked list */ + chunk->bursts_alloc++; + list_add_tail(&burst->list, &chunk->burst->list); + } else { + /* List head */ + chunk->bursts_alloc = 0; + chunk->burst = burst; + } + + return burst; +} + +static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chan *chan = desc->chan; + struct dw_edma *dw = chan->chip->dw; + struct dw_edma_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); + if (unlikely(!chunk)) + return NULL; + + INIT_LIST_HEAD(&chunk->list); + chunk->chan = chan; + /* Toggling change bit (CB) in each chunk, this is a mechanism to + * inform the eDMA HW block that this is a new linked list ready + * to be consumed. + * - Odd chunks originate CB equal to 0 + * - Even chunks originate CB equal to 1 + */ + chunk->cb = !(desc->chunks_alloc % 2); + chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off; + chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off; + + if (desc->chunk) { + /* Create and add new element into the linked list */ + desc->chunks_alloc++; + list_add_tail(&chunk->list, &desc->chunk->list); + if (!dw_edma_alloc_burst(chunk)) { + kfree(chunk); + return NULL; + } + } else { + /* List head */ + chunk->burst = NULL; + desc->chunks_alloc = 0; + desc->chunk = chunk; + } + + return chunk; +} + +static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (unlikely(!desc)) + return NULL; + + desc->chan = chan; + if (!dw_edma_alloc_chunk(desc)) { + kfree(desc); + return NULL; + } + + return desc; +} + +static void dw_edma_free_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child, *_next; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &chunk->burst->list, list) { + list_del(&child->list); + kfree(child); + chunk->bursts_alloc--; + } + + /* Remove the list head */ + kfree(child); + chunk->burst = NULL; +} + +static void dw_edma_free_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chunk *child, *_next; + + if (!desc->chunk) + return; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &desc->chunk->list, list) { + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; + } + + /* Remove the list head */ + kfree(child); + desc->chunk = NULL; +} + +static void dw_edma_free_desc(struct dw_edma_desc *desc) +{ + dw_edma_free_chunk(desc); + kfree(desc); +} + +static void vchan_free_desc(struct virt_dma_desc *vdesc) +{ + dw_edma_free_desc(vd2dw_edma_desc(vdesc)); +} + +static void dw_edma_start_transfer(struct 
dw_edma_chan *chan) +{ + struct dw_edma_chunk *child; + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&chan->vc); + if (!vd) + return; + + desc = vd2dw_edma_desc(vd); + if (!desc) + return; + + child = list_first_entry_or_null(&desc->chunk->list, + struct dw_edma_chunk, list); + if (!child) + return; + + dw_edma_v0_core_start(child, !desc->xfer_sz); + desc->xfer_sz += child->ll_region.sz; + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; +} + +static int dw_edma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + memcpy(&chan->config, config, sizeof(*config)); + chan->configured = true; + + return 0; +} + +static int dw_edma_device_pause(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) + err = -EPERM; + else if (chan->status != EDMA_ST_BUSY) + err = -EPERM; + else if (chan->request != EDMA_REQ_NONE) + err = -EPERM; + else + chan->request = EDMA_REQ_PAUSE; + + return err; +} + +static int dw_edma_device_resume(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) { + err = -EPERM; + } else if (chan->status != EDMA_ST_PAUSE) { + err = -EPERM; + } else if (chan->request != EDMA_REQ_NONE) { + err = -EPERM; + } else { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + + return err; +} + +static int dw_edma_device_terminate_all(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + LIST_HEAD(head); + + if (!chan->configured) { + /* Do nothing */ + } else if (chan->status == EDMA_ST_PAUSE) { + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->status == EDMA_ST_IDLE) { + chan->configured = false; + } else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) { + /* + * The channel is in a false BUSY state, probably didn't + * receive or lost an interrupt + */ + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->request > EDMA_REQ_PAUSE) { + err = -EPERM; + } else { + chan->request = EDMA_REQ_STOP; + } + + return err; +} + +static void dw_edma_device_issue_pending(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + if (chan->configured && chan->request == EDMA_REQ_NONE && + chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static enum dma_status +dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned long flags; + enum dma_status ret; + u32 residue = 0; + + ret = dma_cookie_status(dchan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE) + ret = DMA_PAUSED; + + if (!txstate) + goto ret_residue; + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_find_desc(&chan->vc, cookie); + if (vd) { + desc = vd2dw_edma_desc(vd); + if (desc) + residue = desc->alloc_sz - desc->xfer_sz; + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + +ret_residue: + dma_set_residue(txstate, residue); + + return ret; +} 
+ +static struct dma_async_tx_descriptor * +dw_edma_device_transfer(struct dw_edma_transfer *xfer) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan); + enum dma_transfer_direction direction = xfer->direction; + phys_addr_t src_addr, dst_addr; + struct scatterlist *sg = NULL; + struct dw_edma_chunk *chunk; + struct dw_edma_burst *burst; + struct dw_edma_desc *desc; + u32 cnt; + int i; + + if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) || + (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)) + return NULL; + + if (xfer->cyclic) { + if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt) + return NULL; + } else { + if (xfer->xfer.sg.len < 1) + return NULL; + } + + if (!chan->configured) + return NULL; + + desc = dw_edma_alloc_desc(chan); + if (unlikely(!desc)) + goto err_alloc; + + chunk = dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + + src_addr = chan->config.src_addr; + dst_addr = chan->config.dst_addr; + + if (xfer->cyclic) { + cnt = xfer->xfer.cyclic.cnt; + } else { + cnt = xfer->xfer.sg.len; + sg = xfer->xfer.sg.sgl; + } + + for (i = 0; i < cnt; i++) { + if (!xfer->cyclic && !sg) + break; + + if (chunk->bursts_alloc == chan->ll_max) { + chunk = dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + } + + burst = dw_edma_alloc_burst(chunk); + if (unlikely(!burst)) + goto err_alloc; + + if (xfer->cyclic) + burst->sz = xfer->xfer.cyclic.len; + else + burst->sz = sg_dma_len(sg); + + chunk->ll_region.sz += burst->sz; + desc->alloc_sz += burst->sz; + + if (direction == DMA_DEV_TO_MEM) { + burst->sar = src_addr; + if (xfer->cyclic) { + burst->dar = xfer->xfer.cyclic.paddr; + } else { + burst->dar = sg_dma_address(sg); + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that why the source + * and destination addresses are increased + * by the same portion (data length) + */ + src_addr += sg_dma_len(sg); + } + } else { + burst->dar = dst_addr; + if (xfer->cyclic) { + burst->sar = xfer->xfer.cyclic.paddr; + } else { + burst->sar = sg_dma_address(sg); + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that why the source + * and destination addresses are increased + * by the same portion (data length) + */ + dst_addr += sg_dma_len(sg); + } + } + + if (!xfer->cyclic) + sg = sg_next(sg); + } + + return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); + +err_alloc: + if (desc) + dw_edma_free_desc(desc); + + return NULL; +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.sg.sgl = sgl; + xfer.xfer.sg.len = len; + xfer.flags = flags; + xfer.cyclic = false; + + return dw_edma_device_transfer(&xfer); +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr, + size_t len, size_t count, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.cyclic.paddr = paddr; + xfer.xfer.cyclic.len = len; + xfer.xfer.cyclic.cnt = count; + xfer.flags = flags; + xfer.cyclic = true; + + return 
dw_edma_device_transfer(&xfer); +} + +static void dw_edma_done_interrupt(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned long flags; + + dw_edma_v0_core_clear_done_int(chan); + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + switch (chan->request) { + case EDMA_REQ_NONE: + desc = vd2dw_edma_desc(vd); + if (desc->chunks_alloc) { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } else { + list_del(&vd->node); + vchan_cookie_complete(vd); + chan->status = EDMA_ST_IDLE; + } + break; + + case EDMA_REQ_STOP: + list_del(&vd->node); + vchan_cookie_complete(vd); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; + break; + + case EDMA_REQ_PAUSE: + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_PAUSE; + break; + + default: + break; + } + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static void dw_edma_abort_interrupt(struct dw_edma_chan *chan) +{ + struct virt_dma_desc *vd; + unsigned long flags; + + dw_edma_v0_core_clear_abort_int(chan); + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; +} + +static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write) +{ + struct dw_edma_irq *dw_irq = data; + struct dw_edma *dw = dw_irq->dw; + unsigned long total, pos, val; + unsigned long off; + u32 mask; + + if (write) { + total = dw->wr_ch_cnt; + off = 0; + mask = dw_irq->wr_mask; + } else { + total = dw->rd_ch_cnt; + off = dw->wr_ch_cnt; + mask = dw_irq->rd_mask; + } + + val = dw_edma_v0_core_status_done_int(dw, write ? + EDMA_DIR_WRITE : + EDMA_DIR_READ); + val &= mask; + for_each_set_bit(pos, &val, total) { + struct dw_edma_chan *chan = &dw->chan[pos + off]; + + dw_edma_done_interrupt(chan); + } + + val = dw_edma_v0_core_status_abort_int(dw, write ? 
+						     EDMA_DIR_WRITE :
+						     EDMA_DIR_READ);
+	val &= mask;
+	for_each_set_bit(pos, &val, total) {
+		struct dw_edma_chan *chan = &dw->chan[pos + off];
+
+		dw_edma_abort_interrupt(chan);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
+{
+	return dw_edma_interrupt(irq, data, true);
+}
+
+static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
+{
+	return dw_edma_interrupt(irq, data, false);
+}
+
+static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
+{
+	dw_edma_interrupt(irq, data, true);
+	dw_edma_interrupt(irq, data, false);
+
+	return IRQ_HANDLED;
+}
+
+static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+	if (chan->status != EDMA_ST_IDLE)
+		return -EBUSY;
+
+	pm_runtime_get(chan->chip->dev);
+
+	return 0;
+}
+
+static void dw_edma_free_chan_resources(struct dma_chan *dchan)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	int ret;
+
+	while (time_before(jiffies, timeout)) {
+		ret = dw_edma_device_terminate_all(dchan);
+		if (!ret)
+			break;
+
+		if (time_after_eq(jiffies, timeout))
+			return;
+
+		cpu_relax();
+	}
+
+	pm_runtime_put(chan->chip->dev);
+}
+
+static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
+				 u32 wr_alloc, u32 rd_alloc)
+{
+	struct dw_edma_region *dt_region;
+	struct device *dev = chip->dev;
+	struct dw_edma *dw = chip->dw;
+	struct dw_edma_chan *chan;
+	size_t ll_chunk, dt_chunk;
+	struct dw_edma_irq *irq;
+	struct dma_device *dma;
+	u32 i, j, cnt, ch_cnt;
+	u32 alloc, off_alloc;
+	int err = 0;
+	u32 pos;
+
+	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
+	ll_chunk = dw->ll_region.sz;
+	dt_chunk = dw->dt_region.sz;
+
+	/* Calculate linked list chunk for each channel */
+	ll_chunk /= roundup_pow_of_two(ch_cnt);
+
+	/* Calculate data chunk for each channel */
+	dt_chunk /= roundup_pow_of_two(ch_cnt);
+
+	if (write) {
+		i = 0;
+		cnt = dw->wr_ch_cnt;
+		dma = &dw->wr_edma;
+		alloc = wr_alloc;
+		off_alloc = 0;
+	} else {
+		i = dw->wr_ch_cnt;
+		cnt = dw->rd_ch_cnt;
+		dma = &dw->rd_edma;
+		alloc = rd_alloc;
+		off_alloc = wr_alloc;
+	}
+
+	INIT_LIST_HEAD(&dma->channels);
+	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
+		chan = &dw->chan[i];
+
+		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
+		if (!dt_region)
+			return -ENOMEM;
+
+		chan->vc.chan.private = dt_region;
+
+		chan->chip = chip;
+		chan->id = j;
+		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
+		chan->configured = false;
+		chan->request = EDMA_REQ_NONE;
+		chan->status = EDMA_ST_IDLE;
+
+		chan->ll_off = (ll_chunk * i);
+		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;
+
+		chan->dt_off = (dt_chunk * i);
+
+		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
+			 write ? "write" : "read", j,
+			 chan->ll_off, chan->ll_max);
+
+		if (dw->nr_irqs == 1)
+			pos = 0;
+		else
+			pos = off_alloc + (j % alloc);
+
+		irq = &dw->irq[pos];
+
+		if (write)
+			irq->wr_mask |= BIT(j);
+		else
+			irq->rd_mask |= BIT(j);
+
+		irq->dw = dw;
+		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
+
+		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
+			 write ?
"write" : "read", j, + chan->msi.address_hi, chan->msi.address_lo, + chan->msi.data); + + chan->vc.desc_free = vchan_free_desc; + vchan_init(&chan->vc, dma); + + dt_region->paddr = dw->dt_region.paddr + chan->dt_off; + dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off; + dt_region->sz = dt_chunk; + + dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n", + write ? "write" : "read", j, chan->dt_off); + + dw_edma_v0_core_device_config(chan); + } + + /* Set DMA channel capabilities */ + dma_cap_zero(dma->cap_mask); + dma_cap_set(DMA_SLAVE, dma->cap_mask); + dma_cap_set(DMA_CYCLIC, dma->cap_mask); + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV); + dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + dma->chancnt = cnt; + + /* Set DMA channel callbacks */ + dma->dev = chip->dev; + dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources; + dma->device_free_chan_resources = dw_edma_free_chan_resources; + dma->device_config = dw_edma_device_config; + dma->device_pause = dw_edma_device_pause; + dma->device_resume = dw_edma_device_resume; + dma->device_terminate_all = dw_edma_device_terminate_all; + dma->device_issue_pending = dw_edma_device_issue_pending; + dma->device_tx_status = dw_edma_device_tx_status; + dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg; + dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic; + + dma_set_max_seg_size(dma->dev, U32_MAX); + + /* Register DMA device */ + err = dma_async_device_register(dma); + + return err; +} + +static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt) +{ + if (*nr_irqs && *alloc < cnt) { + (*alloc)++; + (*nr_irqs)--; + } +} + +static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt) +{ + while (*mask * alloc < cnt) + (*mask)++; +} + +static int dw_edma_irq_request(struct dw_edma_chip *chip, + u32 *wr_alloc, u32 *rd_alloc) +{ + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + u32 wr_mask = 1; + u32 rd_mask = 1; + int i, err = 0; + u32 ch_cnt; + + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; + + if (dw->nr_irqs < 1) + return -EINVAL; + + if (dw->nr_irqs == 1) { + /* Common IRQ shared among all channels */ + err = request_irq(pci_irq_vector(to_pci_dev(dev), 0), + dw_edma_interrupt_common, + IRQF_SHARED, dw->name, &dw->irq[0]); + if (err) { + dw->nr_irqs = 0; + return err; + } + + get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0), + &dw->irq[0].msi); + } else { + /* Distribute IRQs equally among all channels */ + int tmp = dw->nr_irqs; + + while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) { + dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt); + dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt); + } + + dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt); + dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt); + + for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { + err = request_irq(pci_irq_vector(to_pci_dev(dev), i), + i < *wr_alloc ? 
+ dw_edma_interrupt_write : + dw_edma_interrupt_read, + IRQF_SHARED, dw->name, + &dw->irq[i]); + if (err) { + dw->nr_irqs = i; + return err; + } + + get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i), + &dw->irq[i].msi); + } + + dw->nr_irqs = i; + } + + return err; +} + +int dw_edma_probe(struct dw_edma_chip *chip) +{ + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + u32 wr_alloc = 0; + u32 rd_alloc = 0; + int i, err; + + raw_spin_lock_init(&dw->lock); + + /* Find out how many write channels are supported by hardware */ + dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE); + if (!dw->wr_ch_cnt) + return -EINVAL; + + /* Find out how many read channels are supported by hardware */ + dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ); + if (!dw->rd_ch_cnt) + return -EINVAL; + + dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n", + dw->wr_ch_cnt, dw->rd_ch_cnt); + + /* Allocate channels */ + dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt, + sizeof(*dw->chan), GFP_KERNEL); + if (!dw->chan) + return -ENOMEM; + + snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id); + + /* Disable eDMA, only to establish the ideal initial conditions */ + dw_edma_v0_core_off(dw); + + /* Request IRQs */ + err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc); + if (err) + return err; + + /* Setup write channels */ + err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc); + if (err) + goto err_irq_free; + + /* Setup read channels */ + err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc); + if (err) + goto err_irq_free; + + /* Power management */ + pm_runtime_enable(dev); + + /* Turn debugfs on */ + dw_edma_v0_core_debugfs_on(chip); + + return 0; + +err_irq_free: + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]); + + dw->nr_irqs = 0; + + return err; +} +EXPORT_SYMBOL_GPL(dw_edma_probe); + +int dw_edma_remove(struct dw_edma_chip *chip) +{ + struct dw_edma_chan *chan, *_chan; + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + int i; + + /* Disable eDMA */ + dw_edma_v0_core_off(dw); + + /* Free irqs */ + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]); + + /* Power management */ + pm_runtime_disable(dev); + + list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels, + vc.chan.device_node) { + list_del(&chan->vc.chan.device_node); + tasklet_kill(&chan->vc.task); + } + + list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels, + vc.chan.device_node) { + list_del(&chan->vc.chan.device_node); + tasklet_kill(&chan->vc.task); + } + + /* Deregister eDMA device */ + dma_async_device_unregister(&dw->wr_edma); + dma_async_device_unregister(&dw->rd_edma); + + /* Turn debugfs off */ + dw_edma_v0_core_debugfs_off(); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_edma_remove); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver"); +MODULE_AUTHOR("Gustavo Pimentel "); diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h new file mode 100644 index 000000000000..b6cc90cbc9dc --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel + */ + +#ifndef _DW_EDMA_CORE_H +#define _DW_EDMA_CORE_H + +#include +#include + +#include "../virt-dma.h" + +#define EDMA_LL_SZ 24 + +enum dw_edma_dir { + EDMA_DIR_WRITE = 0, + EDMA_DIR_READ +}; + +enum dw_edma_mode { + EDMA_MODE_LEGACY = 0, + EDMA_MODE_UNROLL +}; + +enum dw_edma_request { + EDMA_REQ_NONE = 0, + EDMA_REQ_STOP, + EDMA_REQ_PAUSE +}; + +enum dw_edma_status { + EDMA_ST_IDLE = 0, + EDMA_ST_PAUSE, + EDMA_ST_BUSY +}; + +struct dw_edma_chan; +struct dw_edma_chunk; + +struct dw_edma_burst { + struct list_head list; + u64 sar; + u64 dar; + u32 sz; +}; + +struct dw_edma_region { + phys_addr_t paddr; + dma_addr_t vaddr; + size_t sz; +}; + +struct dw_edma_chunk { + struct list_head list; + struct dw_edma_chan *chan; + struct dw_edma_burst *burst; + + u32 bursts_alloc; + + u8 cb; + struct dw_edma_region ll_region; /* Linked list */ +}; + +struct dw_edma_desc { + struct virt_dma_desc vd; + struct dw_edma_chan *chan; + struct dw_edma_chunk *chunk; + + u32 chunks_alloc; + + u32 alloc_sz; + u32 xfer_sz; +}; + +struct dw_edma_chan { + struct virt_dma_chan vc; + struct dw_edma_chip *chip; + int id; + enum dw_edma_dir dir; + + off_t ll_off; + u32 ll_max; + + off_t dt_off; + + struct msi_msg msi; + + enum dw_edma_request request; + enum dw_edma_status status; + u8 configured; + + struct dma_slave_config config; +}; + +struct dw_edma_irq { + struct msi_msg msi; + u32 wr_mask; + u32 rd_mask; + struct dw_edma *dw; +}; + +struct dw_edma { + char name[20]; + + struct dma_device wr_edma; + u16 wr_ch_cnt; + + struct dma_device rd_edma; + u16 rd_ch_cnt; + + struct dw_edma_region rg_region; /* Registers */ + struct dw_edma_region ll_region; /* Linked list */ + struct dw_edma_region dt_region; /* Data */ + + struct dw_edma_irq *irq; + int nr_irqs; + + u32 version; + enum dw_edma_mode mode; + + struct dw_edma_chan *chan; + const struct dw_edma_core_ops *ops; + + raw_spinlock_t lock; /* Only for legacy */ +}; + +struct dw_edma_sg { + struct scatterlist *sgl; + unsigned int len; +}; + +struct dw_edma_cyclic { + dma_addr_t paddr; + size_t len; + size_t cnt; +}; + +struct dw_edma_transfer { + struct dma_chan *dchan; + union dw_edma_xfer { + struct dw_edma_sg sg; + struct dw_edma_cyclic cyclic; + } xfer; + enum dma_transfer_direction direction; + unsigned long flags; + bool cyclic; +}; + +static inline +struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc) +{ + return container_of(vc, struct dw_edma_chan, vc); +} + +static inline +struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan) +{ + return vc2dw_edma_chan(to_virt_chan(dchan)); +} + +#endif /* _DW_EDMA_CORE_H */ diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h new file mode 100644 index 000000000000..cab6e18773da --- /dev/null +++ b/include/linux/dma/edma.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel
+ */
+
+#ifndef _DW_EDMA_H
+#define _DW_EDMA_H
+
+#include
+#include
+
+struct dw_edma;
+
+/**
+ * struct dw_edma_chip - representation of DesignWare eDMA controller hardware
+ * @dev:	struct device of the eDMA controller
+ * @id:		instance ID
+ * @irq:	irq line
+ * @dw:		struct dw_edma that is filled by dw_edma_probe()
+ */
+struct dw_edma_chip {
+	struct device		*dev;
+	int			id;
+	int			irq;
+	struct dw_edma		*dw;
+};
+
+/* Export to the platform drivers */
+#if IS_ENABLED(CONFIG_DW_EDMA)
+int dw_edma_probe(struct dw_edma_chip *chip);
+int dw_edma_remove(struct dw_edma_chip *chip);
+#else
+static inline int dw_edma_probe(struct dw_edma_chip *chip)
+{
+	return -ENODEV;
+}
+
+static inline int dw_edma_remove(struct dw_edma_chip *chip)
+{
+	return 0;
+}
+#endif /* CONFIG_DW_EDMA */
+
+#endif /* _DW_EDMA_H */

From 7e4b8a4fbe2cecab0959e862604803d063f50029 Mon Sep 17 00:00:00 2001
From: Gustavo Pimentel
Date: Tue, 4 Jun 2019 15:29:23 +0200
Subject: [PATCH 31/68] dmaengine: Add Synopsys eDMA IP version 0 support

Add support for the eDMA IP version 0 driver for both register maps
(legacy and unroll).

The legacy register map was the initial implementation: all channel
registers are multiplexed behind a viewport register that exposes only
one channel at a time, and the selected channel can be changed at any
moment, which risks a race condition. This register map is neither
effective nor efficient in a multithreaded environment, which led to
the development of the unroll register map, in which every channel's
registers are accessible at any time, each channel context spaced at a
fixed offset from the next.

This version supports a maximum of 16 independent channels (8 write +
8 read), which can run simultaneously. It implements scatter-gather
transfers through a linked list, whose size depends on the allocated
memory, divided equally among all channels.

Each linked list descriptor can transfer from 1 byte to 4 Gbytes and is
DWORD-aligned. Both SAR (Source Address Register) and DAR (Destination
Address Register) are byte-aligned.
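To make the unroll layout concrete, below is a minimal userspace sketch
(illustration only, not kernel code and not part of this patch). The struct
sizes mirror the dw-edma-v0-regs.h layout this patch adds; the program prints
where each channel's write and read register contexts sit, showing why no
viewport selection (and no lock) is needed in unroll mode:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* 9 x 32-bit channel registers (0x24 bytes), as in dw_edma_v0_ch_regs */
struct ch_regs { uint32_t r[9]; };

/* One unrolled channel context: write regs padded to 0x100, then read
 * regs padded to another 0x100, matching dw_edma_v0_ch (0x200 bytes). */
struct unroll_ch {
	struct ch_regs wr; uint32_t pad1[55];
	struct ch_regs rd; uint32_t pad2[55];
};

int main(void)
{
	int ch;

	/* Channel contexts start at offset 0x200 and are always visible,
	 * so ch0 write lands at 0x200, ch0 read at 0x300, and so on. */
	for (ch = 0; ch < 8; ch++)
		printf("ch%d: wr @ 0x%04zx, rd @ 0x%04zx\n", ch,
		       (size_t)0x200 + ch * sizeof(struct unroll_ch),
		       (size_t)0x200 + ch * sizeof(struct unroll_ch) +
		       offsetof(struct unroll_ch, rd));
	return 0;
}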
Signed-off-by: Gustavo Pimentel Cc: Vinod Koul Cc: Dan Williams Cc: Andy Shevchenko Cc: Russell King Cc: Joao Pinto Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/Makefile | 3 +- drivers/dma/dw-edma/dw-edma-core.c | 1 + drivers/dma/dw-edma/dw-edma-v0-core.c | 352 ++++++++++++++++++++++++++ drivers/dma/dw-edma/dw-edma-v0-core.h | 28 ++ drivers/dma/dw-edma/dw-edma-v0-regs.h | 158 ++++++++++++ 5 files changed, 541 insertions(+), 1 deletion(-) create mode 100644 drivers/dma/dw-edma/dw-edma-v0-core.c create mode 100644 drivers/dma/dw-edma/dw-edma-v0-core.h create mode 100644 drivers/dma/dw-edma/dw-edma-v0-regs.h diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile index 322401089891..01c7c633cdab 100644 --- a/drivers/dma/dw-edma/Makefile +++ b/drivers/dma/dw-edma/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DW_EDMA) += dw-edma.o -dw-edma-objs := dw-edma-core.o +dw-edma-objs := dw-edma-core.o \ + dw-edma-v0-core.o diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index c9d032f49dc3..60d6a4690020 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -17,6 +17,7 @@ #include #include "dw-edma-core.h" +#include "dw-edma-v0-core.h" #include "../dmaengine.h" #include "../virt-dma.h" diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c new file mode 100644 index 000000000000..a38c47312dcf --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel + */ + +#include + +#include "dw-edma-core.h" +#include "dw-edma-v0-core.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-v0-debugfs.h" + +enum dw_edma_control { + DW_EDMA_V0_CB = BIT(0), + DW_EDMA_V0_TCB = BIT(1), + DW_EDMA_V0_LLP = BIT(2), + DW_EDMA_V0_LIE = BIT(3), + DW_EDMA_V0_RIE = BIT(4), + DW_EDMA_V0_CCS = BIT(8), + DW_EDMA_V0_LLE = BIT(9), +}; + +static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) +{ + return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr; +} + +#define SET(dw, name, value) \ + writel(value, &(__dw_regs(dw)->name)) + +#define GET(dw, name) \ + readl(&(__dw_regs(dw)->name)) + +#define SET_RW(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET(dw, wr_##name, value); \ + else \ + SET(dw, rd_##name, value); \ + } while (0) + +#define GET_RW(dw, dir, name) \ + ((dir) == EDMA_DIR_WRITE \ + ? 
GET(dw, wr_##name) \ + : GET(dw, rd_##name)) + +#define SET_BOTH(dw, name, value) \ + do { \ + SET(dw, wr_##name, value); \ + SET(dw, rd_##name, value); \ + } while (0) + +static inline struct dw_edma_v0_ch_regs __iomem * +__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) +{ + if (dw->mode == EDMA_MODE_LEGACY) + return &(__dw_regs(dw)->type.legacy.ch); + + if (dir == EDMA_DIR_WRITE) + return &__dw_regs(dw)->type.unroll.ch[ch].wr; + + return &__dw_regs(dw)->type.unroll.ch[ch].rd; +} + +static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + u32 value, void __iomem *addr) +{ + if (dw->mode == EDMA_MODE_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + writel(value, addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + writel(value, addr); + } +} + +static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + const void __iomem *addr) +{ + u32 value; + + if (dw->mode == EDMA_MODE_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + value = readl(addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + value = readl(addr); + } + + return value; +} + +#define SET_CH(dw, dir, ch, name, value) \ + writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define GET_CH(dw, dir, ch, name) \ + readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define SET_LL(ll, value) \ + writel(value, ll) + +/* eDMA management callbacks */ +void dw_edma_v0_core_off(struct dw_edma *dw) +{ + SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH(dw, engine_en, 0); +} + +u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) +{ + u32 num_ch; + + if (dir == EDMA_DIR_WRITE) + num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl)); + else + num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl)); + + if (num_ch > EDMA_V0_MAX_NR_CH) + num_ch = EDMA_V0_MAX_NR_CH; + + return (u16)num_ch; +} + +enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + u32 tmp; + + tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, + GET_CH(dw, chan->dir, chan->id, ch_control1)); + + if (tmp == 1) + return DMA_IN_PROGRESS; + else if (tmp == 3) + return DMA_COMPLETE; + else + return DMA_ERROR; +} + +void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + + SET_RW(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); +} + +void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + + SET_RW(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); +} + +u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status)); +} + +u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status)); +} 
+ +static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child; + struct dw_edma_v0_lli *lli; + struct dw_edma_v0_llp *llp; + u32 control = 0, i = 0; + u64 sar, dar, addr; + int j; + + lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr; + + if (chunk->cb) + control = DW_EDMA_V0_CB; + + j = chunk->bursts_alloc; + list_for_each_entry(child, &chunk->burst->list, list) { + j--; + if (!j) + control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE); + + /* Channel control */ + SET_LL(&lli[i].control, control); + /* Transfer size */ + SET_LL(&lli[i].transfer_size, child->sz); + /* SAR - low, high */ + sar = cpu_to_le64(child->sar); + SET_LL(&lli[i].sar_low, lower_32_bits(sar)); + SET_LL(&lli[i].sar_high, upper_32_bits(sar)); + /* DAR - low, high */ + dar = cpu_to_le64(child->dar); + SET_LL(&lli[i].dar_low, lower_32_bits(dar)); + SET_LL(&lli[i].dar_high, upper_32_bits(dar)); + i++; + } + + llp = (struct dw_edma_v0_llp *)&lli[i]; + control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; + if (!chunk->cb) + control |= DW_EDMA_V0_CB; + + /* Channel control */ + SET_LL(&llp->control, control); + /* Linked list - low, high */ + addr = cpu_to_le64(chunk->ll_region.paddr); + SET_LL(&llp->llp_low, lower_32_bits(addr)); + SET_LL(&llp->llp_high, upper_32_bits(addr)); +} + +void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) +{ + struct dw_edma_chan *chan = chunk->chan; + struct dw_edma *dw = chan->chip->dw; + u32 tmp; + u64 llp; + + dw_edma_v0_core_write_chunk(chunk); + + if (first) { + /* Enable engine */ + SET_RW(dw, chan->dir, engine_en, BIT(0)); + /* Interrupt unmask - done, abort */ + tmp = GET_RW(dw, chan->dir, int_mask); + tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)); + tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)); + SET_RW(dw, chan->dir, int_mask, tmp); + /* Linked list error */ + tmp = GET_RW(dw, chan->dir, linked_list_err_en); + tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id)); + SET_RW(dw, chan->dir, linked_list_err_en, tmp); + /* Channel control */ + SET_CH(dw, chan->dir, chan->id, ch_control1, + (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); + /* Linked list - low, high */ + llp = cpu_to_le64(chunk->ll_region.paddr); + SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp)); + SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp)); + } + /* Doorbell */ + SET_RW(dw, chan->dir, doorbell, + FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); +} + +int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + u32 tmp = 0; + + /* MSI done addr - low, high */ + SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo); + SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi); + /* MSI abort addr - low, high */ + SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo); + SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi); + /* MSI data - low, high */ + switch (chan->id) { + case 0: + case 1: + tmp = GET_RW(dw, chan->dir, ch01_imwr_data); + break; + + case 2: + case 3: + tmp = GET_RW(dw, chan->dir, ch23_imwr_data); + break; + + case 4: + case 5: + tmp = GET_RW(dw, chan->dir, ch45_imwr_data); + break; + + case 6: + case 7: + tmp = GET_RW(dw, chan->dir, ch67_imwr_data); + break; + } + + if (chan->id & BIT(0)) { + /* Channel odd {1, 3, 5, 7} */ + tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK; + tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK, + chan->msi.data); + } else { + /* Channel even {0, 2, 4, 6} */ + tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK; + tmp |= 
FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK, + chan->msi.data); + } + + switch (chan->id) { + case 0: + case 1: + SET_RW(dw, chan->dir, ch01_imwr_data, tmp); + break; + + case 2: + case 3: + SET_RW(dw, chan->dir, ch23_imwr_data, tmp); + break; + + case 4: + case 5: + SET_RW(dw, chan->dir, ch45_imwr_data, tmp); + break; + + case 6: + case 7: + SET_RW(dw, chan->dir, ch67_imwr_data, tmp); + break; + } + + return 0; +} + +/* eDMA debugfs callbacks */ +void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip) +{ +} + +void dw_edma_v0_core_debugfs_off(void) +{ +} diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h new file mode 100644 index 000000000000..abae1527f1f9 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel + */ + +#ifndef _DW_EDMA_V0_CORE_H +#define _DW_EDMA_V0_CORE_H + +#include + +/* eDMA management callbacks */ +void dw_edma_v0_core_off(struct dw_edma *chan); +u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir); +enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan); +void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan); +void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan); +u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir); +u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir); +void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first); +int dw_edma_v0_core_device_config(struct dw_edma_chan *chan); +/* eDMA debug fs callbacks */ +void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip); +void dw_edma_v0_core_debugfs_off(void); + +#endif /* _DW_EDMA_V0_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h new file mode 100644 index 000000000000..cd6476884507 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel + */ + +#ifndef _DW_EDMA_V0_REGS_H +#define _DW_EDMA_V0_REGS_H + +#include + +#define EDMA_V0_MAX_NR_CH 8 +#define EDMA_V0_VIEWPORT_MASK GENMASK(2, 0) +#define EDMA_V0_DONE_INT_MASK GENMASK(7, 0) +#define EDMA_V0_ABORT_INT_MASK GENMASK(23, 16) +#define EDMA_V0_WRITE_CH_COUNT_MASK GENMASK(3, 0) +#define EDMA_V0_READ_CH_COUNT_MASK GENMASK(19, 16) +#define EDMA_V0_CH_STATUS_MASK GENMASK(6, 5) +#define EDMA_V0_DOORBELL_CH_MASK GENMASK(2, 0) +#define EDMA_V0_LINKED_LIST_ERR_MASK GENMASK(7, 0) + +#define EDMA_V0_CH_ODD_MSI_DATA_MASK GENMASK(31, 16) +#define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0) + +struct dw_edma_v0_ch_regs { + u32 ch_control1; /* 0x000 */ + u32 ch_control2; /* 0x004 */ + u32 transfer_size; /* 0x008 */ + u32 sar_low; /* 0x00c */ + u32 sar_high; /* 0x010 */ + u32 dar_low; /* 0x014 */ + u32 dar_high; /* 0x018 */ + u32 llp_low; /* 0x01c */ + u32 llp_high; /* 0x020 */ +}; + +struct dw_edma_v0_ch { + struct dw_edma_v0_ch_regs wr; /* 0x200 */ + u32 padding_1[55]; /* [0x224..0x2fc] */ + struct dw_edma_v0_ch_regs rd; /* 0x300 */ + u32 padding_2[55]; /* [0x224..0x2fc] */ +}; + +struct dw_edma_v0_unroll { + u32 padding_1; /* 0x0f8 */ + u32 wr_engine_chgroup; /* 0x100 */ + u32 rd_engine_chgroup; /* 0x104 */ + u32 wr_engine_hshake_cnt_low; /* 0x108 */ + u32 wr_engine_hshake_cnt_high; /* 0x10c */ + u32 padding_2[2]; /* [0x110..0x114] */ + u32 rd_engine_hshake_cnt_low; /* 0x118 */ + u32 rd_engine_hshake_cnt_high; /* 0x11c */ + u32 padding_3[2]; /* [0x120..0x124] */ + u32 wr_ch0_pwr_en; /* 0x128 */ + u32 wr_ch1_pwr_en; /* 0x12c */ + u32 wr_ch2_pwr_en; /* 0x130 */ + u32 wr_ch3_pwr_en; /* 0x134 */ + u32 wr_ch4_pwr_en; /* 0x138 */ + u32 wr_ch5_pwr_en; /* 0x13c */ + u32 wr_ch6_pwr_en; /* 0x140 */ + u32 wr_ch7_pwr_en; /* 0x144 */ + u32 padding_4[8]; /* [0x148..0x164] */ + u32 rd_ch0_pwr_en; /* 0x168 */ + u32 rd_ch1_pwr_en; /* 0x16c */ + u32 rd_ch2_pwr_en; /* 0x170 */ + u32 rd_ch3_pwr_en; /* 0x174 */ + u32 rd_ch4_pwr_en; /* 0x178 */ + u32 rd_ch5_pwr_en; /* 0x18c */ + u32 rd_ch6_pwr_en; /* 0x180 */ + u32 rd_ch7_pwr_en; /* 0x184 */ + u32 padding_5[30]; /* [0x188..0x1fc] */ + struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* [0x200..0x1120] */ +}; + +struct dw_edma_v0_legacy { + u32 viewport_sel; /* 0x0f8 */ + struct dw_edma_v0_ch_regs ch; /* [0x100..0x120] */ +}; + +struct dw_edma_v0_regs { + /* eDMA global registers */ + u32 ctrl_data_arb_prior; /* 0x000 */ + u32 padding_1; /* 0x004 */ + u32 ctrl; /* 0x008 */ + u32 wr_engine_en; /* 0x00c */ + u32 wr_doorbell; /* 0x010 */ + u32 padding_2; /* 0x014 */ + u32 wr_ch_arb_weight_low; /* 0x018 */ + u32 wr_ch_arb_weight_high; /* 0x01c */ + u32 padding_3[3]; /* [0x020..0x028] */ + u32 rd_engine_en; /* 0x02c */ + u32 rd_doorbell; /* 0x030 */ + u32 padding_4; /* 0x034 */ + u32 rd_ch_arb_weight_low; /* 0x038 */ + u32 rd_ch_arb_weight_high; /* 0x03c */ + u32 padding_5[3]; /* [0x040..0x048] */ + /* eDMA interrupts registers */ + u32 wr_int_status; /* 0x04c */ + u32 padding_6; /* 0x050 */ + u32 wr_int_mask; /* 0x054 */ + u32 wr_int_clear; /* 0x058 */ + u32 wr_err_status; /* 0x05c */ + u32 wr_done_imwr_low; /* 0x060 */ + u32 wr_done_imwr_high; /* 0x064 */ + u32 wr_abort_imwr_low; /* 0x068 */ + u32 wr_abort_imwr_high; /* 0x06c */ + u32 wr_ch01_imwr_data; /* 0x070 */ + u32 wr_ch23_imwr_data; /* 0x074 */ + u32 wr_ch45_imwr_data; /* 0x078 */ + u32 wr_ch67_imwr_data; /* 0x07c */ + u32 padding_7[4]; /* [0x080..0x08c] */ + u32 wr_linked_list_err_en; /* 0x090 */ + u32 padding_8[3]; /* [0x094..0x09c] */ 
+	u32 rd_int_status;				/* 0x0a0 */
+	u32 padding_9;					/* 0x0a4 */
+	u32 rd_int_mask;				/* 0x0a8 */
+	u32 rd_int_clear;				/* 0x0ac */
+	u32 padding_10;					/* 0x0b0 */
+	u32 rd_err_status_low;				/* 0x0b4 */
+	u32 rd_err_status_high;				/* 0x0b8 */
+	u32 padding_11[2];				/* [0x0bc..0x0c0] */
+	u32 rd_linked_list_err_en;			/* 0x0c4 */
+	u32 padding_12;					/* 0x0c8 */
+	u32 rd_done_imwr_low;				/* 0x0cc */
+	u32 rd_done_imwr_high;				/* 0x0d0 */
+	u32 rd_abort_imwr_low;				/* 0x0d4 */
+	u32 rd_abort_imwr_high;				/* 0x0d8 */
+	u32 rd_ch01_imwr_data;				/* 0x0dc */
+	u32 rd_ch23_imwr_data;				/* 0x0e0 */
+	u32 rd_ch45_imwr_data;				/* 0x0e4 */
+	u32 rd_ch67_imwr_data;				/* 0x0e8 */
+	u32 padding_13[4];				/* [0x0ec..0x0f8] */
+	/* eDMA channel context grouping */
+	union dw_edma_v0_type {
+		struct dw_edma_v0_legacy legacy;	/* [0x0f8..0x120] */
+		struct dw_edma_v0_unroll unroll;	/* [0x0f8..0x1120] */
+	} type;
+};
+
+struct dw_edma_v0_lli {
+	u32 control;
+	u32 transfer_size;
+	u32 sar_low;
+	u32 sar_high;
+	u32 dar_low;
+	u32 dar_high;
+};
+
+struct dw_edma_v0_llp {
+	u32 control;
+	u32 reserved;
+	u32 llp_low;
+	u32 llp_high;
+};
+
+#endif /* _DW_EDMA_V0_REGS_H */

From 305aebeff8795cbf03b3f117e74bb702c130ae4c Mon Sep 17 00:00:00 2001
From: Gustavo Pimentel
Date: Tue, 4 Jun 2019 15:29:24 +0200
Subject: [PATCH 32/68] dmaengine: Add Synopsys eDMA IP version 0 debugfs
 support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add Synopsys eDMA IP version 0 debugfs support to assist future
debugging.

This creates a filesystem structure of folders and files that mimics
the IP register map (these files are read-only) to ease debugging.

To enable this feature, select the DEBUG_FS option in the kernel
configuration.

Small output example:
(eDMA IP version 0, unroll, 1 write + 1 read channels)

% mount -t debugfs none /sys/kernel/debug/
% tree /sys/kernel/debug/dw-edma-core:0/
dw-edma/
├── version
├── mode
├── wr_ch_cnt
├── rd_ch_cnt
└── registers
    ├── ctrl_data_arb_prior
    ├── ctrl
    ├── write
    │   ├── engine_en
    │   ├── doorbell
    │   ├── ch_arb_weight_low
    │   ├── ch_arb_weight_high
    │   ├── int_status
    │   ├── int_mask
    │   ├── int_clear
    │   ├── err_status
    │   ├── done_imwr_low
    │   ├── done_imwr_high
    │   ├── abort_imwr_low
    │   ├── abort_imwr_high
    │   ├── ch01_imwr_data
    │   ├── ch23_imwr_data
    │   ├── ch45_imwr_data
    │   ├── ch67_imwr_data
    │   ├── linked_list_err_en
    │   ├── engine_chgroup
    │   ├── engine_hshake_cnt_low
    │   ├── engine_hshake_cnt_high
    │   ├── ch0_pwr_en
    │   ├── ch1_pwr_en
    │   ├── ch2_pwr_en
    │   ├── ch3_pwr_en
    │   ├── ch4_pwr_en
    │   ├── ch5_pwr_en
    │   ├── ch6_pwr_en
    │   ├── ch7_pwr_en
    │   └── channel:0
    │       ├── ch_control1
    │       ├── ch_control2
    │       ├── transfer_size
    │       ├── sar_low
    │       ├── sar_high
    │       ├── dar_high
    │       ├── llp_low
    │       └── llp_high
    └── read
        ├── engine_en
        ├── doorbell
        ├── ch_arb_weight_low
        ├── ch_arb_weight_high
        ├── int_status
        ├── int_mask
        ├── int_clear
        ├── err_status_low
        ├── err_status_high
        ├── done_imwr_low
        ├── done_imwr_high
        ├── abort_imwr_low
        ├── abort_imwr_high
        ├── ch01_imwr_data
        ├── ch23_imwr_data
        ├── ch45_imwr_data
        ├── ch67_imwr_data
        ├── linked_list_err_en
        ├── engine_chgroup
        ├── engine_hshake_cnt_low
        ├──
engine_hshake_cnt_high         ├── ch0_pwr_en         ├── ch1_pwr_en         ├── ch2_pwr_en         ├── ch3_pwr_en         ├── ch4_pwr_en         ├── ch5_pwr_en         ├── ch6_pwr_en         ├── ch7_pwr_en         └── channel:0             ├── ch_control1             ├── ch_control2             ├── transfer_size             ├── sar_low             ├── sar_high             ├── dar_high             ├── llp_low             └── llp_high Signed-off-by: Gustavo Pimentel Cc: Vinod Koul Cc: Dan Williams Cc: Andy Shevchenko Cc: Russell King Cc: Joao Pinto Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/Makefile | 3 +- drivers/dma/dw-edma/dw-edma-v0-core.c | 2 + drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 310 +++++++++++++++++++++++ drivers/dma/dw-edma/dw-edma-v0-debugfs.h | 27 ++ 4 files changed, 341 insertions(+), 1 deletion(-) create mode 100644 drivers/dma/dw-edma/dw-edma-v0-debugfs.c create mode 100644 drivers/dma/dw-edma/dw-edma-v0-debugfs.h diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile index 01c7c633cdab..0c53033cc5ae 100644 --- a/drivers/dma/dw-edma/Makefile +++ b/drivers/dma/dw-edma/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DW_EDMA) += dw-edma.o +dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o dw-edma-objs := dw-edma-core.o \ - dw-edma-v0-core.o + dw-edma-v0-core.o $(dw-edma-y) diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c index a38c47312dcf..8a3180ed49a6 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.c +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -345,8 +345,10 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) /* eDMA debugfs callbacks */ void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip) { + dw_edma_v0_debugfs_on(chip); } void dw_edma_v0_core_debugfs_off(void) { + dw_edma_v0_debugfs_off(); } diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c new file mode 100644 index 000000000000..3226f528cc11 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel + */ + +#include +#include + +#include "dw-edma-v0-debugfs.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-core.h" + +#define REGS_ADDR(name) \ + ((dma_addr_t *)®s->name) +#define REGISTER(name) \ + { #name, REGS_ADDR(name) } + +#define WR_REGISTER(name) \ + { #name, REGS_ADDR(wr_##name) } +#define RD_REGISTER(name) \ + { #name, REGS_ADDR(rd_##name) } + +#define WR_REGISTER_LEGACY(name) \ + { #name, REGS_ADDR(type.legacy.wr_##name) } +#define RD_REGISTER_LEGACY(name) \ + { #name, REGS_ADDR(type.legacy.rd_##name) } + +#define WR_REGISTER_UNROLL(name) \ + { #name, REGS_ADDR(type.unroll.wr_##name) } +#define RD_REGISTER_UNROLL(name) \ + { #name, REGS_ADDR(type.unroll.rd_##name) } + +#define WRITE_STR "write" +#define READ_STR "read" +#define CHANNEL_STR "channel" +#define REGISTERS_STR "registers" + +static struct dentry *base_dir; +static struct dw_edma *dw; +static struct dw_edma_v0_regs *regs; + +static struct { + void *start; + void *end; +} lim[2][EDMA_V0_MAX_NR_CH]; + +struct debugfs_entries { + char name[24]; + dma_addr_t *reg; +}; + +static int dw_edma_debugfs_u32_get(void *data, u64 *val) +{ + if (dw->mode == EDMA_MODE_LEGACY && + data >= (void *)®s->type.legacy.ch) { + void *ptr = (void *)®s->type.legacy.ch; + u32 viewport_sel = 0; + unsigned long flags; + u16 ch; + + for (ch = 0; ch < dw->wr_ch_cnt; ch++) + if (lim[0][ch].start >= data && data < lim[0][ch].end) { + ptr += (data - lim[0][ch].start); + goto legacy_sel_wr; + } + + for (ch = 0; ch < dw->rd_ch_cnt; ch++) + if (lim[1][ch].start >= data && data < lim[1][ch].end) { + ptr += (data - lim[1][ch].start); + goto legacy_sel_rd; + } + + return 0; +legacy_sel_rd: + viewport_sel = BIT(31); +legacy_sel_wr: + viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + + raw_spin_lock_irqsave(&dw->lock, flags); + + writel(viewport_sel, ®s->type.legacy.viewport_sel); + *val = readl(ptr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + *val = readl(data); + } + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n"); + +static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[], + int nr_entries, struct dentry *dir) +{ + int i; + + for (i = 0; i < nr_entries; i++) { + if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir, + entries[i].reg, &fops_x32)) + break; + } +} + +static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs, + struct dentry *dir) +{ + int nr_entries; + const struct debugfs_entries debugfs_regs[] = { + REGISTER(ch_control1), + REGISTER(ch_control2), + REGISTER(transfer_size), + REGISTER(sar_low), + REGISTER(sar_high), + REGISTER(dar_low), + REGISTER(dar_high), + REGISTER(llp_low), + REGISTER(llp_high), + }; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir); +} + +static void dw_edma_debugfs_regs_wr(struct dentry *dir) +{ + const struct debugfs_entries debugfs_regs[] = { + /* eDMA global registers */ + WR_REGISTER(engine_en), + WR_REGISTER(doorbell), + WR_REGISTER(ch_arb_weight_low), + WR_REGISTER(ch_arb_weight_high), + /* eDMA interrupts registers */ + WR_REGISTER(int_status), + WR_REGISTER(int_mask), + WR_REGISTER(int_clear), + WR_REGISTER(err_status), + WR_REGISTER(done_imwr_low), + WR_REGISTER(done_imwr_high), + WR_REGISTER(abort_imwr_low), + WR_REGISTER(abort_imwr_high), + WR_REGISTER(ch01_imwr_data), + WR_REGISTER(ch23_imwr_data), + WR_REGISTER(ch45_imwr_data), + WR_REGISTER(ch67_imwr_data), + 
WR_REGISTER(linked_list_err_en), + }; + const struct debugfs_entries debugfs_unroll_regs[] = { + /* eDMA channel context grouping */ + WR_REGISTER_UNROLL(engine_chgroup), + WR_REGISTER_UNROLL(engine_hshake_cnt_low), + WR_REGISTER_UNROLL(engine_hshake_cnt_high), + WR_REGISTER_UNROLL(ch0_pwr_en), + WR_REGISTER_UNROLL(ch1_pwr_en), + WR_REGISTER_UNROLL(ch2_pwr_en), + WR_REGISTER_UNROLL(ch3_pwr_en), + WR_REGISTER_UNROLL(ch4_pwr_en), + WR_REGISTER_UNROLL(ch5_pwr_en), + WR_REGISTER_UNROLL(ch6_pwr_en), + WR_REGISTER_UNROLL(ch7_pwr_en), + }; + struct dentry *regs_dir, *ch_dir; + int nr_entries, i; + char name[16]; + + regs_dir = debugfs_create_dir(WRITE_STR, dir); + if (!regs_dir) + return; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + + if (dw->mode == EDMA_MODE_UNROLL) { + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); + dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, + regs_dir); + } + + for (i = 0; i < dw->wr_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dir = debugfs_create_dir(name, regs_dir); + if (!ch_dir) + return; + + dw_edma_debugfs_regs_ch(®s->type.unroll.ch[i].wr, ch_dir); + + lim[0][i].start = ®s->type.unroll.ch[i].wr; + lim[0][i].end = ®s->type.unroll.ch[i].padding_1[0]; + } +} + +static void dw_edma_debugfs_regs_rd(struct dentry *dir) +{ + const struct debugfs_entries debugfs_regs[] = { + /* eDMA global registers */ + RD_REGISTER(engine_en), + RD_REGISTER(doorbell), + RD_REGISTER(ch_arb_weight_low), + RD_REGISTER(ch_arb_weight_high), + /* eDMA interrupts registers */ + RD_REGISTER(int_status), + RD_REGISTER(int_mask), + RD_REGISTER(int_clear), + RD_REGISTER(err_status_low), + RD_REGISTER(err_status_high), + RD_REGISTER(linked_list_err_en), + RD_REGISTER(done_imwr_low), + RD_REGISTER(done_imwr_high), + RD_REGISTER(abort_imwr_low), + RD_REGISTER(abort_imwr_high), + RD_REGISTER(ch01_imwr_data), + RD_REGISTER(ch23_imwr_data), + RD_REGISTER(ch45_imwr_data), + RD_REGISTER(ch67_imwr_data), + }; + const struct debugfs_entries debugfs_unroll_regs[] = { + /* eDMA channel context grouping */ + RD_REGISTER_UNROLL(engine_chgroup), + RD_REGISTER_UNROLL(engine_hshake_cnt_low), + RD_REGISTER_UNROLL(engine_hshake_cnt_high), + RD_REGISTER_UNROLL(ch0_pwr_en), + RD_REGISTER_UNROLL(ch1_pwr_en), + RD_REGISTER_UNROLL(ch2_pwr_en), + RD_REGISTER_UNROLL(ch3_pwr_en), + RD_REGISTER_UNROLL(ch4_pwr_en), + RD_REGISTER_UNROLL(ch5_pwr_en), + RD_REGISTER_UNROLL(ch6_pwr_en), + RD_REGISTER_UNROLL(ch7_pwr_en), + }; + struct dentry *regs_dir, *ch_dir; + int nr_entries, i; + char name[16]; + + regs_dir = debugfs_create_dir(READ_STR, dir); + if (!regs_dir) + return; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + + if (dw->mode == EDMA_MODE_UNROLL) { + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); + dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, + regs_dir); + } + + for (i = 0; i < dw->rd_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dir = debugfs_create_dir(name, regs_dir); + if (!ch_dir) + return; + + dw_edma_debugfs_regs_ch(®s->type.unroll.ch[i].rd, ch_dir); + + lim[1][i].start = ®s->type.unroll.ch[i].rd; + lim[1][i].end = ®s->type.unroll.ch[i].padding_2[0]; + } +} + +static void dw_edma_debugfs_regs(void) +{ + const struct debugfs_entries debugfs_regs[] = { + REGISTER(ctrl_data_arb_prior), + REGISTER(ctrl), + }; + struct dentry *regs_dir; + int nr_entries; + + regs_dir = debugfs_create_dir(REGISTERS_STR, 
base_dir);
+	if (!regs_dir)
+		return;
+
+	nr_entries = ARRAY_SIZE(debugfs_regs);
+	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+
+	dw_edma_debugfs_regs_wr(regs_dir);
+	dw_edma_debugfs_regs_rd(regs_dir);
+}
+
+void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+{
+	dw = chip->dw;
+	if (!dw)
+		return;
+
+	regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+	if (!regs)
+		return;
+
+	base_dir = debugfs_create_dir(dw->name, 0);
+	if (!base_dir)
+		return;
+
+	debugfs_create_u32("version", 0444, base_dir, &dw->version);
+	debugfs_create_u32("mode", 0444, base_dir, &dw->mode);
+	debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt);
+	debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt);
+
+	dw_edma_debugfs_regs();
+}
+
+void dw_edma_v0_debugfs_off(void)
+{
+	debugfs_remove_recursive(base_dir);
+}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
new file mode 100644
index 000000000000..5450a0a94193
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA v0 core
+ *
+ * Author: Gustavo Pimentel
+ */
+
+#ifndef _DW_EDMA_V0_DEBUG_FS_H
+#define _DW_EDMA_V0_DEBUG_FS_H
+
+#include
+
+#ifdef CONFIG_DEBUG_FS
+void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
+void dw_edma_v0_debugfs_off(void);
+#else
+static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+{
+}
+
+static inline void dw_edma_v0_debugfs_off(void)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _DW_EDMA_V0_DEBUG_FS_H */

From 1f418f46503d72594bbe6407d97fd2ae1ce15ee6 Mon Sep 17 00:00:00 2001
From: Gustavo Pimentel
Date: Tue, 4 Jun 2019 15:29:25 +0200
Subject: [PATCH 33/68] PCI: Add Synopsys endpoint EDDA Device ID

Create the Synopsys Endpoint EDDA Device ID and add it to the PCI ID
list, since this ID is now being used by two different drivers
(pci_endpoint_test.ko and dw-edma-pcie.ko).
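As a reference for how the new ID is consumed, a match-table entry looks like
the sketch below (a hypothetical "demo" table, not code from this series).
PCI_DEVICE_DATA() fills in the vendor/device pair, the PCI_ANY_ID sub-IDs, and
the driver_data pointer, just as the pci_endpoint_test change below uses it:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical table for illustration; "demo" is not a real driver.
 * PCI_DEVICE_DATA(SYNOPSYS, EDDA, p) expands to vendor 0x16c3 and
 * device 0xedda, PCI_ANY_ID subvendor/subdevice, and driver_data p. */
static const struct pci_device_id demo_id_table[] = {
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_id_table);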
Signed-off-by: Gustavo Pimentel
Acked-by: Bjorn Helgaas
Cc: Kishon Vijay Abraham I
Cc: Bjorn Helgaas
Cc: Lorenzo Pieralisi
Cc: Joao Pinto
Signed-off-by: Vinod Koul
---
 drivers/misc/pci_endpoint_test.c | 2 +-
 include/linux/pci_ids.h          | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 7b015f2a1c6f..1f531c1b4f74 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -804,7 +804,7 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
+	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
 	  .driver_data = (kernel_ulong_t)&am654_data
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 70e86148cb1e..4aad69fc4d6b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2366,6 +2366,7 @@
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3		0xabcd
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI	0xabce
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31	0xabcf
+#define PCI_DEVICE_ID_SYNOPSYS_EDDA		0xedda
 
 #define PCI_VENDOR_ID_USR		0x16ec

From 41aaff2a2ac01c5f5415f9de9cd7015ac2b8d569 Mon Sep 17 00:00:00 2001
From: Gustavo Pimentel
Date: Tue, 4 Jun 2019 15:29:26 +0200
Subject: [PATCH 34/68] dmaengine: Add Synopsys eDMA IP PCIe glue-logic

Synopsys eDMA IP is normally distributed along with Synopsys PCIe
EndPoint IP (depending on the use case and licensing agreement).

This IP requires some basic configuration, such as:
 - eDMA registers BAR
 - eDMA registers offset
 - eDMA registers size
 - eDMA linked list memory BAR
 - eDMA linked list memory offset
 - eDMA linked list memory size
 - eDMA data memory BAR
 - eDMA data memory offset
 - eDMA data memory size
 - eDMA version
 - eDMA mode
 - IRQs available for eDMA

As a working example, the PCIe glue-logic will attach to a Synopsys
PCIe EndPoint IP prototype kit (Vendor ID = 0x16c3, Device ID =
0xedda), which has a built-in eDMA IP with this default configuration:
 - eDMA registers BAR = 0
 - eDMA registers offset = 0x00001000 (4 Kbytes)
 - eDMA registers size = 0x00002000 (8 Kbytes)
 - eDMA linked list memory BAR = 2
 - eDMA linked list memory offset = 0x00000000 (0 Kbytes)
 - eDMA linked list memory size = 0x00800000 (8 Mbytes)
 - eDMA data memory BAR = 2
 - eDMA data memory offset = 0x00800000 (8 Mbytes)
 - eDMA data memory size = 0x03800000 (56 Mbytes)
 - eDMA version = 0
 - eDMA mode = EDMA_MODE_UNROLL
 - IRQs = 1

This driver can be compiled as a built-in or as an external kernel
module. To enable it, select the DW_EDMA_PCIE option in the kernel
configuration; it automatically selects the required DW_EDMA option as
well.
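Given this default configuration, the core driver shown earlier carves the
8-Mbyte linked-list region into per-channel chunks. The userspace sketch below
(an illustration, not kernel code) walks through that arithmetic, assuming the
maximum of 16 channels and the 24-byte EDMA_LL_SZ from dw-edma-core.h:

#include <stdio.h>

#define EDMA_LL_SZ 24	/* bytes per linked-list element */

/* Userspace stand-in for the kernel helper of the same name */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long ch_cnt = 8 + 8;		/* write + read channels */
	unsigned long ll_sz = 0x00800000;	/* 8 Mbytes in BAR 2 */
	unsigned long ll_chunk = ll_sz / roundup_pow_of_two(ch_cnt);

	/* Mirrors dw_edma_channel_setup(): one chunk per channel, then
	 * the bursts each chunk can hold (minus one for the tail LLP). */
	printf("ll_chunk = %lu bytes per channel\n", ll_chunk);
	printf("ll_max   = %lu bursts per chunk\n",
	       ll_chunk / EDMA_LL_SZ - 1);
	return 0;
}

With these numbers each channel gets 524288 bytes of linked-list space, good
for 21844 bursts per chunk before the core must allocate another chunk.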
Signed-off-by: Gustavo Pimentel Cc: Vinod Koul Cc: Dan Williams Cc: Russell King Cc: Lorenzo Pieralisi Cc: Joao Pinto Signed-off-by: Gustavo Pimentel Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/Kconfig | 9 ++ drivers/dma/dw-edma/Makefile | 1 + drivers/dma/dw-edma/dw-edma-pcie.c | 229 +++++++++++++++++++++++++++++ 3 files changed, 239 insertions(+) create mode 100644 drivers/dma/dw-edma/dw-edma-pcie.c diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig index 3016bed63589..c0838ce959e4 100644 --- a/drivers/dma/dw-edma/Kconfig +++ b/drivers/dma/dw-edma/Kconfig @@ -7,3 +7,12 @@ config DW_EDMA help Support the Synopsys DesignWare eDMA controller, normally implemented on endpoints SoCs. + +config DW_EDMA_PCIE + tristate "Synopsys DesignWare eDMA PCIe driver" + depends on PCI && PCI_MSI + select DW_EDMA + help + Provides a glue-logic between the Synopsys DesignWare + eDMA controller and an endpoint PCIe device. This also serves + as a reference design to whom desires to use this IP. diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile index 0c53033cc5ae..8d45c0d5689d 100644 --- a/drivers/dma/dw-edma/Makefile +++ b/drivers/dma/dw-edma/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_DW_EDMA) += dw-edma.o dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o dw-edma-objs := dw-edma-core.o \ dw-edma-v0-core.o $(dw-edma-y) +obj-$(CONFIG_DW_EDMA_PCIE) += dw-edma-pcie.o diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c new file mode 100644 index 000000000000..4c96e1c948f2 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA PCIe driver + * + * Author: Gustavo Pimentel + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "dw-edma-core.h" + +struct dw_edma_pcie_data { + /* eDMA registers location */ + enum pci_barno rg_bar; + off_t rg_off; + size_t rg_sz; + /* eDMA memory linked list location */ + enum pci_barno ll_bar; + off_t ll_off; + size_t ll_sz; + /* eDMA memory data location */ + enum pci_barno dt_bar; + off_t dt_off; + size_t dt_sz; + /* Other */ + u32 version; + enum dw_edma_mode mode; + u8 irqs; +}; + +static const struct dw_edma_pcie_data snps_edda_data = { + /* eDMA registers location */ + .rg_bar = BAR_0, + .rg_off = 0x00001000, /* 4 Kbytes */ + .rg_sz = 0x00002000, /* 8 Kbytes */ + /* eDMA memory linked list location */ + .ll_bar = BAR_2, + .ll_off = 0x00000000, /* 0 Kbytes */ + .ll_sz = 0x00800000, /* 8 Mbytes */ + /* eDMA memory data location */ + .dt_bar = BAR_2, + .dt_off = 0x00800000, /* 8 Mbytes */ + .dt_sz = 0x03800000, /* 56 Mbytes */ + /* Other */ + .version = 0, + .mode = EDMA_MODE_UNROLL, + .irqs = 1, +}; + +static int dw_edma_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *pid) +{ + const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; + struct device *dev = &pdev->dev; + struct dw_edma_chip *chip; + int err, nr_irqs; + struct dw_edma *dw; + + /* Enable PCI device */ + err = pcim_enable_device(pdev); + if (err) { + pci_err(pdev, "enabling device failed\n"); + return err; + } + + /* Mapping PCI BAR regions */ + err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) | + BIT(pdata->ll_bar) | + BIT(pdata->dt_bar), + pci_name(pdev)); + if (err) { + pci_err(pdev, "eDMA BAR I/O remapping failed\n"); + return err; + } + + pci_set_master(pdev); + + /* DMA configuration */ + err = pci_set_dma_mask(pdev, 
DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + pci_err(pdev, "consistent DMA mask 64 set failed\n"); + return err; + } + } else { + pci_err(pdev, "DMA mask 64 set failed\n"); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + pci_err(pdev, "DMA mask 32 set failed\n"); + return err; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + pci_err(pdev, "consistent DMA mask 32 set failed\n"); + return err; + } + } + + /* Data structure allocation */ + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); + if (!dw) + return -ENOMEM; + + /* IRQs allocation */ + nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (nr_irqs < 1) { + pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", + nr_irqs); + return -EPERM; + } + + /* Data structure initialization */ + chip->dw = dw; + chip->dev = dev; + chip->id = pdev->devfn; + chip->irq = pdev->irq; + + dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar]; + dw->rg_region.vaddr += pdata->rg_off; + dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; + dw->rg_region.paddr += pdata->rg_off; + dw->rg_region.sz = pdata->rg_sz; + + dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar]; + dw->ll_region.vaddr += pdata->ll_off; + dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; + dw->ll_region.paddr += pdata->ll_off; + dw->ll_region.sz = pdata->ll_sz; + + dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar]; + dw->dt_region.vaddr += pdata->dt_off; + dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; + dw->dt_region.paddr += pdata->dt_off; + dw->dt_region.sz = pdata->dt_sz; + + dw->version = pdata->version; + dw->mode = pdata->mode; + dw->nr_irqs = nr_irqs; + + /* Debug info */ + pci_dbg(pdev, "Version:\t%u\n", dw->version); + + pci_dbg(pdev, "Mode:\t%s\n", + dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll"); + + pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", + pdata->rg_bar, pdata->rg_off, pdata->rg_sz, + &dw->rg_region.vaddr, &dw->rg_region.paddr); + + pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", + pdata->ll_bar, pdata->ll_off, pdata->ll_sz, + &dw->ll_region.vaddr, &dw->ll_region.paddr); + + pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", + pdata->dt_bar, pdata->dt_off, pdata->dt_sz, + &dw->dt_region.vaddr, &dw->dt_region.paddr); + + pci_dbg(pdev, "Nr. 
IRQs:\t%u\n", dw->nr_irqs); + + /* Validating if PCI interrupts were enabled */ + if (!pci_dev_msi_enabled(pdev)) { + pci_err(pdev, "enable interrupt failed\n"); + return -EPERM; + } + + dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL); + if (!dw->irq) + return -ENOMEM; + + /* Starting eDMA driver */ + err = dw_edma_probe(chip); + if (err) { + pci_err(pdev, "eDMA probe failed\n"); + return err; + } + + /* Saving data structure reference */ + pci_set_drvdata(pdev, chip); + + return 0; +} + +static void dw_edma_pcie_remove(struct pci_dev *pdev) +{ + struct dw_edma_chip *chip = pci_get_drvdata(pdev); + int err; + + /* Stopping eDMA driver */ + err = dw_edma_remove(chip); + if (err) + pci_warn(pdev, "can't remove device properly: %d\n", err); + + /* Freeing IRQs */ + pci_free_irq_vectors(pdev); +} + +static const struct pci_device_id dw_edma_pcie_id_table[] = { + { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) }, + { } +}; +MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table); + +static struct pci_driver dw_edma_pcie_driver = { + .name = "dw-edma-pcie", + .id_table = dw_edma_pcie_id_table, + .probe = dw_edma_pcie_probe, + .remove = dw_edma_pcie_remove, +}; + +module_pci_driver(dw_edma_pcie_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver"); +MODULE_AUTHOR("Gustavo Pimentel "); From 28635f6cd78e7fb4e632e0e43ca7e3e81a8c7b58 Mon Sep 17 00:00:00 2001 From: Gustavo Pimentel Date: Tue, 4 Jun 2019 15:29:27 +0200 Subject: [PATCH 35/68] MAINTAINERS: Add Synopsys eDMA IP driver maintainer Add Synopsys eDMA IP driver maintainer. This driver aims to support Synopsys eDMA IP and is normally distributed along with Synopsys PCIe EndPoint IP (depends of the use and licensing agreement). Signed-off-by: Gustavo Pimentel Cc: Vinod Koul Cc: Joao Pinto Signed-off-by: Vinod Koul --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 5cfbea4ce575..a43f960ae673 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4594,6 +4594,13 @@ L: linux-mtd@lists.infradead.org S: Supported F: drivers/mtd/nand/raw/denali* +DESIGNWARE EDMA CORE IP DRIVER +M: Gustavo Pimentel +L: dmaengine@vger.kernel.org +S: Maintained +F: drivers/dma/dw-edma/ +F: include/linux/dma/edma.h + DESIGNWARE USB2 DRD IP DRIVER M: Minas Harutyunyan L: linux-usb@vger.kernel.org From f4a9fe97ea5a08c13a842637abf9b2d6b4edf35d Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 6 Jun 2019 13:53:44 +0300 Subject: [PATCH 36/68] dmaengine: axi-dmac: update license header The change replaces the old license information in the comment header with the new SPDX license specifier. As well as bumping the year range from 2013-2015 to 2013-2019. The latter also reflects recent changes that were added to the driver. Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index d5e29bbc3d43..9e6b8940e8d2 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -1,10 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for the Analog Devices AXI-DMAC core * - * Copyright 2013-2015 Analog Devices Inc. + * Copyright 2013-2019 Analog Devices Inc. * Author: Lars-Peter Clausen - * - * Licensed under the GPL-2. 
*/ #include From fe333389ccce1f1e9c0807ee904fba0ca79fa3a9 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 7 Jun 2019 13:30:39 +0200 Subject: [PATCH 37/68] dmaengine: Grammar s/the its/its/, s/need/needs/ Signed-off-by: Geert Uytterhoeven Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 610080c629bb..7efb9264b744 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -73,7 +73,7 @@ static long dmaengine_ref_count; /* --- sysfs implementation --- */ /** - * dev_to_dma_chan - convert a device pointer to the its sysfs container object + * dev_to_dma_chan - convert a device pointer to its sysfs container object * @dev - device node * * Must be called under dma_list_mutex @@ -717,7 +717,7 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) chan = acpi_dma_request_slave_chan_by_name(dev, name); if (chan) { - /* Valid channel found or requester need to be deferred */ + /* Valid channel found or requester needs to be deferred */ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) return chan; } From 83eb5cfcd5e35967d5e136d1266378933372a41a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 12 Jun 2019 12:19:54 +0800 Subject: [PATCH 38/68] dmaengine: dw-edma: Fix build error without CONFIG_PCI_MSI If CONFIG_PCI_MSI is not set, building with CONFIG_DW_EDMA fails: drivers/dma/dw-edma/dw-edma-core.c: In function dw_edma_irq_request: drivers/dma/dw-edma/dw-edma-core.c:784:21: error: implicit declaration of function pci_irq_vector; did you mean rcu_irq_enter? [-Werror=implicit-function-declaration] err = request_irq(pci_irq_vector(to_pci_dev(dev), 0), ^~~~~~~~~~~~~~ Reported-by: Hulk Robot Fixes: e63d79d1ffcd ("dmaengine: Add Synopsys eDMA IP core driver") Signed-off-by: YueHaibing Reported-by: Randy Dunlap Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig index c0838ce959e4..7ff17b2db6a1 100644 --- a/drivers/dma/dw-edma/Kconfig +++ b/drivers/dma/dw-edma/Kconfig @@ -2,6 +2,7 @@ config DW_EDMA tristate "Synopsys DesignWare eDMA controller driver" + depends on PCI && PCI_MSI select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help From 718745f87f95d0a8c3cdeb52abd20bfc2b063d12 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 12 Jun 2019 14:25:52 +0200 Subject: [PATCH 39/68] dmaengine: amba-pl08x: no need to cast away call to debugfs_create_file() No need to check the return value of debugfs_create_file(), so no need to provide a fake "cast away" of the return value either. 
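For illustration, the idiom generalizes to any driver: debugfs_create_dir() and debugfs_create_file() tolerate a NULL or ERR_PTR parent and simply no-op on failure, so neither result needs checking. A minimal sketch of the pattern, with hypothetical names (struct foo_dev and foo_show are placeholders, not from this patch):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct foo_dev {
	u32 state;
};

static int foo_show(struct seq_file *s, void *unused)
{
	struct foo_dev *foo = s->private;

	seq_printf(s, "state: %u\n", foo->state);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);

static void foo_init_debugfs(struct foo_dev *foo)
{
	/* No return-value checks: a failed dir is still a safe parent */
	struct dentry *root = debugfs_create_dir("foo", NULL);

	debugfs_create_file("state", 0444, root, foo, &foo_fops);
}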
Cc: Dan Williams Cc: Vinod Koul Cc: dmaengine@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Greg Kroah-Hartman Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 8cfc753ad4b0..ccd983c6d46c 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2520,9 +2520,8 @@ DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs); static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) { /* Expose a simple debugfs interface to view all clocks */ - (void) debugfs_create_file(dev_name(&pl08x->adev->dev), - S_IFREG | S_IRUGO, NULL, pl08x, - &pl08x_debugfs_fops); + debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, + NULL, pl08x, &pl08x_debugfs_fops); } #else From 635d7302caca40841077804c194ae8e00339e2a0 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 12 Jun 2019 14:25:53 +0200 Subject: [PATCH 40/68] dmaengine: bcm-sba-raid: no need to check return value of debugfs_create functions When calling debugfs functions, there is no need to ever check the return value. The function can work or not, but the code logic should never do something different based on this. Also, because there is no need to save the file dentry, remove the variable that was saving it as it was never even being used once set. Cc: Vinod Koul Cc: Dan Williams Cc: dmaengine@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Greg Kroah-Hartman Signed-off-by: Vinod Koul --- drivers/dma/bcm-sba-raid.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index fa81d0177765..275e90fa829d 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -164,7 +164,6 @@ struct sba_device { struct list_head reqs_free_list; /* DebugFS directory entries */ struct dentry *root; - struct dentry *stats; }; /* ====== Command helper routines ===== */ @@ -1716,17 +1715,11 @@ static int sba_probe(struct platform_device *pdev) /* Create debugfs root entry */ sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); - if (IS_ERR_OR_NULL(sba->root)) { - dev_err(sba->dev, "failed to create debugfs root entry\n"); - sba->root = NULL; - goto skip_debugfs; - } /* Create debugfs stats entry */ - sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, - sba_debugfs_stats_show); - if (IS_ERR_OR_NULL(sba->stats)) - dev_err(sba->dev, "failed to create debugfs stats file\n"); + debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, + sba_debugfs_stats_show); + skip_debugfs: /* Register DMA device with Linux async framework */ From c33394bd006c2efa742b204e740c098763972dc9 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 12 Jun 2019 14:25:54 +0200 Subject: [PATCH 41/68] dmaengine: coh901318: no need to cast away call to debugfs_create_file() No need to check the return value of debugfs_create_file(), so no need to provide a fake "cast away" of the return value either. 
Cc: Linus Walleij Cc: Vinod Koul Cc: Dan Williams Cc: linux-arm-kernel@lists.infradead.org Cc: dmaengine@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Greg Kroah-Hartman Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/coh901318.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index b69d66e44052..6f5bf6ae08b1 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1378,10 +1378,8 @@ static int __init init_coh901318_debugfs(void) dma_dentry = debugfs_create_dir("dma", NULL); - (void) debugfs_create_file("status", - S_IFREG | S_IRUGO, - dma_dentry, NULL, - &coh901318_debugfs_status_operations); + debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL, + &coh901318_debugfs_status_operations); return 0; } From 8148a87846ebe07e8f34954205bbd8ab40693f4b Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 12 Jun 2019 14:25:55 +0200 Subject: [PATCH 42/68] dmaengine: pxa_dma: no need to check return value of debugfs_create functions When calling debugfs functions, there is no need to ever check the return value. The function can work or not, but the code logic should never do something different based on this. Also, because there is no need to save the file dentry, remove the variable that was saving it as it was never even being used once set. Cc: Daniel Mack Cc: Haojian Zhuang Cc: Robert Jarzmik Cc: Vinod Koul Cc: Dan Williams Cc: linux-arm-kernel@lists.infradead.org Cc: dmaengine@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Greg Kroah-Hartman Signed-off-by: Vinod Koul --- drivers/dma/pxa_dma.c | 56 +++++++++---------------------------------- 1 file changed, 11 insertions(+), 45 deletions(-) diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index b429642f3e7a..0f698f49ee26 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -132,7 +132,6 @@ struct pxad_device { spinlock_t phy_lock; /* Phy association */ #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_root; - struct dentry *dbgfs_state; struct dentry **dbgfs_chan; #endif }; @@ -326,31 +325,18 @@ static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev, int ch, struct dentry *chandir) { char chan_name[11]; - struct dentry *chan, *chan_state = NULL, *chan_descr = NULL; - struct dentry *chan_reqs = NULL; + struct dentry *chan; void *dt; scnprintf(chan_name, sizeof(chan_name), "%d", ch); chan = debugfs_create_dir(chan_name, chandir); dt = (void *)&pdev->phys[ch]; - if (chan) - chan_state = debugfs_create_file("state", 0400, chan, dt, - &chan_state_fops); - if (chan_state) - chan_descr = debugfs_create_file("descriptors", 0400, chan, dt, - &descriptors_fops); - if (chan_descr) - chan_reqs = debugfs_create_file("requesters", 0400, chan, dt, - &requester_chan_fops); - if (!chan_reqs) - goto err_state; + debugfs_create_file("state", 0400, chan, dt, &chan_state_fops); + debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops); + debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops); return chan; - -err_state: - debugfs_remove_recursive(chan); - return NULL; } static void pxad_init_debugfs(struct pxad_device *pdev) @@ -358,40 +344,20 @@ static void pxad_init_debugfs(struct pxad_device *pdev) int i; struct dentry *chandir; - pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL); - if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root) - goto err_root; - - pdev->dbgfs_state = debugfs_create_file("state", 0400, 
pdev->dbgfs_root, - pdev, &state_fops); - if (!pdev->dbgfs_state) - goto err_state; - pdev->dbgfs_chan = - kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state), + kmalloc_array(pdev->nr_chans, sizeof(struct dentry *), GFP_KERNEL); if (!pdev->dbgfs_chan) - goto err_alloc; + return; + + pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL); + + debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops); chandir = debugfs_create_dir("channels", pdev->dbgfs_root); - if (!chandir) - goto err_chandir; - for (i = 0; i < pdev->nr_chans; i++) { + for (i = 0; i < pdev->nr_chans; i++) pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir); - if (!pdev->dbgfs_chan[i]) - goto err_chans; - } - - return; -err_chans: -err_chandir: - kfree(pdev->dbgfs_chan); -err_alloc: -err_state: - debugfs_remove_recursive(pdev->dbgfs_root); -err_root: - pr_err("pxad: debugfs is not available\n"); } static void pxad_cleanup_debugfs(struct pxad_device *pdev) From bea696c5ce1e165a95699acd2c4fc34c43177a08 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 12 Jun 2019 14:25:56 +0200 Subject: [PATCH 43/68] dmaengine: mic_x100_dma: no need to check return value of debugfs_create functions When calling debugfs functions, there is no need to ever check the return value. The function can work or not, but the code logic should never do something different based on this. Cc: Sudeep Dutt Cc: Ashutosh Dixit Cc: Dan Williams Cc: Vinod Koul Cc: dmaengine@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Greg Kroah-Hartman Reviewed-by: Sudeep Dutt Signed-off-by: Vinod Koul --- drivers/dma/mic_x100_dma.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 6a91e28d537d..584e09661507 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -728,10 +728,8 @@ static int mic_dma_driver_probe(struct mbus_device *mbdev) if (mic_dma_dbg) { mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev), mic_dma_dbg); - if (mic_dma_dev->dbg_dir) - debugfs_create_file("mic_dma_reg", 0444, - mic_dma_dev->dbg_dir, mic_dma_dev, - &mic_dma_reg_fops); + debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir, + mic_dma_dev, &mic_dma_reg_fops); } return 0; } From a08a9645a3d6f9e11a2dba2b46f0d5ac5d80c4e0 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 12 Jun 2019 14:25:57 +0200 Subject: [PATCH 44/68] dmaengine: qcom: hidma: no need to check return value of debugfs_create functions When calling debugfs functions, there is no need to ever check the return value. The function can work or not, but the code logic should never do something different based on this. Also, because there is no need to save the file dentry, remove the variables that were saving them as they were never even being used once set. 
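The teardown side is equally tolerant: debugfs_remove_recursive() quietly accepts NULL and error dentries, which is why hidma_debug_init() can drop its error paths and return void. A hedged sketch of the resulting init/uninit pair (bar_dev and bar_fops are placeholders, not the hidma code itself):

#include <linux/debugfs.h>

static void bar_debug_init(struct bar_dev *bar)
{
	bar->debugfs = debugfs_create_dir(dev_name(bar->dev), NULL);
	debugfs_create_file("stats", 0444, bar->debugfs, bar, &bar_fops);
}

static void bar_debug_uninit(struct bar_dev *bar)
{
	/* Safe even if creation failed above */
	debugfs_remove_recursive(bar->debugfs);
}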
Cc: Sinan Kaya Cc: Andy Gross Cc: David Brown Cc: Dan Williams Cc: Vinod Koul Cc: linux-arm-kernel@lists.infradead.org Cc: linux-arm-msm@vger.kernel.org Cc: dmaengine@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Greg Kroah-Hartman Acked-by: Sinan Kaya Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma.h | 5 +---- drivers/dma/qcom/hidma_dbg.c | 37 +++++++----------------------------- 2 files changed, 8 insertions(+), 34 deletions(-) diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index 5f9966e82c0b..36357d02333a 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h @@ -101,8 +101,6 @@ struct hidma_chan { * It is used by the DMA complete notification to * locate the descriptor that initiated the transfer. */ - struct dentry *debugfs; - struct dentry *stats; struct hidma_dev *dmadev; struct hidma_desc *running; @@ -134,7 +132,6 @@ struct hidma_dev { struct dma_device ddev; struct dentry *debugfs; - struct dentry *stats; /* sysfs entry for the channel id */ struct device_attribute *chid_attrs; @@ -166,6 +163,6 @@ irqreturn_t hidma_ll_inthandler(int irq, void *arg); irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause); void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, u8 err_code); -int hidma_debug_init(struct hidma_dev *dmadev); +void hidma_debug_init(struct hidma_dev *dmadev); void hidma_debug_uninit(struct hidma_dev *dmadev); #endif diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c index 9523faf7acdc..994f448b64d8 100644 --- a/drivers/dma/qcom/hidma_dbg.c +++ b/drivers/dma/qcom/hidma_dbg.c @@ -146,17 +146,13 @@ void hidma_debug_uninit(struct hidma_dev *dmadev) debugfs_remove_recursive(dmadev->debugfs); } -int hidma_debug_init(struct hidma_dev *dmadev) +void hidma_debug_init(struct hidma_dev *dmadev) { - int rc = 0; int chidx = 0; struct list_head *position = NULL; + struct dentry *dir; dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL); - if (!dmadev->debugfs) { - rc = -ENODEV; - return rc; - } /* walk through the virtual channel list */ list_for_each(position, &dmadev->ddev.channels) { @@ -165,32 +161,13 @@ int hidma_debug_init(struct hidma_dev *dmadev) chan = list_entry(position, struct hidma_chan, chan.device_node); sprintf(chan->dbg_name, "chan%d", chidx); - chan->debugfs = debugfs_create_dir(chan->dbg_name, + dir = debugfs_create_dir(chan->dbg_name, dmadev->debugfs); - if (!chan->debugfs) { - rc = -ENOMEM; - goto cleanup; - } - chan->stats = debugfs_create_file("stats", S_IRUGO, - chan->debugfs, chan, - &hidma_chan_fops); - if (!chan->stats) { - rc = -ENOMEM; - goto cleanup; - } + debugfs_create_file("stats", S_IRUGO, dir, chan, + &hidma_chan_fops); chidx++; } - dmadev->stats = debugfs_create_file("stats", S_IRUGO, - dmadev->debugfs, dmadev, - &hidma_dma_fops); - if (!dmadev->stats) { - rc = -ENOMEM; - goto cleanup; - } - - return 0; -cleanup: - hidma_debug_uninit(dmadev); - return rc; + debugfs_create_file("stats", S_IRUGO, dmadev->debugfs, dmadev, + &hidma_dma_fops); } From 09d5b702b0f97dd0768dade7106b6bf7622b1355 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 6 Jun 2019 13:45:47 +0300 Subject: [PATCH 45/68] dmaengine: virt-dma: store result on dma descriptor This allows each virtual channel to store information about each transfer that completed, i.e. which transfer succeeded (or which failed) and if there was any residue data on each (completed) transfer. 
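On the consumer side this is surfaced through the descriptor's callback_result hook, which receives a struct dmaengine_result instead of no argument. A hedged sketch of a client using it (baz_* names are hypothetical; assumes an already-configured slave channel):

#include <linux/dmaengine.h>

static void baz_dma_done(void *param, const struct dmaengine_result *result)
{
	struct baz_dev *baz = param;

	if (result->result != DMA_TRANS_NOERROR)
		dev_warn(baz->dev, "transfer failed: %d\n", result->result);
	else if (result->residue)
		dev_dbg(baz->dev, "short transfer, %u bytes left\n",
			result->residue);
}

static int baz_start_rx(struct baz_dev *baz, struct dma_chan *chan,
			dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = baz_dma_done;
	desc->callback_param = baz;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}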
Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/virt-dma.c | 4 ++-- drivers/dma/virt-dma.h | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 88ad8ed2a8d6..bf560a20c8a8 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -101,7 +101,7 @@ static void vchan_complete(unsigned long arg) } spin_unlock_irq(&vc->lock); - dmaengine_desc_callback_invoke(&cb, NULL); + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); list_for_each_entry_safe(vd, _vd, &head, node) { dmaengine_desc_get_callback(&vd->tx, &cb); @@ -109,7 +109,7 @@ static void vchan_complete(unsigned long arg) list_del(&vd->node); vchan_vdesc_fini(vd); - dmaengine_desc_callback_invoke(&cb, NULL); + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); } } diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index b09b75ab0751..eb767c583b7e 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -17,6 +17,7 @@ struct virt_dma_desc { struct dma_async_tx_descriptor tx; + struct dmaengine_result tx_result; /* protected by vc.lock */ struct list_head node; }; @@ -65,6 +66,9 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan vd->tx.tx_submit = vchan_tx_submit; vd->tx.desc_free = vchan_tx_desc_free; + vd->tx_result.result = DMA_TRANS_NOERROR; + vd->tx_result.residue = 0; + spin_lock_irqsave(&vc->lock, flags); list_add_tail(&vd->node, &vc->desc_allocated); spin_unlock_irqrestore(&vc->lock, flags); From e3923592f80ba93aaa35b8d49b1f0a5b31db6b7f Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 6 Jun 2019 13:45:48 +0300 Subject: [PATCH 46/68] dmaengine: axi-dmac: populate residue info for completed xfers Starting with version 4.2.a, the AXI DMAC controller can report partial transfers that have been issued. This change implements computing DMA residue information for transfers, based on that reported information. The way this is done, is to dequeue the partial transfers from the FIFO of partial transfers, store the partial length to the correct segment & descriptor, and compute the residue before submitting the DMA cookie to the DMA framework. 
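To make the arithmetic concrete with an illustrative case: given three 512-byte 1D segments, if segment 0 completes fully and segment 1 finishes short with partial_len = 100, then start = num_completed - 1 = 1 and the residue is (512 - 100) for the partial segment plus 512 for the never-started segment 2, i.e. 924 bytes. For 2D transfers the per-segment total is x_len * y_len instead.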
Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 99 +++++++++++++++++++++++++++++++++++++- 1 file changed, 98 insertions(+), 1 deletion(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 9e6b8940e8d2..0101c0ae24c8 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -63,6 +63,8 @@ #define AXI_DMAC_REG_STATUS 0x430 #define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434 #define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438 +#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c +#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450 #define AXI_DMAC_CTRL_ENABLE BIT(0) #define AXI_DMAC_CTRL_PAUSE BIT(1) @@ -72,6 +74,9 @@ #define AXI_DMAC_FLAG_CYCLIC BIT(0) #define AXI_DMAC_FLAG_LAST BIT(1) +#define AXI_DMAC_FLAG_PARTIAL_REPORT BIT(2) + +#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31) /* The maximum ID allocated by the hardware is 31 */ #define AXI_DMAC_SG_UNUSED 32U @@ -84,6 +89,7 @@ struct axi_dmac_sg { unsigned int dest_stride; unsigned int src_stride; unsigned int id; + unsigned int partial_len; bool schedule_when_free; }; @@ -113,6 +119,7 @@ struct axi_dmac_chan { unsigned int address_align_mask; unsigned int length_align_mask; + bool hw_partial_xfer; bool hw_cyclic; bool hw_2d; }; @@ -244,6 +251,9 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) desc->num_sgs == 1) flags |= AXI_DMAC_FLAG_CYCLIC; + if (chan->hw_partial_xfer) + flags |= AXI_DMAC_FLAG_PARTIAL_REPORT; + axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1); axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1); axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags); @@ -256,6 +266,82 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan) struct axi_dmac_desc, vdesc.node); } +static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan, + struct axi_dmac_sg *sg) +{ + if (chan->hw_2d) + return sg->x_len * sg->y_len; + else + return sg->x_len; +} + +static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan) +{ + struct axi_dmac *dmac = chan_to_axi_dmac(chan); + struct axi_dmac_desc *desc; + struct axi_dmac_sg *sg; + u32 xfer_done, len, id, i; + bool found_sg; + + do { + len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN); + id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID); + + found_sg = false; + list_for_each_entry(desc, &chan->active_descs, vdesc.node) { + for (i = 0; i < desc->num_sgs; i++) { + sg = &desc->sg[i]; + if (sg->id == AXI_DMAC_SG_UNUSED) + continue; + if (sg->id == id) { + sg->partial_len = len; + found_sg = true; + break; + } + } + if (found_sg) + break; + } + + if (found_sg) { + dev_dbg(dmac->dma_dev.dev, + "Found partial segment id=%u, len=%u\n", + id, len); + } else { + dev_warn(dmac->dma_dev.dev, + "Not found partial segment id=%u, len=%u\n", + id, len); + } + + /* Check if we have any more partial transfers */ + xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE); + xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE); + + } while (!xfer_done); +} + +static void axi_dmac_compute_residue(struct axi_dmac_chan *chan, + struct axi_dmac_desc *active) +{ + struct dmaengine_result *rslt = &active->vdesc.tx_result; + unsigned int start = active->num_completed - 1; + struct axi_dmac_sg *sg; + unsigned int i, total; + + rslt->result = DMA_TRANS_NOERROR; + rslt->residue = 0; + + /* + * We get here if the last completed segment is partial, which + * means we can compute the residue from that segment onwards + */ + for (i = start; i < active->num_sgs; i++) { + sg = 
&active->sg[i]; + total = axi_dmac_total_sg_bytes(chan, sg); + rslt->residue += (total - sg->partial_len); + } +} + static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, unsigned int completed_transfers) { @@ -267,6 +353,10 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, if (!active) return false; + if (chan->hw_partial_xfer && + (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE)) + axi_dmac_dequeue_partial_xfers(chan); + do { sg = &active->sg[active->num_completed]; if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */ @@ -280,10 +370,14 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, start_next = true; } + if (sg->partial_len) + axi_dmac_compute_residue(chan, active); + if (active->cyclic) vchan_cyclic_callback(&active->vdesc); - if (active->num_completed == active->num_sgs) { + if (active->num_completed == active->num_sgs || + sg->partial_len) { if (active->cyclic) { active->num_completed = 0; /* wrap around */ } else { @@ -674,6 +768,9 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac) return -ENODEV; } + if (version >= ADI_AXI_PCORE_VER(4, 2, 'a')) + chan->hw_partial_xfer = true; + if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) { axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00); chan->length_align_mask = From e28d915528136449ed261a743dfa3f392f555045 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 6 Jun 2019 13:45:49 +0300 Subject: [PATCH 47/68] dmaengine: axi-dmac: terminate early DMA transfers after a partial one When a partial transfer is received, the driver should not submit any more segments to the hardware, as they will be ignored/unused until a new transfer start operation is done. This change implements this by adding a new flag on the AXI DMAC descriptor. This flags is set to true, if there was a partial transfer in a previously completed segment. When that flag is true, the TLAST flag is added to the to the submitted segment, signaling the controller to stop receiving more segments. Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 0101c0ae24c8..368f08cc29aa 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -96,6 +96,7 @@ struct axi_dmac_sg { struct axi_dmac_desc { struct virt_dma_desc vdesc; bool cyclic; + bool have_partial_xfer; unsigned int num_submitted; unsigned int num_completed; @@ -220,7 +221,8 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) } desc->num_submitted++; - if (desc->num_submitted == desc->num_sgs) { + if (desc->num_submitted == desc->num_sgs || + desc->have_partial_xfer) { if (desc->cyclic) desc->num_submitted = 0; /* Start again */ else @@ -294,6 +296,7 @@ static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan) if (sg->id == AXI_DMAC_SG_UNUSED) continue; if (sg->id == id) { + desc->have_partial_xfer = true; sg->partial_len = len; found_sg = true; break; From fc15be39a827e6e417431a8dc5ca2612d0b3e944 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 6 Jun 2019 13:45:50 +0300 Subject: [PATCH 48/68] dmaengine: axi-dmac: add regmap support The registers for AXI DMAC are detailed at: https://wiki.analog.com/resources/fpga/docs/axi_dmac#register_map This change adds regmap support for these registers, in case some wants to have a more direct access to them via this interface. 
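Once registered, the regmap is attached to the DMAC's device and can be looked up without touching dmac->base directly. A hedged sketch of such a lookup (dmac_dev stands in for the DMAC's struct device; 0x430 is AXI_DMAC_REG_STATUS from the map above):

#include <linux/regmap.h>

static void dmac_dump_status(struct device *dmac_dev)
{
	struct regmap *map = dev_get_regmap(dmac_dev, NULL);
	unsigned int status;

	if (map && !regmap_read(map, 0x430, &status))
		dev_info(dmac_dev, "AXI-DMAC status: 0x%x\n", status);
}

In practice the main payoff is typically the regmap debugfs interface, which exposes a read-only register dump for free.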
Signed-off-by: Alexandru Ardelean [vkoul: fixed code style issue] Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 1 + drivers/dma/dma-axi-dmac.c | 41 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 76859aa2688c..b1d109d62c15 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -102,6 +102,7 @@ config AXI_DMAC depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS + select REGMAP_MMIO help Enable support for the Analog Devices AXI-DMAC peripheral. This DMA controller is often used in Analog Device's reference designs for FPGA diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 368f08cc29aa..c12bdc7832b1 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -678,6 +679,44 @@ static void axi_dmac_desc_free(struct virt_dma_desc *vdesc) kfree(container_of(vdesc, struct axi_dmac_desc, vdesc)); } +static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg) +{ + switch (reg) { + case AXI_DMAC_REG_IRQ_MASK: + case AXI_DMAC_REG_IRQ_SOURCE: + case AXI_DMAC_REG_IRQ_PENDING: + case AXI_DMAC_REG_CTRL: + case AXI_DMAC_REG_TRANSFER_ID: + case AXI_DMAC_REG_START_TRANSFER: + case AXI_DMAC_REG_FLAGS: + case AXI_DMAC_REG_DEST_ADDRESS: + case AXI_DMAC_REG_SRC_ADDRESS: + case AXI_DMAC_REG_X_LENGTH: + case AXI_DMAC_REG_Y_LENGTH: + case AXI_DMAC_REG_DEST_STRIDE: + case AXI_DMAC_REG_SRC_STRIDE: + case AXI_DMAC_REG_TRANSFER_DONE: + case AXI_DMAC_REG_ACTIVE_TRANSFER_ID: + case AXI_DMAC_REG_STATUS: + case AXI_DMAC_REG_CURRENT_SRC_ADDR: + case AXI_DMAC_REG_CURRENT_DEST_ADDR: + case AXI_DMAC_REG_PARTIAL_XFER_LEN: + case AXI_DMAC_REG_PARTIAL_XFER_ID: + return true; + default: + return false; + } +} + +static const struct regmap_config axi_dmac_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = AXI_DMAC_REG_PARTIAL_XFER_ID, + .readable_reg = axi_dmac_regmap_rdwr, + .writeable_reg = axi_dmac_regmap_rdwr, +}; + /* * The configuration stored in the devicetree matches the configuration * parameters of the peripheral instance and allows the driver to know which @@ -882,6 +921,8 @@ static int axi_dmac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dmac); + devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); + return 0; err_unregister_of: From 0ed91bded307cc980f9542eff861266f1744c303 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 11 Jun 2019 10:34:32 -0500 Subject: [PATCH 49/68] dt-bindings: pl330: document the optional resets property Add the optional resets property the pl330 dma node. Signed-off-by: Dinh Nguyen Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/arm-pl330.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/arm-pl330.txt b/Documentation/devicetree/bindings/dma/arm-pl330.txt index db7e2260f9c5..2c7fd1941abb 100644 --- a/Documentation/devicetree/bindings/dma/arm-pl330.txt +++ b/Documentation/devicetree/bindings/dma/arm-pl330.txt @@ -16,6 +16,9 @@ Optional properties: - dma-channels: contains the total number of DMA channels supported by the DMAC - dma-requests: contains the total number of DMA requests supported by the DMAC - arm,pl330-broken-no-flushp: quirk for avoiding to execute DMAFLUSHP + - resets: contains an entry for each entry in reset-names. 
+ See ../reset/reset.txt for details. + - reset-names: must contain at least "dma", and optional is "dma-ocp". Example: From 0eaab70a7a1b70ec39e61817553321b3bc638156 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 11 Jun 2019 10:34:33 -0500 Subject: [PATCH 50/68] dmagengine: pl330: add code to get reset property The DMA controller on some SoCs can be held in reset, and thus requires the reset signal(s) to deasserted. Most SoCs will have just one reset signal, but there are others, i.e. Arria10/Stratix10 will have an additional reset signal, referred to as the OCP. Add code to get the reset property from the device tree for deassert and assert. Signed-off-by: Dinh Nguyen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6e6837214210..5208c6a80a39 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "dmaengine.h" #define PL330_MAX_CHAN 8 @@ -500,6 +501,9 @@ struct pl330_dmac { unsigned int num_peripherals; struct dma_pl330_chan *peripherals; /* keep at end */ int quirks; + + struct reset_control *rstc; + struct reset_control *rstc_ocp; }; static struct pl330_of_quirks { @@ -3028,6 +3032,32 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) amba_set_drvdata(adev, pl330); + pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma"); + if (IS_ERR(pl330->rstc)) { + if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER) + dev_err(&adev->dev, "Failed to get reset!\n"); + return PTR_ERR(pl330->rstc); + } else { + ret = reset_control_deassert(pl330->rstc); + if (ret) { + dev_err(&adev->dev, "Couldn't deassert the device from reset!\n"); + return ret; + } + } + + pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp"); + if (IS_ERR(pl330->rstc_ocp)) { + if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER) + dev_err(&adev->dev, "Failed to get OCP reset!\n"); + return PTR_ERR(pl330->rstc_ocp); + } else { + ret = reset_control_deassert(pl330->rstc_ocp); + if (ret) { + dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n"); + return ret; + } + } + for (i = 0; i < AMBA_NR_IRQS; i++) { irq = adev->irq[i]; if (irq) { @@ -3168,6 +3198,11 @@ probe_err3: probe_err2: pl330_del(pl330); + if (pl330->rstc_ocp) + reset_control_assert(pl330->rstc_ocp); + + if (pl330->rstc) + reset_control_assert(pl330->rstc); return ret; } @@ -3206,6 +3241,11 @@ static int pl330_remove(struct amba_device *adev) pl330_del(pl330); + if (pl330->rstc_ocp) + reset_control_assert(pl330->rstc_ocp); + + if (pl330->rstc) + reset_control_assert(pl330->rstc); return 0; } From c24a5c735f87d0549060de31367c095e8810b895 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 13 Jun 2019 16:32:32 +0300 Subject: [PATCH 51/68] dmaengine: hsu: Revert "set HSU_CH_MTSR to memory width" The commit 080edf75d337 ("dmaengine: hsu: set HSU_CH_MTSR to memory width") has been mistakenly submitted. The further investigations show that the original code does better job since the memory side transfer size has never been configured by DMA users. As per latest revision of documentation: "Channel minimum transfer size (CHnMTSR)... For IOSF UART, maximum value that can be programmed is 64 and minimum value that can be programmed is 1." This reverts commit 080edf75d337d35faa6fc3df99342b10d2848d16. 
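The device-side width it falls back to is whatever the UART (or other client) programs via dmaengine_slave_config(); the memory-side width was never set by clients, which is why the reverted mapping broke. A hedged sketch of a client configuration for the TX direction (address and values illustrative):

	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_phys,	/* hypothetical FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst = 16,
	};

	dmaengine_slave_config(chan, &cfg);

With the revert, the MEM_TO_DEV path takes mtsr from dst_addr_width (1 here), staying inside the documented 1..64 range for CHnMTSR.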
Fixes: 080edf75d337 ("dmaengine: hsu: set HSU_CH_MTSR to memory width") Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/hsu/hsu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index e06f20272fd7..dfabc64c2ab0 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c @@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc) if (hsuc->direction == DMA_MEM_TO_DEV) { bsr = config->dst_maxburst; - mtsr = config->src_addr_width; + mtsr = config->dst_addr_width; } else if (hsuc->direction == DMA_DEV_TO_MEM) { bsr = config->src_maxburst; - mtsr = config->dst_addr_width; + mtsr = config->src_addr_width; } hsu_chan_disable(hsuc); From 002905eca5bedab08bafd9e325bbbb41670c7712 Mon Sep 17 00:00:00 2001 From: Peng Ma Date: Thu, 13 Jun 2019 10:27:08 +0000 Subject: [PATCH 52/68] dmaengine: fsl-edma: support little endian for edma driver Our platforms with below registers(CHCFG0 - CHCFG15) of eDMA *-----------------------------------------------------------* | Offset | Big endian Register| Little endian Register| |--------------|--------------------|-----------------------| | 0x0 | CHCFG0 | CHCFG3 | |--------------|--------------------|-----------------------| | 0x1 | CHCFG1 | CHCFG2 | |--------------|--------------------|-----------------------| | 0x2 | CHCFG2 | CHCFG1 | |--------------|--------------------|-----------------------| | 0x3 | CHCFG3 | CHCFG0 | |--------------|--------------------|-----------------------| | ... | ...... | ...... | |--------------|--------------------|-----------------------| | 0xC | CHCFG12 | CHCFG15 | |--------------|--------------------|-----------------------| | 0xD | CHCFG13 | CHCFG14 | |--------------|--------------------|-----------------------| | 0xE | CHCFG14 | CHCFG13 | |--------------|--------------------|-----------------------| | 0xF | CHCFG15 | CHCFG12 | *-----------------------------------------------------------* Current eDMA driver does not support Little endian, so this patch is to improve edma driver to support little endian. Signed-off-by: Peng Ma Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma-common.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 680b2a00a953..6bf238e19d91 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -83,9 +83,14 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, u32 ch = fsl_chan->vchan.chan.chan_id; void __iomem *muxaddr; unsigned int chans_per_mux, ch_off; + int endian_diff[4] = {3, 1, -1, -3}; chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; + + if (!fsl_chan->edma->big_endian) + ch_off += endian_diff[ch_off % 4]; + muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; slot = EDMAMUX_CHCFG_SOURCE(slot); From a183ec708b7318bec6bb8a6593f9f7e9bb3fdf4e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 14 Jun 2019 14:06:04 +0300 Subject: [PATCH 53/68] dmaengine: dw: Distinguish ->remove() between DW and iDMA 32-bit In the same way as done for ->probe(), call ->remove() based on the type of the hardware. While it works now due to equivalency of the two removal functions, it might be changed in the future. 
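The devm_kmemdup() below follows a general pattern: driver_data in the id table points at shared const data, so any per-device field (here the chip pointer) needs a writable private copy. A hedged, generic sketch (my_pci_data is a placeholder type):

	const struct my_pci_data *drv_data = (void *)pid->driver_data;
	struct my_pci_data *data;

	data = devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->chip = chip;	/* per-device state, safe to write */
	pci_set_drvdata(pdev, data);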
Signed-off-by: Andy Shevchenko Acked-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw/pci.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index e79a75db0852..6aa1355ded6c 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -15,10 +15,13 @@ struct dw_dma_pci_data { const struct dw_dma_platform_data *pdata; int (*probe)(struct dw_dma_chip *chip); + int (*remove)(struct dw_dma_chip *chip); + struct dw_dma_chip *chip; }; static const struct dw_dma_pci_data dw_pci_data = { .probe = dw_dma_probe, + .remove = dw_dma_remove, }; static const struct dw_dma_platform_data idma32_pdata = { @@ -34,11 +37,13 @@ static const struct dw_dma_platform_data idma32_pdata = { static const struct dw_dma_pci_data idma32_pci_data = { .pdata = &idma32_pdata, .probe = idma32_dma_probe, + .remove = idma32_dma_remove, }; static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { - const struct dw_dma_pci_data *data = (void *)pid->driver_data; + const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data; + struct dw_dma_pci_data *data; struct dw_dma_chip *chip; int ret; @@ -63,6 +68,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) if (ret) return ret; + data = devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; @@ -73,21 +82,24 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) chip->irq = pdev->irq; chip->pdata = data->pdata; + data->chip = chip; + ret = data->probe(chip); if (ret) return ret; - pci_set_drvdata(pdev, chip); + pci_set_drvdata(pdev, data); return 0; } static void dw_pci_remove(struct pci_dev *pdev) { - struct dw_dma_chip *chip = pci_get_drvdata(pdev); + struct dw_dma_pci_data *data = pci_get_drvdata(pdev); + struct dw_dma_chip *chip = data->chip; int ret; - ret = dw_dma_remove(chip); + ret = data->remove(chip); if (ret) dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); } @@ -96,16 +108,16 @@ static void dw_pci_remove(struct pci_dev *pdev) static int dw_pci_suspend_late(struct device *dev) { - struct pci_dev *pci = to_pci_dev(dev); - struct dw_dma_chip *chip = pci_get_drvdata(pci); + struct dw_dma_pci_data *data = dev_get_drvdata(dev); + struct dw_dma_chip *chip = data->chip; return do_dw_dma_disable(chip); }; static int dw_pci_resume_early(struct device *dev) { - struct pci_dev *pci = to_pci_dev(dev); - struct dw_dma_chip *chip = pci_get_drvdata(pci); + struct dw_dma_pci_data *data = dev_get_drvdata(dev); + struct dw_dma_chip *chip = data->chip; return do_dw_dma_enable(chip); }; From ed04b7c57c3383ed4573f1d1d1dbdc1108ba0bed Mon Sep 17 00:00:00 2001 From: "Hook, Gary" Date: Tue, 18 Jun 2019 22:03:04 +0000 Subject: [PATCH 54/68] dmaengine: dmatest: timeout value of -1 should specify infinite wait The dmatest module parameter 'timeout' is documented as accepting a -1 to mean "infinite timeout". However, an infinite timeout is not advised, nor possible since the module parameter is an unsigned int, which won't accept a negative value. Change the parameter comment to reflect current behavior, which allows values from 0 up to 4294967295 (0xFFFFFFFF). 
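The type is the whole story here: a uint module parameter is parsed by param_set_uint() via kstrtouint(), which rejects a leading '-' outright, so "timeout=-1" would fail at load time rather than mean "infinite". A minimal illustration of the corrected declaration:

	static unsigned int timeout = 3000;
	module_param(timeout, uint, 0644);
	MODULE_PARM_DESC(timeout, "Transfer timeout in msec (default: 3000)");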
Signed-off-by: Gary R Hook Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index b96814a7dceb..e0c229aa1353 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -65,7 +65,7 @@ MODULE_PARM_DESC(pq_sources, static int timeout = 3000; module_param(timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " - "Pass -1 for infinite timeout"); + "Pass 0xFFFFFFFF (4294967295) for maximum timeout"); static bool noverify; module_param(noverify, bool, S_IRUGO | S_IWUSR); @@ -97,7 +97,7 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default * @iterations: iterations before stopping test * @xor_sources: number of xor source buffers * @pq_sources: number of p+q source buffers - * @timeout: transfer timeout in msec, -1 for infinite timeout + * @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295) */ struct dmatest_params { unsigned int buf_size; @@ -108,7 +108,7 @@ struct dmatest_params { unsigned int iterations; unsigned int xor_sources; unsigned int pq_sources; - int timeout; + unsigned int timeout; bool noverify; bool norandom; int alignment; From e3dc00b936e5a60ca27be62008482f13bfbc06e3 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 19 Jun 2019 14:45:55 +0200 Subject: [PATCH 55/68] dmaengine: sh: usb-dmac: Use [] to denote a flexible array member Flexible array members should be denoted using [] instead of [0], else gcc will not warn when they are no longer at the end of the structure. Signed-off-by: Geert Uytterhoeven Reviewed-by: Simon Horman Signed-off-by: Vinod Koul --- drivers/dma/sh/usb-dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 0afabf395930..17063aaf51bc 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -57,7 +57,7 @@ struct usb_dmac_desc { u32 residue; struct list_head node; dma_cookie_t done_cookie; - struct usb_dmac_sg sg[0]; + struct usb_dmac_sg sg[]; }; #define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd) From 2b651ecfc39fb64359ce172c5b589d52cb224ad3 Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Sun, 16 Jun 2019 00:05:50 +0800 Subject: [PATCH 56/68] dmaengine: dw-edma: fix semicolon.cocci warnings drivers/dma/dw-edma/dw-edma-core.c:617:2-3: Unneeded semicolon Remove unneeded semicolon. Generated by: scripts/coccinelle/misc/semicolon.cocci Fixes: e63d79d1ffcd ("dmaengine: Add Synopsys eDMA IP core driver") CC: Gustavo Pimentel Signed-off-by: kbuild test robot Acked-by: Gustavo Pimentel Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/dw-edma-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index 60d6a4690020..ff392c01bad1 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -615,7 +615,7 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan) return; cpu_relax(); - }; + } pm_runtime_put(chan->chip->dev); } From 9e5ab0655ef98cc1261409f94a42219dfb68cc22 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 21 Jun 2019 16:19:14 +0300 Subject: [PATCH 57/68] dmaengine: dw: Enable iDMA 32-bit on Intel Elkhart Lake Intel Elkhart Lake OSE (Offload Service Engine) provides few DMA controllers to the host. Enable them in the driver. 
Signed-off-by: Andy Shevchenko Acked-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw/pci.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 6aa1355ded6c..8de87b15a988 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -143,6 +143,11 @@ static const struct pci_device_id dw_pci_id_table[] = { { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data }, { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data }, + /* Elkhart Lake iDMA 32-bit (OSE DMA) */ + { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data }, + { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data }, + { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data }, + /* Haswell */ { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data }, From 78efb76ab4dfb8f74f290ae743f34162cd627f19 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 24 Jun 2019 14:38:18 +0200 Subject: [PATCH 58/68] dmaengine: rcar-dmac: Reject zero-length slave DMA requests While the .device_prep_slave_sg() callback rejects empty scatterlists, it still accepts single-entry scatterlists with a zero-length segment. These may happen if a driver calls dmaengine_prep_slave_single() with a zero len parameter. The corresponding DMA request will never complete, leading to messages like: rcar-dmac e7300000.dma-controller: Channel Address Error happen and DMA timeouts. Although requesting a zero-length DMA request is a driver bug, rejecting it early eases debugging. Note that the .device_prep_dma_memcpy() callback already rejects requests to copy zero bytes. Reported-by: Eugeniu Rosca Analyzed-by: Yoshihiro Shimoda Signed-off-by: Geert Uytterhoeven Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 67df54ac3294..9c41a4e42575 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1165,7 +1165,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); /* Someone calling slave DMA on a generic channel? */ - if (rchan->mid_rid < 0 || !sg_len) { + if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { dev_warn(chan->device->dev, "%s: bad parameter: len=%d, id=%d\n", __func__, sg_len, rchan->mid_rid); From fc4a9030788548414ccdd9d04aa457d4e027ecaa Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 2 Jul 2019 13:48:52 -0300 Subject: [PATCH 59/68] dmaengine: Revert "dmaengine: fsl-edma: support little endian for edma driver" This reverts commit 002905eca5bedab08bafd9e325bbbb41670c7712. Commit 002905eca5be ("dmaengine: fsl-edma: support little endian for edma driver") incorrectly assumed that there was not little endian support in the driver. This causes hangs on Vybrid, so revert it so that Vybrid systems could boot again. 
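The little-endian support that commit 002905eca5be overlooked sits in the register accessors, which already dispatch on the big_endian flag probed from the device tree. A sketch of that existing helper, reproduced from memory (treat as illustrative; see fsl-edma-common.h for the authoritative version):

static inline void edma_writel(struct fsl_edma_engine *edma,
			       u32 val, void __iomem *addr)
{
	if (edma->big_endian)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);
}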
Reported-by: Krzysztof Kozlowski Signed-off-by: Fabio Estevam Tested-by: Krzysztof Kozlowski Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma-common.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 6bf238e19d91..680b2a00a953 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -83,14 +83,9 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, u32 ch = fsl_chan->vchan.chan.chan_id; void __iomem *muxaddr; unsigned int chans_per_mux, ch_off; - int endian_diff[4] = {3, 1, -1, -3}; chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; - - if (!fsl_chan->edma->big_endian) - ch_off += endian_diff[ch_off % 4]; - muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; slot = EDMAMUX_CHCFG_SOURCE(slot); From af802728e4ab0764b2a26960a30f4cbe358a3b95 Mon Sep 17 00:00:00 2001 From: Robin Gong Date: Tue, 25 Jun 2019 17:43:19 +0800 Subject: [PATCH 60/68] dmaengine: fsl-edma: add drvdata for fsl-edma There are some differences between vf610 and next i.mx7ulp. Put such differences into static driver data for distinguishing easily at driver level. Change mcf-edma accordingly. Signed-off-by: Robin Gong Tested-by: Angelo Dureghello Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma-common.c | 29 +++++++++++------------ drivers/dma/fsl-edma-common.h | 10 +++++++- drivers/dma/fsl-edma.c | 43 ++++++++++++++++++++++++----------- drivers/dma/mcf-edma.c | 11 ++++++--- 4 files changed, 62 insertions(+), 31 deletions(-) diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 680b2a00a953..07d96894c7c4 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -47,7 +47,7 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan) struct edma_regs *regs = &fsl_chan->edma->regs; u32 ch = fsl_chan->vchan.chan.chan_id; - if (fsl_chan->edma->version == v1) { + if (fsl_chan->edma->drvdata->version == v1) { edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei); edma_writeb(fsl_chan->edma, ch, regs->serq); } else { @@ -64,7 +64,7 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan) struct edma_regs *regs = &fsl_chan->edma->regs; u32 ch = fsl_chan->vchan.chan.chan_id; - if (fsl_chan->edma->version == v1) { + if (fsl_chan->edma->drvdata->version == v1) { edma_writeb(fsl_chan->edma, ch, regs->cerq); edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei); } else { @@ -83,8 +83,9 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, u32 ch = fsl_chan->vchan.chan.chan_id; void __iomem *muxaddr; unsigned int chans_per_mux, ch_off; + u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs; - chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; + chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; slot = EDMAMUX_CHCFG_SOURCE(slot); @@ -647,28 +648,28 @@ void fsl_edma_setup_regs(struct fsl_edma_engine *edma) edma->regs.erql = edma->membase + EDMA_ERQ; edma->regs.eeil = edma->membase + EDMA_EEI; - edma->regs.serq = edma->membase + ((edma->version == v1) ? + edma->regs.serq = edma->membase + ((edma->drvdata->version == v1) ? EDMA_SERQ : EDMA64_SERQ); - edma->regs.cerq = edma->membase + ((edma->version == v1) ? + edma->regs.cerq = edma->membase + ((edma->drvdata->version == v1) ? EDMA_CERQ : EDMA64_CERQ); - edma->regs.seei = edma->membase + ((edma->version == v1) ? 
+ edma->regs.seei = edma->membase + ((edma->drvdata->version == v1) ? EDMA_SEEI : EDMA64_SEEI); - edma->regs.ceei = edma->membase + ((edma->version == v1) ? + edma->regs.ceei = edma->membase + ((edma->drvdata->version == v1) ? EDMA_CEEI : EDMA64_CEEI); - edma->regs.cint = edma->membase + ((edma->version == v1) ? + edma->regs.cint = edma->membase + ((edma->drvdata->version == v1) ? EDMA_CINT : EDMA64_CINT); - edma->regs.cerr = edma->membase + ((edma->version == v1) ? + edma->regs.cerr = edma->membase + ((edma->drvdata->version == v1) ? EDMA_CERR : EDMA64_CERR); - edma->regs.ssrt = edma->membase + ((edma->version == v1) ? + edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v1) ? EDMA_SSRT : EDMA64_SSRT); - edma->regs.cdne = edma->membase + ((edma->version == v1) ? + edma->regs.cdne = edma->membase + ((edma->drvdata->version == v1) ? EDMA_CDNE : EDMA64_CDNE); - edma->regs.intl = edma->membase + ((edma->version == v1) ? + edma->regs.intl = edma->membase + ((edma->drvdata->version == v1) ? EDMA_INTR : EDMA64_INTL); - edma->regs.errl = edma->membase + ((edma->version == v1) ? + edma->regs.errl = edma->membase + ((edma->drvdata->version == v1) ? EDMA_ERR : EDMA64_ERRL); - if (edma->version == v2) { + if (edma->drvdata->version == v2) { edma->regs.erqh = edma->membase + EDMA64_ERQH; edma->regs.eeih = edma->membase + EDMA64_EEIH; edma->regs.errh = edma->membase + EDMA64_ERRH; diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index c53f76eeb4d3..4e175560292c 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -7,6 +7,7 @@ #define _FSL_EDMA_COMMON_H_ #include +#include #include "virt-dma.h" #define EDMA_CR_EDBG BIT(1) @@ -140,17 +141,24 @@ enum edma_version { v2, /* 64ch Coldfire */ }; +struct fsl_edma_drvdata { + enum edma_version version; + u32 dmamuxs; + int (*setup_irq)(struct platform_device *pdev, + struct fsl_edma_engine *fsl_edma); +}; + struct fsl_edma_engine { struct dma_device dma_dev; void __iomem *membase; void __iomem *muxbase[DMAMUX_NR]; struct clk *muxclk[DMAMUX_NR]; struct mutex fsl_edma_mutex; + const struct fsl_edma_drvdata *drvdata; u32 n_chans; int txirq; int errirq; bool big_endian; - enum edma_version version; struct edma_regs regs; struct fsl_edma_chan chans[]; }; diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index d641ef85a634..e616425acd5f 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -96,7 +96,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; struct dma_chan *chan, *_chan; struct fsl_edma_chan *fsl_chan; - unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; + u32 dmamux_nr = fsl_edma->drvdata->dmamuxs; + unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr; if (dma_spec->args_count != 2) return NULL; @@ -184,16 +185,38 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks) clk_disable_unprepare(fsl_edma->muxclk[i]); } +static struct fsl_edma_drvdata vf610_data = { + .version = v1, + .dmamuxs = DMAMUX_NR, + .setup_irq = fsl_edma_irq_init, +}; + +static const struct of_device_id fsl_edma_dt_ids[] = { + { .compatible = "fsl,vf610-edma", .data = &vf610_data}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); + static int fsl_edma_probe(struct platform_device *pdev) { + const struct of_device_id *of_id = + of_match_device(fsl_edma_dt_ids, &pdev->dev); struct device_node *np = pdev->dev.of_node; struct fsl_edma_engine *fsl_edma; + 
const struct fsl_edma_drvdata *drvdata = NULL; struct fsl_edma_chan *fsl_chan; struct edma_regs *regs; struct resource *res; int len, chans; int ret, i; + if (of_id) + drvdata = of_id->data; + if (!drvdata) { + dev_err(&pdev->dev, "unable to find driver data\n"); + return -EINVAL; + } + ret = of_property_read_u32(np, "dma-channels", &chans); if (ret) { dev_err(&pdev->dev, "Can't get dma-channels.\n"); @@ -205,7 +228,7 @@ static int fsl_edma_probe(struct platform_device *pdev) if (!fsl_edma) return -ENOMEM; - fsl_edma->version = v1; + fsl_edma->drvdata = drvdata; fsl_edma->n_chans = chans; mutex_init(&fsl_edma->fsl_edma_mutex); @@ -217,7 +240,7 @@ static int fsl_edma_probe(struct platform_device *pdev) fsl_edma_setup_regs(fsl_edma); regs = &fsl_edma->regs; - for (i = 0; i < DMAMUX_NR; i++) { + for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) { char clkname[32]; res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); @@ -263,7 +286,7 @@ static int fsl_edma_probe(struct platform_device *pdev) } edma_writel(fsl_edma, ~0, regs->intl); - ret = fsl_edma_irq_init(pdev, fsl_edma); + ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); if (ret) return ret; @@ -295,7 +318,7 @@ static int fsl_edma_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Can't register Freescale eDMA engine. (%d)\n", ret); - fsl_disable_clocks(fsl_edma, DMAMUX_NR); + fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); return ret; } @@ -304,7 +327,7 @@ static int fsl_edma_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma. (%d)\n", ret); dma_async_device_unregister(&fsl_edma->dma_dev); - fsl_disable_clocks(fsl_edma, DMAMUX_NR); + fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); return ret; } @@ -323,7 +346,7 @@ static int fsl_edma_remove(struct platform_device *pdev) fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); - fsl_disable_clocks(fsl_edma, DMAMUX_NR); + fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); return 0; } @@ -382,12 +405,6 @@ static const struct dev_pm_ops fsl_edma_pm_ops = { .resume_early = fsl_edma_resume_early, }; -static const struct of_device_id fsl_edma_dt_ids[] = { - { .compatible = "fsl,vf610-edma", }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); - static struct platform_driver fsl_edma_driver = { .driver = { .name = "fsl-edma", diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index 7de54b2fafdb..e15bd15a9ef6 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -164,6 +164,11 @@ static void mcf_edma_irq_free(struct platform_device *pdev, free_irq(irq, mcf_edma); } +static struct fsl_edma_drvdata mcf_data = { + .version = v2, + .setup_irq = mcf_edma_irq_init, +}; + static int mcf_edma_probe(struct platform_device *pdev) { struct mcf_edma_platform_data *pdata; @@ -187,8 +192,8 @@ static int mcf_edma_probe(struct platform_device *pdev) mcf_edma->n_chans = chans; - /* Set up version for ColdFire edma */ - mcf_edma->version = v2; + /* Set up drvdata for ColdFire edma */ + mcf_edma->drvdata = &mcf_data; mcf_edma->big_endian = 1; if (!mcf_edma->n_chans) { @@ -223,7 +228,7 @@ static int mcf_edma_probe(struct platform_device *pdev) iowrite32(~0, regs->inth); iowrite32(~0, regs->intl); - ret = mcf_edma_irq_init(pdev, mcf_edma); + ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma); if (ret) return ret; From 78690bf3c4a4dea4c17f86c7ad385b5e9f5cf58b Mon Sep 17 00:00:00 2001 From: Robin Gong Date: Tue, 25 Jun 2019 
17:43:20 +0800 Subject: [PATCH 61/68] dmaengine: fsl-edma-common: move dmamux register to another single function Prepare for edmav2 on i.mx7ulp whose dmamux register is 32bit. No function impacted. Signed-off-by: Robin Gong Tested-by: Angelo Dureghello Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma-common.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 07d96894c7c4..ba74e10aa0c2 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -77,6 +77,19 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan) } EXPORT_SYMBOL_GPL(fsl_edma_disable_request); +static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr, + u32 off, u32 slot, bool enable) +{ + u8 val8; + + if (enable) + val8 = EDMAMUX_CHCFG_ENBL | slot; + else + val8 = EDMAMUX_CHCFG_DIS; + + iowrite8(val8, addr + off); +} + void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, unsigned int slot, bool enable) { @@ -90,10 +103,7 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; slot = EDMAMUX_CHCFG_SOURCE(slot); - if (enable) - iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off); - else - iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off); + mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable); } EXPORT_SYMBOL_GPL(fsl_edma_chan_mux); From b12650cc5145fa9ccb44a0baea58aa848be92c61 Mon Sep 17 00:00:00 2001 From: Robin Gong Date: Tue, 25 Jun 2019 17:43:21 +0800 Subject: [PATCH 62/68] dmaengine: fsl-edma-common: version check for v2 instead The next v3 i.mx7ulp edma is based on v1, so change version check logic for v2 instead. Signed-off-by: Robin Gong Tested-by: Angelo Dureghello Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma-common.c | 40 +++++++++++++++++------------------ 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index ba74e10aa0c2..44d92c34dec3 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -658,26 +658,26 @@ void fsl_edma_setup_regs(struct fsl_edma_engine *edma) edma->regs.erql = edma->membase + EDMA_ERQ; edma->regs.eeil = edma->membase + EDMA_EEI; - edma->regs.serq = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_SERQ : EDMA64_SERQ); - edma->regs.cerq = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_CERQ : EDMA64_CERQ); - edma->regs.seei = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_SEEI : EDMA64_SEEI); - edma->regs.ceei = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_CEEI : EDMA64_CEEI); - edma->regs.cint = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_CINT : EDMA64_CINT); - edma->regs.cerr = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_CERR : EDMA64_CERR); - edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_SSRT : EDMA64_SSRT); - edma->regs.cdne = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_CDNE : EDMA64_CDNE); - edma->regs.intl = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_INTR : EDMA64_INTL); - edma->regs.errl = edma->membase + ((edma->drvdata->version == v1) ? - EDMA_ERR : EDMA64_ERRL); + edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_SERQ : EDMA_SERQ); + edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_CERQ : EDMA_CERQ); + edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ? 
From b12650cc5145fa9ccb44a0baea58aa848be92c61 Mon Sep 17 00:00:00 2001
From: Robin Gong
Date: Tue, 25 Jun 2019 17:43:21 +0800
Subject: [PATCH 62/68] dmaengine: fsl-edma-common: version check for v2 instead

The upcoming v3 i.mx7ulp eDMA is based on v1, so invert the version
check logic to test for v2 instead.

Signed-off-by: Robin Gong
Tested-by: Angelo Dureghello
Signed-off-by: Vinod Koul
---
 drivers/dma/fsl-edma-common.c | 40 +++++++++++++++++------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index ba74e10aa0c2..44d92c34dec3 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -658,26 +658,26 @@ void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
 	edma->regs.erql = edma->membase + EDMA_ERQ;
 	edma->regs.eeil = edma->membase + EDMA_EEI;
 
-	edma->regs.serq = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_SERQ : EDMA64_SERQ);
-	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_CERQ : EDMA64_CERQ);
-	edma->regs.seei = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_SEEI : EDMA64_SEEI);
-	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_CEEI : EDMA64_CEEI);
-	edma->regs.cint = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_CINT : EDMA64_CINT);
-	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_CERR : EDMA64_CERR);
-	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_SSRT : EDMA64_SSRT);
-	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_CDNE : EDMA64_CDNE);
-	edma->regs.intl = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_INTR : EDMA64_INTL);
-	edma->regs.errl = edma->membase + ((edma->drvdata->version == v1) ?
-			EDMA_ERR : EDMA64_ERRL);
+	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_SERQ : EDMA_SERQ);
+	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CERQ : EDMA_CERQ);
+	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_SEEI : EDMA_SEEI);
+	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CEEI : EDMA_CEEI);
+	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CINT : EDMA_CINT);
+	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CERR : EDMA_CERR);
+	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_SSRT : EDMA_SSRT);
+	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CDNE : EDMA_CDNE);
+	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_INTL : EDMA_INTR);
+	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_ERRL : EDMA_ERR);
 
 	if (edma->drvdata->version == v2) {
 		edma->regs.erqh = edma->membase + EDMA64_ERQH;
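The block of ternaries above selects between the 32-channel and 64-channel register layouts one register at a time. An equivalent way to express the same choice is a per-version offset table; the following is only an illustrative refactoring sketch built from the macros the driver already defines (it assumes fsl-edma-common.h for the types), not what this series does:

/* Sketch only: pick a whole offset table per eDMA version instead of
 * one ternary per register.  The EDMA_xxx / EDMA64_xxx macros are the
 * same ones used in the hunk above.
 */
struct edma_reg_offsets {
	u32 serq, cerq, seei, ceei, cint, cerr, ssrt, cdne, intl, errl;
};

static const struct edma_reg_offsets edma_v1_offsets = {
	.serq = EDMA_SERQ, .cerq = EDMA_CERQ, .seei = EDMA_SEEI,
	.ceei = EDMA_CEEI, .cint = EDMA_CINT, .cerr = EDMA_CERR,
	.ssrt = EDMA_SSRT, .cdne = EDMA_CDNE, .intl = EDMA_INTR,
	.errl = EDMA_ERR,
};

static const struct edma_reg_offsets edma_v2_offsets = {
	.serq = EDMA64_SERQ, .cerq = EDMA64_CERQ, .seei = EDMA64_SEEI,
	.ceei = EDMA64_CEEI, .cint = EDMA64_CINT, .cerr = EDMA64_CERR,
	.ssrt = EDMA64_SSRT, .cdne = EDMA64_CDNE, .intl = EDMA64_INTL,
	.errl = EDMA64_ERRL,
};

static void example_setup_regs(struct fsl_edma_engine *edma)
{
	const struct edma_reg_offsets *o =
		(edma->drvdata->version == v2) ? &edma_v2_offsets
					       : &edma_v1_offsets;

	edma->regs.serq = edma->membase + o->serq;
	edma->regs.cerq = edma->membase + o->cerq;
	/* ... and likewise for the remaining registers ... */
}

Keeping the v1 macros in the else-branch, as the patch does, means a future v3 silently inherits the v1 layout, which is exactly the property the commit message relies on.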
From a7c5c6f6bc295d6c158db4ef9d1ca6770032669d Mon Sep 17 00:00:00 2001
From: Robin Gong
Date: Tue, 25 Jun 2019 17:43:22 +0800
Subject: [PATCH 63/68] dt-bindings: dma: fsl-edma: add new i.mx7ulp-edma

The i.mx7ulp-edma has more channel interrupts, one more clock, and
only one dmamux.

Signed-off-by: Robin Gong
Tested-by: Angelo Dureghello
Signed-off-by: Vinod Koul
---
 .../devicetree/bindings/dma/fsl-edma.txt | 44 ++++++++++++++++---
 1 file changed, 39 insertions(+), 5 deletions(-)

diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt
index 97e213e07660..29dd3ccb1235 100644
--- a/Documentation/devicetree/bindings/dma/fsl-edma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-edma.txt
@@ -9,15 +9,16 @@ group, DMAMUX0 or DMAMUX1, but not both.
 Required properties:
 - compatible :
 	- "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
+	- "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
 - reg : Specifies base physical address(s) and size of the eDMA registers.
 	The 1st region is eDMA control register's address and size.
 	The 2nd and the 3rd regions are programmable channel multiplexing
 	control register's address and size.
 - interrupts : A list of interrupt-specifiers, one for each entry in
-	interrupt-names.
-- interrupt-names : Should contain:
-	"edma-tx" - the transmission interrupt
-	"edma-err" - the error interrupt
+	interrupt-names on vf610-like SoCs. On i.mx7ulp there is one
+	transmission interrupt per channel: 16 channel interrupts in total,
+	plus 1 error interrupt listed last. No interrupt-names list is used
+	on i.mx7ulp, to keep its dts clean.
 - #dma-cells : Must be <2>.
 	The 1st cell specifies the DMAMUX(0 for DMAMUX0 and 1 for DMAMUX1).
 	Specific request source can only be multiplexed by specific channels
@@ -28,6 +29,7 @@ Required properties:
 - clock-names : A list of channel group clock names. Should contain:
 	"dmamux0" - clock name of mux0 group
 	"dmamux1" - clock name of mux1 group
+	Note: i.mx7ulp has no dmamux1 group, but adds an extra 'dma' clock.
 - clocks : A list of phandle and clock-specifier pairs, one for each entry in
 	clock-names.
 
@@ -35,6 +37,10 @@ Optional properties:
 - big-endian: If present registers and hardware scatter/gather descriptors
 	of the eDMA are implemented in big endian mode, otherwise in little
 	mode.
+- interrupt-names : Should contain the following on vf610-like SoCs (not
+	used on i.mx7ulp-like SoCs):
+	"edma-tx" - the transmission interrupt
+	"edma-err" - the error interrupt
 
 Examples:
 
@@ -52,8 +58,36 @@ edma0: dma-controller@40018000 {
 	clock-names = "dmamux0", "dmamux1";
 	clocks = <&clks VF610_CLK_DMAMUX0>,
 		 <&clks VF610_CLK_DMAMUX1>;
-};
+}; /* vf610 */
+
+edma1: dma-controller@40080000 {
+	#dma-cells = <2>;
+	compatible = "fsl,imx7ulp-edma";
+	reg = <0x40080000 0x2000>,
+	      <0x40210000 0x1000>;
+	dma-channels = <32>;
+	interrupts = ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     /* last is eDMA2-ERR interrupt */
+		     ;
+	clock-names = "dma", "dmamux0";
+	clocks = <&pcc2 IMX7ULP_CLK_DMA1>,
+		 <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
+}; /* i.mx7ulp */
 
 * DMA clients
 
 DMA client drivers that uses the DMA function must use the format described
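To see how the two-cell specifier required by #dma-cells is consumed, a client node could reference the i.mx7ulp controller as sketched below. The serial label, unit address, and request source numbers here are invented purely for illustration and are not taken from the binding:

uart_example: serial@40250000 {
	/* 1st cell: DMAMUX index (only mux 0 exists on i.mx7ulp);
	 * 2nd cell: request source number for this peripheral.
	 */
	dmas = <&edma1 0 19>, <&edma1 0 18>;
	dma-names = "tx", "rx";
};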
From 7144afd025b23b042c158582160d7d2b10a754b7 Mon Sep 17 00:00:00 2001
From: Robin Gong
Date: Tue, 25 Jun 2019 17:43:23 +0800
Subject: [PATCH 64/68] dmaengine: fsl-edma: add i.mx7ulp edma2 version support

Add edma2 for i.mx7ulp as version v3, since v2 is already used by
mcf-edma. The main changes on top of v1 are:
1. only one dmamux.
2. an additional clock, dma_clk, besides the dmamux clock.
3. 16 independent interrupts instead of only one interrupt for all
   channels.

Signed-off-by: Robin Gong
Tested-by: Angelo Dureghello
Signed-off-by: Vinod Koul
---
 drivers/dma/fsl-edma-common.c | 18 +++++++++-
 drivers/dma/fsl-edma-common.h |  4 +++
 drivers/dma/fsl-edma.c        | 66 +++++++++++++++++++++++++++++++++++
 3 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 44d92c34dec3..6d6d8a4e8e38 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -90,6 +90,19 @@ static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
 	iowrite8(val8, addr + off);
 }
 
+void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
+		     u32 off, u32 slot, bool enable)
+{
+	u32 val;
+
+	if (enable)
+		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
+	else
+		val = EDMAMUX_CHCFG_DIS;
+
+	iowrite32(val, addr + off * 4);
+}
+
 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 		       unsigned int slot, bool enable)
 {
@@ -103,7 +116,10 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
 	slot = EDMAMUX_CHCFG_SOURCE(slot);
 
-	mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
+	if (fsl_chan->edma->drvdata->version == v3)
+		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
+	else
+		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
 }
 EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
 
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 4e175560292c..5eaa2902ed39 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -125,6 +125,7 @@ struct fsl_edma_chan {
 	dma_addr_t			dma_dev_addr;
 	u32				dma_dev_size;
 	enum dma_data_direction		dma_dir;
+	char				chan_name[16];
 };
 
 struct fsl_edma_desc {
@@ -139,11 +140,13 @@ struct fsl_edma_desc {
 enum edma_version {
 	v1, /* 32ch, Vybrid, mpc57x, etc */
 	v2, /* 64ch Coldfire */
+	v3, /* 32ch, i.mx7ulp */
 };
 
 struct fsl_edma_drvdata {
 	enum edma_version	version;
 	u32			dmamuxs;
+	bool			has_dmaclk;
 	int			(*setup_irq)(struct platform_device *pdev,
 					     struct fsl_edma_engine *fsl_edma);
 };
@@ -153,6 +156,7 @@ struct fsl_edma_engine {
 	void __iomem		*membase;
 	void __iomem		*muxbase[DMAMUX_NR];
 	struct clk		*muxclk[DMAMUX_NR];
+	struct clk		*dmaclk;
 	struct mutex		fsl_edma_mutex;
 	const struct fsl_edma_drvdata *drvdata;
 	u32			n_chans;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index e616425acd5f..50fe196b0c73 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -166,6 +166,50 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
 	return 0;
 }
 
+static int
+fsl_edma2_irq_init(struct platform_device *pdev,
+		   struct fsl_edma_engine *fsl_edma)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int i, ret, irq;
+	int count;
+
+	count = of_irq_count(np);
+	dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
+	if (count <= 2) {
+		dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
+		return -EINVAL;
+	}
+	/*
+	 * 16 channel independent interrupts + 1 error interrupt on i.mx7ulp.
+	 * 2 channel share one interrupt, for example, ch0/ch16, ch1/ch17...
+	 * For now, just simply request irq without IRQF_SHARED flag, since 16
+	 * channels are enough on i.mx7ulp whose M4 domain own some peripherals.
+	 */
+	for (i = 0; i < count; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			return -ENXIO;
+
+		sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);
+
+		/* The last IRQ is for eDMA err */
+		if (i == count - 1)
+			ret = devm_request_irq(&pdev->dev, irq,
+					       fsl_edma_err_handler,
+					       0, "eDMA2-ERR", fsl_edma);
+		else
+			ret = devm_request_irq(&pdev->dev, irq,
+					       fsl_edma_tx_handler, 0,
+					       fsl_edma->chans[i].chan_name,
+					       fsl_edma);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static void fsl_edma_irq_exit(
 		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
 {
@@ -191,8 +235,16 @@ static struct fsl_edma_drvdata vf610_data = {
 	.setup_irq = fsl_edma_irq_init,
 };
 
+static struct fsl_edma_drvdata imx7ulp_data = {
+	.version = v3,
+	.dmamuxs = 1,
+	.has_dmaclk = true,
+	.setup_irq = fsl_edma2_irq_init,
+};
+
 static const struct of_device_id fsl_edma_dt_ids[] = {
 	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@@ -240,6 +292,20 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma_setup_regs(fsl_edma);
 	regs = &fsl_edma->regs;
 
+	if (drvdata->has_dmaclk) {
+		fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
+		if (IS_ERR(fsl_edma->dmaclk)) {
+			dev_err(&pdev->dev, "Missing DMA block clock.\n");
+			return PTR_ERR(fsl_edma->dmaclk);
+		}
+
+		ret = clk_prepare_enable(fsl_edma->dmaclk);
+		if (ret) {
+			dev_err(&pdev->dev, "DMA clk block failed.\n");
+			return ret;
+		}
+	}
+
 	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
 		char clkname[32];
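Note the addressing difference between the two mux helpers in the patch above: mux_configure8() treats each CHCFG register as one byte (addr + off), while mux_configure32() treats it as one 32-bit word, so the offset scales by 4 and the enable bit moves from bit 7 to bit 31 (EDMAMUX_CHCFG_ENBL << 24 on top of the low byte). A standalone sketch of the two layouts, assuming EDMAMUX_CHCFG_ENBL is 0x80 as in the 8-bit layout:

/* Sketch only: the CHCFG value and byte offset for the 8-bit (v1/v2)
 * and 32-bit (v3) DMAMUX layouts.  Constants are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define EDMAMUX_CHCFG_ENBL	0x80	/* enable bit in the 8-bit layout */

int main(void)
{
	uint32_t slot = 21;	/* arbitrary request source, for illustration */

	/* 8-bit mux: one byte per channel, enable is bit 7. */
	uint8_t val8 = EDMAMUX_CHCFG_ENBL | slot;

	/* 32-bit mux: one word per channel, so the register offset is
	 * channel * 4 and the enable bit lands at bit 31.
	 */
	uint32_t val32 = (uint32_t)EDMAMUX_CHCFG_ENBL << 24 | slot;

	printf("8-bit CHCFG:  0x%02x  at byte offset ch\n", val8);
	printf("32-bit CHCFG: 0x%08x at byte offset ch * 4\n", val32);
	return 0;
}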
From 3145d73e69ba0ab58cb302d3e6a80232bf373b7e Mon Sep 17 00:00:00 2001
From: Sameer Pujar
Date: Thu, 20 Jun 2019 21:24:19 +0530
Subject: [PATCH 65/68] dmaengine: tegra210-adma: remove PM_CLK dependency

Tegra ADMA no longer uses the pm-clk interface, so drop that
dependency from Kconfig.

Signed-off-by: Sameer Pujar
Acked-by: Jon Hunter
Signed-off-by: Vinod Koul
---
 drivers/dma/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b1d109d62c15..71fe800ef662 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -584,7 +584,7 @@ config TEGRA20_APB_DMA
 
 config TEGRA210_ADMA
 	tristate "NVIDIA Tegra210 ADMA support"
-	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
+	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help

From 9dcb98a29b6e81394fa33ca984f3aaad4d0d1393 Mon Sep 17 00:00:00 2001
From: "Hook, Gary"
Date: Mon, 24 Jun 2019 18:35:01 +0000
Subject: [PATCH 66/68] Documentation: dmaengine: clean up description of dmatest usage

Fix the formatting of the multi-channel test usage example. Call out
the note about parameter ordering and add detail on how parameters are
handled by the new version of dmatest.

Fixes: f80f9988a26d7 ("dmaengine: Documentation: Add documentation for multi chan testing")
Signed-off-by: Gary R Hook
Signed-off-by: Vinod Koul
---
 .../driver-api/dmaengine/dmatest.rst | 21 ++++++++++++-------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst
index e78d070bb468..ee268d445d38 100644
--- a/Documentation/driver-api/dmaengine/dmatest.rst
+++ b/Documentation/driver-api/dmaengine/dmatest.rst
@@ -44,7 +44,8 @@ Example of usage::
 
     dmatest.timeout=2000 dmatest.iterations=1 dmatest.channel=dma0chan0 dmatest.run=1
 
-Example of multi-channel test usage:
+Example of multi-channel test usage (new in the 5.0 kernel)::
+
     % modprobe dmatest
     % echo 2000 > /sys/module/dmatest/parameters/timeout
     % echo 1 > /sys/module/dmatest/parameters/iterations
@@ -53,15 +54,18 @@ Example of multi-channel test usage:
     % echo dma0chan2 > /sys/module/dmatest/parameters/channel
     % echo 1 > /sys/module/dmatest/parameters/run
 
-Note: the channel parameter should always be the last parameter set prior to
-running the test (setting run=1), this is because upon setting the channel
-parameter, that specific channel is requested using the dmaengine and a thread
-is created with the existing parameters. This thread is set as pending
-and will be executed once run is set to 1. Any parameters set after the thread
-is created are not applied.
+.. note::
+  For all tests, starting in the 5.0 kernel, either single- or multi-channel,
+  the channel parameter(s) must be set after all other parameters. It is at
+  that time that the existing parameter values are acquired for use by the
+  thread(s). All other parameters are shared. Therefore, if changes are made
+  to any of the other parameters, and an additional channel is specified, the
+  (shared) parameters used by all threads will take the new values.
+  After the channels are specified, each thread is set as pending. All threads
+  begin execution when the run parameter is set to 1.
 
 .. hint::
-  available channel list could be extracted by running the following command::
+  A list of available channels can be found by running the following command::
 
     % ls -1 /sys/class/dma/
 
@@ -204,6 +208,7 @@ Releasing Channels
 
 Channels can be freed by setting run to 0.
 
 Example::
+
     % echo dma0chan1 > /sys/module/dmatest/parameters/channel
     dmatest: Added 1 threads using dma0chan1
     % cat /sys/class/dma/dma0chan1/in_use

From b7f5b656ffe19e3c65f04e3adeae21ac17227126 Mon Sep 17 00:00:00 2001
From: Raag Jadav
Date: Sat, 29 Jun 2019 13:50:48 +0530
Subject: [PATCH 67/68] dmaengine: at_xdmac: check for non-empty xfers_list before invoking callback

A tx descriptor retrieved from an empty xfers_list may not have valid
pointers to its callback functions. Avoid calling
dmaengine_desc_get_callback_invoke() if xfers_list is empty.

Signed-off-by: Raag Jadav
Acked-by: Ludovic Desroches
Signed-off-by: Vinod Koul
---
 drivers/dma/at_xdmac.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index e4ae2ee46d3f..84b3c6ac31cc 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1579,11 +1579,14 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
 	struct at_xdmac_desc		*desc;
 	struct dma_async_tx_descriptor	*txd;
 
-	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
-	txd = &desc->tx_dma_desc;
+	if (!list_empty(&atchan->xfers_list)) {
+		desc = list_first_entry(&atchan->xfers_list,
+					struct at_xdmac_desc, xfer_node);
+		txd = &desc->tx_dma_desc;
 
-	if (txd->flags & DMA_PREP_INTERRUPT)
-		dmaengine_desc_get_callback_invoke(txd, NULL);
+		if (txd->flags & DMA_PREP_INTERRUPT)
+			dmaengine_desc_get_callback_invoke(txd, NULL);
+	}
 }
 
 static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
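The bug pattern fixed here is general: on an empty list, list_first_entry() hands back the container of the list head itself rather than a real element, so any field access through the result is garbage. A minimal sketch of the safe idiom, using a made-up item type rather than the at_xdmac structures:

/* Sketch only: check list_empty() before list_first_entry(), which is
 * exactly the shape of the at_xdmac fix above.
 */
#include <linux/list.h>

struct item {
	int payload;
	struct list_head node;
};

/* Returns the first item's payload, or -1 if the list is empty. */
static int first_payload(struct list_head *head)
{
	struct item *it;

	if (list_empty(head))
		return -1;

	it = list_first_entry(head, struct item, node);
	return it->payload;
}

The kernel also provides list_first_entry_or_null() for this check-then-fetch pattern, which returns NULL instead of an aliased head on an empty list.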
From 5c274ca4cfb22a455e880f61536b1894fa29fd17 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Mon, 8 Jul 2019 09:42:55 +0530
Subject: [PATCH 68/68] dmaengine: Revert "dmaengine: fsl-edma: add i.mx7ulp edma2 version support"

This reverts commit 7144afd025b2 ("dmaengine: fsl-edma: add i.mx7ulp
edma2 version support"), as it fails to build as a module because it
uses of_irq_count(), which is not an exported symbol; kernel drivers
are *not* expected to use it (rightly so).

Signed-off-by: Vinod Koul
---
 drivers/dma/fsl-edma-common.c | 18 +---------
 drivers/dma/fsl-edma-common.h |  4 ---
 drivers/dma/fsl-edma.c        | 66 -----------------------------------
 3 files changed, 1 insertion(+), 87 deletions(-)

diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 6d6d8a4e8e38..44d92c34dec3 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -90,19 +90,6 @@ static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
 	iowrite8(val8, addr + off);
 }
 
-void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
-		     u32 off, u32 slot, bool enable)
-{
-	u32 val;
-
-	if (enable)
-		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
-	else
-		val = EDMAMUX_CHCFG_DIS;
-
-	iowrite32(val, addr + off * 4);
-}
-
 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 		       unsigned int slot, bool enable)
 {
@@ -116,10 +103,7 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
 	slot = EDMAMUX_CHCFG_SOURCE(slot);
 
-	if (fsl_chan->edma->drvdata->version == v3)
-		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
-	else
-		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
+	mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
 }
 EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
 
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 5eaa2902ed39..4e175560292c 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -125,7 +125,6 @@ struct fsl_edma_chan {
 	dma_addr_t			dma_dev_addr;
 	u32				dma_dev_size;
 	enum dma_data_direction		dma_dir;
-	char				chan_name[16];
 };
 
 struct fsl_edma_desc {
@@ -140,13 +139,11 @@ struct fsl_edma_desc {
 enum edma_version {
 	v1, /* 32ch, Vybrid, mpc57x, etc */
 	v2, /* 64ch Coldfire */
-	v3, /* 32ch, i.mx7ulp */
 };
 
 struct fsl_edma_drvdata {
 	enum edma_version	version;
 	u32			dmamuxs;
-	bool			has_dmaclk;
 	int			(*setup_irq)(struct platform_device *pdev,
 					     struct fsl_edma_engine *fsl_edma);
 };
@@ -156,7 +153,6 @@ struct fsl_edma_engine {
 	void __iomem		*membase;
 	void __iomem		*muxbase[DMAMUX_NR];
 	struct clk		*muxclk[DMAMUX_NR];
-	struct clk		*dmaclk;
 	struct mutex		fsl_edma_mutex;
 	const struct fsl_edma_drvdata *drvdata;
 	u32			n_chans;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 50fe196b0c73..e616425acd5f 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -166,50 +166,6 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
 	return 0;
 }
 
-static int
-fsl_edma2_irq_init(struct platform_device *pdev,
-		   struct fsl_edma_engine *fsl_edma)
-{
-	struct device_node *np = pdev->dev.of_node;
-	int i, ret, irq;
-	int count;
-
-	count = of_irq_count(np);
-	dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
-	if (count <= 2) {
-		dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
-		return -EINVAL;
-	}
-	/*
-	 * 16 channel independent interrupts + 1 error interrupt on i.mx7ulp.
-	 * 2 channel share one interrupt, for example, ch0/ch16, ch1/ch17...
-	 * For now, just simply request irq without IRQF_SHARED flag, since 16
-	 * channels are enough on i.mx7ulp whose M4 domain own some peripherals.
-	 */
-	for (i = 0; i < count; i++) {
-		irq = platform_get_irq(pdev, i);
-		if (irq < 0)
-			return -ENXIO;
-
-		sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);
-
-		/* The last IRQ is for eDMA err */
-		if (i == count - 1)
-			ret = devm_request_irq(&pdev->dev, irq,
-					       fsl_edma_err_handler,
-					       0, "eDMA2-ERR", fsl_edma);
-		else
-			ret = devm_request_irq(&pdev->dev, irq,
-					       fsl_edma_tx_handler, 0,
-					       fsl_edma->chans[i].chan_name,
-					       fsl_edma);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 static void fsl_edma_irq_exit(
 		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
 {
@@ -235,16 +191,8 @@ static struct fsl_edma_drvdata vf610_data = {
 	.setup_irq = fsl_edma_irq_init,
 };
 
-static struct fsl_edma_drvdata imx7ulp_data = {
-	.version = v3,
-	.dmamuxs = 1,
-	.has_dmaclk = true,
-	.setup_irq = fsl_edma2_irq_init,
-};
-
 static const struct of_device_id fsl_edma_dt_ids[] = {
 	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
-	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@@ -292,20 +240,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma_setup_regs(fsl_edma);
 	regs = &fsl_edma->regs;
 
-	if (drvdata->has_dmaclk) {
-		fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
-		if (IS_ERR(fsl_edma->dmaclk)) {
-			dev_err(&pdev->dev, "Missing DMA block clock.\n");
-			return PTR_ERR(fsl_edma->dmaclk);
-		}
-
-		ret = clk_prepare_enable(fsl_edma->dmaclk);
-		if (ret) {
-			dev_err(&pdev->dev, "DMA clk block failed.\n");
-			return ret;
-		}
-	}
-
 	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
 		char clkname[32];