
Merge branch 'dma/next' into next

* dma/next: (52 commits)
  LF-301: dmaengine: imx-sdma: Add once more loading firmware
  LF-246: dmaengine: imx-sdma: correct is_ram_script checking
  dma: caam: fix compilation error
  dma: caam: add dma memcpy driver
  dmaengine: fsl-dpaa2-qdma: Add NXP dpaa2 qDMA controller driver for Layerscape SoCs
  ...
5.4-rM2-2.2.x-imx-squashed
Dong Aisheng 2019-12-02 18:02:24 +08:00
commit dbdab14cb0
21 changed files with 3724 additions and 61 deletions


@ -0,0 +1,82 @@
* Freescale enhanced Direct Memory Access (eDMA-v3) Controller
The eDMA-v3 controller is derived from FSL eDMA and is first integrated on the
Freescale i.MX8QM SoC. The eDMA channels have multiplex capability through
programmable memory-mapped registers. Each specific DMA request source has a
fixed channel.
* eDMA Controller
Required properties:
- compatible :
- "fsl,imx8qm-edma" for eDMA used similar to that on i.MX8QM SoC
- "fsl,imx8qm-adma" for audio eDMA used on i.MX8QM
- reg : Specifies the base physical address(es) and size of the eDMA channel registers.
Each eDMA channel has a separate register address range and size.
- interrupts : A list of interrupt specifiers; each channel has one interrupt.
- interrupt-names : Should follow the template below:
"edmaX-chanX-Xx"
| | |---> receive/transmit, r or t
| |---> channel id, the max number is 32
|---> edma controller instance, 0, 1, 2,..etc
- #dma-cells : Must be <3>.
The 1st cell specifies the channel ID.
The 2nd cell specifies the channel priority.
The 3rd cell specifies the channel attributes, which include the bits
below (see also the sketch after this property list):
BIT(0): transmit or receive:
0: transmit, 1: receive.
BIT(1): local or remote access:
0: local, 1: remote.
BIT(2): dual-FIFO mode or not (currently only used for audio cyclic transfers):
0: not dual FIFO, 1: dual FIFO.
See the SoC's reference manual for all the supported request sources.
- dma-channels : Number of channels supported by the controller
- power-domains : Power domains for the eDMA channels used.
- power-domain-names : Power domain names for the eDMA channels used.
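Purely as a sketch (the macro names below are hypothetical, not defined by this
binding), the 3rd-cell attribute bits combine as follows:

/* Hypothetical names for the attribute bits described above. */
#define EDMA_ATTR_RX        (1 << 0)  /* 0: transmit, 1: receive  */
#define EDMA_ATTR_REMOTE    (1 << 1)  /* 0: local,    1: remote   */
#define EDMA_ATTR_DUALFIFO  (1 << 2)  /* dual-FIFO (audio cyclic) */

/* A dual-FIFO receive channel would use
 * (EDMA_ATTR_RX | EDMA_ATTR_DUALFIFO) == 5 as the 3rd cell. */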
Examples:
edma0: dma-controller@5a2c0000 {
compatible = "fsl,imx8qm-edma";
reg = <0x0 0x5a2c0000 0x0 0x10000>, /* channel12 UART0 rx */
<0x0 0x5a2d0000 0x0 0x10000>, /* channel13 UART0 tx */
<0x0 0x5a2e0000 0x0 0x10000>, /* channel14 UART1 rx */
<0x0 0x5a2f0000 0x0 0x10000>; /* channel15 UART1 tx */
#dma-cells = <3>;
dma-channels = <4>;
interrupts = <GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 437 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "edma0-chan12-rx", "edma0-chan13-tx",
"edma0-chan14-rx", "edma0-chan15-tx";
power-domains = <&pd IMX_SC_R_DMA_0_CH12>,
<&pd IMX_SC_R_DMA_0_CH13>,
<&pd IMX_SC_R_DMA_0_CH14>,
<&pd IMX_SC_R_DMA_0_CH15>;
power-domain-names = "edma0-chan12", "edma0-chan13",
"edma0-chan14", "edma0-chan15";
status = "okay";
};
* DMA clients
DMA client drivers that use the DMA function must use the format described
in the dma.txt file, using a three-cell specifier for each channel: the 1st
cell specifies the channel number, the 2nd the channel priority, and the 3rd
whether the channel is used for transmit or receive (0: transmit, 1: receive).
Examples:
lpuart1: serial@5a070000 {
compatible = "fsl,imx8qm-lpuart";
reg = <0x0 0x5a070000 0x0 0x1000>;
interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&clk IMX8QM_UART1_CLK>;
clock-names = "ipg";
assigned-clocks = <&clk IMX8QM_UART1_CLK>;
assigned-clock-rates = <80000000>;
power-domains = <&pd IMX_SC_R_UART_1>;
power-domain-names = "uart";
dma-names = "tx","rx";
dmas = <&edma0 15 0 0>,
<&edma0 14 0 1>;
status = "disabled";
};


@ -16,6 +16,21 @@ Optional properties:
- #dma-channels : Number of DMA channels supported. Should be 16.
- #dma-requests : Number of DMA requests supported.
* DMA capability limitation
Specify the DMA capability limitations.
For example, some SoCs only support up to 32-bit DMA capability, although
they are 64-bit SoCs.
- only-dma-mask32: 1 means that the SoC only supports up to 32-bit DMA
capability; a consumer sketch follows the example below.
Example:
dma_cap: dma_cap {
compatible = "dma-capability";
only-dma-mask32 = <1>;
};
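This binding only advertises the limitation; a consuming driver is expected to
cap its DMA mask accordingly. A minimal sketch, assuming the standard OF and
DMA-mapping APIs (the helper name is illustrative, not mandated by this binding):

#include <linux/of.h>
#include <linux/dma-mapping.h>

/* Sketch: restrict a device to 32-bit DMA when the optional
 * "dma-capability" node sets only-dma-mask32. */
static int apply_dma_cap_limit(struct device *dev)
{
	struct device_node *np;
	u32 mask32 = 0;

	np = of_find_compatible_node(NULL, NULL, "dma-capability");
	if (np)
		of_property_read_u32(np, "only-dma-mask32", &mask32);
	of_node_put(np);

	return dma_set_mask_and_coherent(dev,
					 DMA_BIT_MASK(mask32 ? 32 : 64));
}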
Example:
dma: dma@10001000 {


@ -9,6 +9,8 @@ Required properties:
"fsl,imx53-sdma" "fsl,imx53-sdma"
"fsl,imx6q-sdma" "fsl,imx6q-sdma"
"fsl,imx7d-sdma" "fsl,imx7d-sdma"
"fsl,imx6sx-sdma"
"fsl,imx6ul-sdma"
"fsl,imx8mq-sdma" "fsl,imx8mq-sdma"
The -to variants should be preferred since they allow to determine the The -to variants should be preferred since they allow to determine the
correct ROM script addresses needed for the driver to work without additional correct ROM script addresses needed for the driver to work without additional
@ -51,8 +53,14 @@ The full ID of peripheral types can be found below.
22 SSI Dual FIFO (needs firmware ver >= 2)
23 Shared ASRC
24 SAI
25 HDMI Audio
The third cell specifies the transfer priority and software done as below.
Bit31: sw_done
Bit15~Bit8: selector
Bit7~Bit0: priority level
ID transfer priority
-------------------------
@ -60,6 +68,9 @@ The third cell specifies the transfer priority as below.
1 Medium
2 Low
For example: 0x80000000 means sw_done enabled for the done0 selector and
High priority for PDM on i.MX8MM.
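Expressed with hypothetical helper macros (not defined by this binding), the
third cell is composed as:

#define SDMA_PRIO(p)      ((p) & 0xff)         /* Bit7~Bit0: priority level */
#define SDMA_DONE_SEL(s)  (((s) & 0xff) << 8)  /* Bit15~Bit8: selector      */
#define SDMA_SW_DONE      (1U << 31)           /* Bit31: sw_done            */

/* 0x80000000 == SDMA_SW_DONE | SDMA_DONE_SEL(0) | SDMA_PRIO(0),
 * i.e. sw_done via the done0 selector with High (0) priority. */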
Optional properties:
- gpr : The phandle to the General Purpose Register (GPR) node.


@ -131,6 +131,24 @@ config COH901318
help
Enable support for ST-Ericsson COH 901 318 DMA.
config CRYPTO_DEV_FSL_CAAM_DMA
tristate "CAAM DMA engine support"
depends on CRYPTO_DEV_FSL_CAAM_JR
default n
select DMA_ENGINE
select ASYNC_CORE
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
Selecting this will offload the DMA operations for users of
the scatter gather memcopy API to the CAAM via job rings. The
CAAM is a hardware module that provides hardware acceleration to
cryptographic operations. It has a built-in DMA controller that can
be programmed to read/write cryptographic data. This module defines
a DMA driver that uses the DMA capabilities of the CAAM.
To compile this as a module, choose M here: the module
will be called caam_dma.
config DMA_BCM2835
tristate "BCM2835 DMA engine support"
depends on ARCH_BCM2835
@ -227,6 +245,17 @@ config FSL_QDMA
or dequeuing DMA jobs from, different work queues.
This module can be found on NXP Layerscape SoCs.
The qdma driver only works on SoCs with a DPAA hardware block.
config FSL_EDMA_V3
tristate "Freescale eDMA v3 engine support"
depends on OF
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support the Freescale eDMA v3 engine with programmable channel
multiplexing. This driver is based on FSL_EDMA but differs significantly:
each channel has its own interrupt and its own register region.
This module can be found on Freescale i.MX8QM SoCs.
config FSL_RAID
tristate "Freescale RAID engine Support"
@ -651,7 +680,6 @@ config ZX_DMA
help
Support the DMA engine for ZTE ZX family platform devices.
# driver files
source "drivers/dma/bestcomm/Kconfig"
@ -669,6 +697,8 @@ source "drivers/dma/sh/Kconfig"
source "drivers/dma/ti/Kconfig" source "drivers/dma/ti/Kconfig"
source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
# clients
comment "DMA Clients"
depends on DMA_ENGINE


@ -32,6 +32,7 @@ obj-$(CONFIG_DW_EDMA) += dw-edma/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_EDMA_V3) += fsl-edma-v3.o
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
@ -75,6 +76,8 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-y += mediatek/
obj-y += qcom/


@ -0,0 +1,462 @@
/*
* caam support for SG DMA
*
* Copyright 2016 Freescale Semiconductor, Inc
* Copyright 2017 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the names of the above-listed copyright holders nor the
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dmaengine.h"
#include "../crypto/caam/regs.h"
#include "../crypto/caam/jr.h"
#include "../crypto/caam/error.h"
#include "../crypto/caam/desc_constr.h"
#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) / \
CAAM_CMD_SZ)
/*
* This is max chunk size of a DMA transfer. If a buffer is larger than this
* value it is internally broken into chunks of max CAAM_DMA_CHUNK_SIZE bytes
* and for each chunk a DMA transfer request is issued.
* This value is the largest number on 16 bits that is a multiple of 256 bytes
* (the largest configurable CAAM DMA burst size).
*/
#define CAAM_DMA_CHUNK_SIZE 65280
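/* i.e. 0xFF00 == 255 * 256, the largest 16-bit multiple of 256 */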
struct caam_dma_sh_desc {
u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
dma_addr_t desc_dma;
};
/* caam dma extended descriptor */
struct caam_dma_edesc {
struct dma_async_tx_descriptor async_tx;
struct list_head node;
struct caam_dma_ctx *ctx;
dma_addr_t src_dma;
dma_addr_t dst_dma;
unsigned int src_len;
unsigned int dst_len;
u32 jd[] ____cacheline_aligned;
};
/*
* caam_dma_ctx - per jr/channel context
* @chan: dma channel used by async_tx API
* @node: list_head used to attach to the global dma_ctx_list
* @jrdev: Job Ring device
* @pending_q: queue of pending (submitted, but not enqueued) jobs
* @done_not_acked: jobs that have been completed by jr, but maybe not acked
* @edesc_lock: protects extended descriptor
*/
struct caam_dma_ctx {
struct dma_chan chan;
struct list_head node;
struct device *jrdev;
struct list_head pending_q;
struct list_head done_not_acked;
spinlock_t edesc_lock;
};
static struct dma_device *dma_dev;
static struct caam_dma_sh_desc *dma_sh_desc;
static LIST_HEAD(dma_ctx_list);
static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct caam_dma_edesc *edesc = NULL;
struct caam_dma_ctx *ctx = NULL;
dma_cookie_t cookie;
edesc = container_of(tx, struct caam_dma_edesc, async_tx);
ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
spin_lock_bh(&ctx->edesc_lock);
cookie = dma_cookie_assign(tx);
list_add_tail(&edesc->node, &ctx->pending_q);
spin_unlock_bh(&ctx->edesc_lock);
return cookie;
}
static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
{
struct caam_dma_ctx *ctx = edesc->ctx;
struct caam_dma_edesc *_edesc = NULL;
spin_lock_bh(&ctx->edesc_lock);
list_add_tail(&edesc->node, &ctx->done_not_acked);
list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
if (async_tx_test_ack(&edesc->async_tx)) {
list_del(&edesc->node);
kfree(edesc);
}
}
spin_unlock_bh(&ctx->edesc_lock);
}
static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
void *context)
{
struct caam_dma_edesc *edesc = context;
struct caam_dma_ctx *ctx = edesc->ctx;
dma_async_tx_callback callback;
void *callback_param;
if (err)
caam_jr_strstatus(ctx->jrdev, err);
dma_run_dependencies(&edesc->async_tx);
spin_lock_bh(&ctx->edesc_lock);
dma_cookie_complete(&edesc->async_tx);
spin_unlock_bh(&ctx->edesc_lock);
callback = edesc->async_tx.callback;
callback_param = edesc->async_tx.callback_param;
dma_descriptor_unmap(&edesc->async_tx);
caam_jr_chan_free_edesc(edesc);
if (callback)
callback(callback_param);
}
static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
{
u32 *jd = edesc->jd;
u32 *sh_desc = dma_sh_desc->desc;
dma_addr_t desc_dma = dma_sh_desc->desc_dma;
/* init the job descriptor */
init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
/* set SEQIN PTR */
append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
/* set SEQOUT PTR */
append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
print_hex_dump_debug("caam dma desc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
}
static struct dma_async_tx_descriptor *
caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags)
{
struct caam_dma_edesc *edesc;
struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
chan);
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
if (!edesc)
return ERR_PTR(-ENOMEM);
dma_async_tx_descriptor_init(&edesc->async_tx, chan);
edesc->async_tx.tx_submit = caam_dma_tx_submit;
edesc->async_tx.flags = flags;
edesc->async_tx.cookie = -EBUSY;
edesc->src_dma = src;
edesc->src_len = len;
edesc->dst_dma = dst;
edesc->dst_len = len;
edesc->ctx = ctx;
caam_dma_memcpy_init_job_desc(edesc);
return &edesc->async_tx;
}
/* This function can be called in an interrupt context */
static void caam_dma_issue_pending(struct dma_chan *chan)
{
struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
chan);
struct caam_dma_edesc *edesc, *_edesc;
spin_lock_bh(&ctx->edesc_lock);
list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
caam_dma_done, edesc) < 0)
break;
list_del(&edesc->node);
}
spin_unlock_bh(&ctx->edesc_lock);
}
static void caam_dma_free_chan_resources(struct dma_chan *chan)
{
struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
chan);
struct caam_dma_edesc *edesc, *_edesc;
spin_lock_bh(&ctx->edesc_lock);
list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
list_del(&edesc->node);
kfree(edesc);
}
list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
list_del(&edesc->node);
kfree(edesc);
}
spin_unlock_bh(&ctx->edesc_lock);
}
static int caam_dma_jr_chan_bind(void)
{
struct device *jrdev;
struct caam_dma_ctx *ctx;
int bonds = 0;
int i;
for (i = 0; i < caam_jr_driver_probed(); i++) {
jrdev = caam_jridx_alloc(i);
if (IS_ERR(jrdev)) {
pr_err("job ring device %d allocation failed\n", i);
continue;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
caam_jr_free(jrdev);
continue;
}
ctx->chan.device = dma_dev;
ctx->chan.private = ctx;
ctx->jrdev = jrdev;
INIT_LIST_HEAD(&ctx->pending_q);
INIT_LIST_HEAD(&ctx->done_not_acked);
INIT_LIST_HEAD(&ctx->node);
spin_lock_init(&ctx->edesc_lock);
dma_cookie_init(&ctx->chan);
/* add the context of this channel to the context list */
list_add_tail(&ctx->node, &dma_ctx_list);
/* add this channel to the device chan list */
list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
bonds++;
}
return bonds;
}
static inline void caam_jr_dma_free(struct dma_chan *chan)
{
struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
chan);
list_del(&ctx->node);
list_del(&chan->device_node);
caam_jr_free(ctx->jrdev);
kfree(ctx);
}
static void set_caam_dma_desc(u32 *desc)
{
u32 *jmp_cmd;
/* dma shared descriptor */
init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
/* REG1 = CAAM_DMA_CHUNK_SIZE */
append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
/* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
/*
* if (REG0 > 0)
* jmp to LABEL1
*/
jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
JUMP_COND_MATH_Z);
/* REG1 = SEQINLEN */
append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
/* LABEL1 */
set_jump_tgt_here(desc, jmp_cmd);
/* VARSEQINLEN = REG1 */
append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
/* VARSEQOUTLEN = REG1 */
append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
/* do FIFO STORE */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
/* do FIFO LOAD */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_IFIFO | LDST_VLF);
/*
* if (REG0 > 0)
* jmp 0xF8 (after shared desc header)
*/
append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
JUMP_COND_MATH_Z | 0xF8);
print_hex_dump_debug("caam dma shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
}
static int caam_dma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *ctrldev = dev->parent;
struct dma_chan *chan, *_chan;
u32 *sh_desc;
int err = -ENOMEM;
int bonds;
if (!caam_jr_driver_probed()) {
dev_info(dev, "Defer probing after JR driver probing\n");
return -EPROBE_DEFER;
}
dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
if (!dma_dev)
return -ENOMEM;
dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
if (!dma_sh_desc)
goto desc_err;
sh_desc = dma_sh_desc->desc;
set_caam_dma_desc(sh_desc);
dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
desc_bytes(sh_desc),
DMA_TO_DEVICE);
if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
dev_err(dev, "unable to map dma descriptor\n");
goto map_err;
}
INIT_LIST_HEAD(&dma_dev->channels);
bonds = caam_dma_jr_chan_bind();
if (!bonds) {
err = -ENODEV;
goto jr_bind_err;
}
dma_dev->dev = dev;
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
dma_dev->device_tx_status = dma_cookie_status;
dma_dev->device_issue_pending = caam_dma_issue_pending;
dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
err = dma_async_device_register(dma_dev);
if (err) {
dev_err(dev, "Failed to register CAAM DMA engine\n");
goto jr_bind_err;
}
dev_info(dev, "caam dma support with %d job rings\n", bonds);
return err;
jr_bind_err:
list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
caam_jr_dma_free(chan);
dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
DMA_TO_DEVICE);
map_err:
kfree(dma_sh_desc);
desc_err:
kfree(dma_dev);
return err;
}
static int caam_dma_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *ctrldev = dev->parent;
struct caam_dma_ctx *ctx, *_ctx;
dma_async_device_unregister(dma_dev);
list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
list_del(&ctx->node);
caam_jr_free(ctx->jrdev);
kfree(ctx);
}
dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
kfree(dma_sh_desc);
kfree(dma_dev);
dev_info(dev, "caam dma support disabled\n");
return 0;
}
static struct platform_driver caam_dma_driver = {
.driver = {
.name = "caam-dma",
},
.probe = caam_dma_probe,
.remove = caam_dma_remove,
};
module_platform_driver(caam_dma_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("NXP CAAM support for DMA engine");
MODULE_AUTHOR("NXP Semiconductors");
MODULE_ALIAS("platform:caam-dma");


@ -0,0 +1,9 @@
menuconfig FSL_DPAA2_QDMA
tristate "NXP DPAA2 QDMA"
depends on ARM64
depends on FSL_MC_BUS && FSL_MC_DPIO
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
NXP Data Path Acceleration Architecture 2 QDMA driver,
using the NXP MC bus driver.


@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for the NXP DPAA2 qDMA controllers
obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma.o dpdmai.o


@ -0,0 +1,825 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP
#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"
static bool smmu_disable = true;
static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}
static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}
static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
sizeof(struct dpaa2_fd),
sizeof(struct dpaa2_fd), 0);
if (!dpaa2_chan->fd_pool)
goto err;
dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
sizeof(struct dpaa2_fl_entry),
sizeof(struct dpaa2_fl_entry), 0);
if (!dpaa2_chan->fl_pool)
goto err_fd;
dpaa2_chan->sdd_pool =
dma_pool_create("sdd_pool", dev,
sizeof(struct dpaa2_qdma_sd_d),
sizeof(struct dpaa2_qdma_sd_d), 0);
if (!dpaa2_chan->sdd_pool)
goto err_fl;
return dpaa2_qdma->desc_allocated++;
err_fl:
dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
dma_pool_destroy(dpaa2_chan->fd_pool);
err:
return -ENOMEM;
}
static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);
dma_pool_destroy(dpaa2_chan->fd_pool);
dma_pool_destroy(dpaa2_chan->fl_pool);
dma_pool_destroy(dpaa2_chan->sdd_pool);
dpaa2_qdma->desc_allocated--;
}
/*
 * Request a command descriptor for enqueue. Reuse a descriptor from the
 * channel's free list when one is available; otherwise allocate a new one,
 * along with its FD, frame-list and source/destination descriptor buffers,
 * from the per-channel DMA pools.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
struct device *dev = &qdma_priv->dpdmai_dev->dev;
struct dpaa2_qdma_comp *comp_temp = NULL;
unsigned long flags;
spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
if (list_empty(&dpaa2_chan->comp_free)) {
spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
if (!comp_temp)
goto err;
comp_temp->fd_virt_addr =
dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
&comp_temp->fd_bus_addr);
if (!comp_temp->fd_virt_addr)
goto err_comp;
comp_temp->fl_virt_addr =
dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
&comp_temp->fl_bus_addr);
if (!comp_temp->fl_virt_addr)
goto err_fd_virt;
comp_temp->desc_virt_addr =
dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
&comp_temp->desc_bus_addr);
if (!comp_temp->desc_virt_addr)
goto err_fl_virt;
comp_temp->qchan = dpaa2_chan;
return comp_temp;
}
comp_temp = list_first_entry(&dpaa2_chan->comp_free,
struct dpaa2_qdma_comp, list);
list_del(&comp_temp->list);
spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
comp_temp->qchan = dpaa2_chan;
return comp_temp;
err_fl_virt:
dma_pool_free(dpaa2_chan->fl_pool,
comp_temp->fl_virt_addr,
comp_temp->fl_bus_addr);
err_fd_virt:
dma_pool_free(dpaa2_chan->fd_pool,
comp_temp->fd_virt_addr,
comp_temp->fd_bus_addr);
err_comp:
kfree(comp_temp);
err:
dev_err(dev, "Failed to request descriptor\n");
return NULL;
}
static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
struct dpaa2_fd *fd;
fd = dpaa2_comp->fd_virt_addr;
memset(fd, 0, sizeof(struct dpaa2_fd));
/* fd populated */
dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
/*
 * Bypass memory translation, frame list format, short length disable;
 * BMT must be disabled when fsl-mc uses IOVA addresses (i.e. when an
 * IOMMU is in use).
 */
if (smmu_disable)
dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}
/* first frame list for descriptor buffer */
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
struct dpaa2_qdma_comp *dpaa2_comp,
bool wrt_changed)
{
struct dpaa2_qdma_sd_d *sdd;
sdd = dpaa2_comp->desc_virt_addr;
memset(sdd, 0, 2 * (sizeof(*sdd)));
/* source descriptor CMD */
sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
sdd++;
/* dest descriptor CMD */
if (wrt_changed)
sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
else
sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
/* first frame list to source descriptor */
dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
dpaa2_fl_set_len(f_list, 0x20);
dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
/* bypass memory translation */
if (smmu_disable)
f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}
/* source and destination frame list */
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
dma_addr_t dst, dma_addr_t src,
size_t len, uint8_t fmt)
{
/* source frame list to source buffer */
memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
dpaa2_fl_set_addr(f_list, src);
dpaa2_fl_set_len(f_list, len);
/* single buffer frame or scatter gather frame */
dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
/* bypass memory translation */
if (smmu_disable)
f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
f_list++;
/* destination frame list to destination buffer */
memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
dpaa2_fl_set_addr(f_list, dst);
dpaa2_fl_set_len(f_list, len);
dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
/* single buffer frame or scatter gather frame */
dpaa2_fl_set_final(f_list, QDMA_FL_F);
/* bypass memory translation */
if (smmu_disable)
f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}
static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
dma_addr_t src, size_t len, ulong flags)
{
struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
struct dpaa2_qdma_engine *dpaa2_qdma;
struct dpaa2_qdma_comp *dpaa2_comp;
struct dpaa2_fl_entry *f_list;
bool wrt_changed;
dpaa2_qdma = dpaa2_chan->qdma;
dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
if (!dpaa2_comp)
return NULL;
wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;
/* populate Frame descriptor */
dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
f_list = dpaa2_comp->fl_virt_addr;
/* first frame list for descriptor buffer (long format) */
dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
f_list++;
dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}
static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
struct dpaa2_qdma_comp *dpaa2_comp;
struct virt_dma_desc *vdesc;
struct dpaa2_fd *fd;
unsigned long flags;
int err;
spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
spin_lock(&dpaa2_chan->vchan.lock);
if (vchan_issue_pending(&dpaa2_chan->vchan)) {
vdesc = vchan_next_desc(&dpaa2_chan->vchan);
if (!vdesc)
goto err_enqueue;
dpaa2_comp = to_fsl_qdma_comp(vdesc);
fd = dpaa2_comp->fd_virt_addr;
list_del(&vdesc->node);
list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
if (err) {
list_del(&dpaa2_comp->list);
list_add_tail(&dpaa2_comp->list,
&dpaa2_chan->comp_free);
}
}
err_enqueue:
spin_unlock(&dpaa2_chan->vchan.lock);
spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}
static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
struct dpaa2_qdma_priv_per_prio *ppriv;
struct device *dev = &ls_dev->dev;
struct dpaa2_qdma_priv *priv;
u8 prio_def = DPDMAI_PRIO_NUM;
int err = -EINVAL;
int i;
priv = dev_get_drvdata(dev);
priv->dev = dev;
priv->dpqdma_id = ls_dev->obj_desc.id;
/* Get the handle for the DPDMAI this interface is associated with */
err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
if (err) {
dev_err(dev, "dpdmai_open() failed\n");
return err;
}
dev_dbg(dev, "Opened dpdmai object successfully\n");
err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
&priv->dpdmai_attr);
if (err) {
dev_err(dev, "dpdmai_get_attributes() failed\n");
goto exit;
}
if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
dev_err(dev, "DPDMAI major version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
priv->dpdmai_attr.version.minor,
DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
goto exit;
}
if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
dev_err(dev, "DPDMAI minor version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
priv->dpdmai_attr.version.minor,
DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
goto exit;
}
priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
if (!ppriv) {
err = -ENOMEM;
goto exit;
}
priv->ppriv = ppriv;
for (i = 0; i < priv->num_pairs; i++) {
err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
i, &priv->rx_queue_attr[i]);
if (err) {
dev_err(dev, "dpdmai_get_rx_queue() failed\n");
goto exit;
}
ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
i, &priv->tx_fqid[i]);
if (err) {
dev_err(dev, "dpdmai_get_tx_queue() failed\n");
goto exit;
}
ppriv->req_fqid = priv->tx_fqid[i];
ppriv->prio = i;
ppriv->priv = priv;
ppriv++;
}
return 0;
exit:
dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
return err;
}
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
struct dpaa2_qdma_priv_per_prio, nctx);
struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
struct dpaa2_qdma_priv *priv = ppriv->priv;
u32 n_chans = priv->dpaa2_qdma->n_chans;
struct dpaa2_qdma_chan *qchan;
const struct dpaa2_fd *fd_eq;
const struct dpaa2_fd *fd;
struct dpaa2_dq *dq;
int is_last = 0;
int found;
u8 status;
int err;
int i;
do {
err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
ppriv->store);
} while (err);
while (!is_last) {
do {
dq = dpaa2_io_store_next(ppriv->store, &is_last);
} while (!is_last && !dq);
if (!dq) {
dev_err(priv->dev, "FQID returned no valid frames!\n");
continue;
}
/* obtain FD and process the error */
fd = dpaa2_dq_fd(dq);
status = dpaa2_fd_get_ctrl(fd) & 0xff;
if (status)
dev_err(priv->dev, "FD error occurred\n");
found = 0;
for (i = 0; i < n_chans; i++) {
qchan = &priv->dpaa2_qdma->chans[i];
spin_lock(&qchan->queue_lock);
if (list_empty(&qchan->comp_used)) {
spin_unlock(&qchan->queue_lock);
continue;
}
list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
&qchan->comp_used, list) {
fd_eq = dpaa2_comp->fd_virt_addr;
if (le64_to_cpu(fd_eq->simple.addr) ==
le64_to_cpu(fd->simple.addr)) {
spin_lock(&qchan->vchan.lock);
vchan_cookie_complete(&
dpaa2_comp->vdesc);
spin_unlock(&qchan->vchan.lock);
found = 1;
break;
}
}
spin_unlock(&qchan->queue_lock);
if (found)
break;
}
}
dpaa2_io_service_rearm(NULL, ctx);
}
static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
struct dpaa2_qdma_priv_per_prio *ppriv;
struct device *dev = priv->dev;
int err = -EINVAL;
int i, num;
num = priv->num_pairs;
ppriv = priv->ppriv;
for (i = 0; i < num; i++) {
ppriv->nctx.is_cdan = 0;
ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
ppriv->nctx.id = ppriv->rsp_fqid;
ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
if (err) {
dev_err(dev, "Notification register failed\n");
goto err_service;
}
ppriv->store =
dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
if (!ppriv->store) {
dev_err(dev, "dpaa2_io_store_create() failed\n");
goto err_store;
}
ppriv++;
}
return 0;
err_store:
dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
ppriv--;
while (ppriv >= priv->ppriv) {
dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
dpaa2_io_store_destroy(ppriv->store);
ppriv--;
}
return err;
}
static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
{
struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
int i;
for (i = 0; i < priv->num_pairs; i++) {
dpaa2_io_store_destroy(ppriv->store);
ppriv++;
}
}
static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
struct device *dev = priv->dev;
int i;
for (i = 0; i < priv->num_pairs; i++) {
dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
ppriv++;
}
}
static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
struct dpdmai_rx_queue_cfg rx_queue_cfg;
struct dpaa2_qdma_priv_per_prio *ppriv;
struct device *dev = priv->dev;
struct fsl_mc_device *ls_dev;
int i, num;
int err;
ls_dev = to_fsl_mc_device(dev);
num = priv->num_pairs;
ppriv = priv->ppriv;
for (i = 0; i < num; i++) {
rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
DPDMAI_QUEUE_OPT_DEST;
rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
rx_queue_cfg.dest_cfg.priority = ppriv->prio;
err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
rx_queue_cfg.dest_cfg.priority,
&rx_queue_cfg);
if (err) {
dev_err(dev, "dpdmai_set_rx_queue() failed\n");
return err;
}
ppriv++;
}
return 0;
}
static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
struct device *dev = priv->dev;
struct fsl_mc_device *ls_dev;
int err = 0;
int i;
ls_dev = to_fsl_mc_device(dev);
for (i = 0; i < priv->num_pairs; i++) {
ppriv->nctx.qman64 = 0;
ppriv->nctx.dpio_id = 0;
ppriv++;
}
err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
if (err)
dev_err(dev, "dpdmai_reset() failed\n");
return err;
}
static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
struct list_head *head)
{
struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
unsigned long flags;
list_for_each_entry_safe(comp_tmp, _comp_tmp,
head, list) {
spin_lock_irqsave(&qchan->queue_lock, flags);
list_del(&comp_tmp->list);
spin_unlock_irqrestore(&qchan->queue_lock, flags);
dma_pool_free(qchan->fd_pool,
comp_tmp->fd_virt_addr,
comp_tmp->fd_bus_addr);
dma_pool_free(qchan->fl_pool,
comp_tmp->fl_virt_addr,
comp_tmp->fl_bus_addr);
dma_pool_free(qchan->sdd_pool,
comp_tmp->desc_virt_addr,
comp_tmp->desc_bus_addr);
kfree(comp_tmp);
}
}
static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
struct dpaa2_qdma_chan *qchan;
int num, i;
num = dpaa2_qdma->n_chans;
for (i = 0; i < num; i++) {
qchan = &dpaa2_qdma->chans[i];
dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
dma_pool_destroy(qchan->fd_pool);
dma_pool_destroy(qchan->fl_pool);
dma_pool_destroy(qchan->sdd_pool);
}
}
static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
struct dpaa2_qdma_comp *dpaa2_comp;
struct dpaa2_qdma_chan *qchan;
unsigned long flags;
dpaa2_comp = to_fsl_qdma_comp(vdesc);
qchan = dpaa2_comp->qchan;
spin_lock_irqsave(&qchan->queue_lock, flags);
list_del(&dpaa2_comp->list);
list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
spin_unlock_irqrestore(&qchan->queue_lock, flags);
}
static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
struct dpaa2_qdma_chan *dpaa2_chan;
int num = priv->num_pairs;
int i;
INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
for (i = 0; i < dpaa2_qdma->n_chans; i++) {
dpaa2_chan = &dpaa2_qdma->chans[i];
dpaa2_chan->qdma = dpaa2_qdma;
dpaa2_chan->fqid = priv->tx_fqid[i % num];
dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
spin_lock_init(&dpaa2_chan->queue_lock);
INIT_LIST_HEAD(&dpaa2_chan->comp_used);
INIT_LIST_HEAD(&dpaa2_chan->comp_free);
}
return 0;
}
static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
{
struct device *dev = &dpdmai_dev->dev;
struct dpaa2_qdma_engine *dpaa2_qdma;
struct dpaa2_qdma_priv *priv;
int err;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_set_drvdata(dev, priv);
priv->dpdmai_dev = dpdmai_dev;
priv->iommu_domain = iommu_get_domain_for_dev(dev);
if (priv->iommu_domain)
smmu_disable = false;
/* obtain a MC portal */
err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
if (err) {
if (err == -ENXIO)
err = -EPROBE_DEFER;
else
dev_err(dev, "MC portal allocation failed\n");
goto err_mcportal;
}
/* DPDMAI initialization */
err = dpaa2_qdma_setup(dpdmai_dev);
if (err) {
dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
goto err_dpdmai_setup;
}
/* DPIO */
err = dpaa2_qdma_dpio_setup(priv);
if (err) {
dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
goto err_dpio_setup;
}
/* DPDMAI binding to DPIO */
err = dpaa2_dpdmai_bind(priv);
if (err) {
dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
goto err_bind;
}
/* DPDMAI enable */
err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
if (err) {
dev_err(dev, "dpdmai_enable() faile\n");
goto err_enable;
}
dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
if (!dpaa2_qdma) {
err = -ENOMEM;
goto err_eng;
}
priv->dpaa2_qdma = dpaa2_qdma;
dpaa2_qdma->priv = priv;
dpaa2_qdma->desc_allocated = 0;
dpaa2_qdma->n_chans = NUM_CH;
dpaa2_dpdmai_init_channels(dpaa2_qdma);
if (soc_device_match(soc_fixup_tuning))
dpaa2_qdma->qdma_wrtype_fixup = true;
else
dpaa2_qdma->qdma_wrtype_fixup = false;
dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
dpaa2_qdma->dma_dev.dev = dev;
dpaa2_qdma->dma_dev.device_alloc_chan_resources =
dpaa2_qdma_alloc_chan_resources;
dpaa2_qdma->dma_dev.device_free_chan_resources =
dpaa2_qdma_free_chan_resources;
dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
err = dma_async_device_register(&dpaa2_qdma->dma_dev);
if (err) {
dev_err(dev, "Can't register NXP QDMA engine.\n");
goto err_dpaa2_qdma;
}
return 0;
err_dpaa2_qdma:
kfree(dpaa2_qdma);
err_eng:
dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_enable:
dpaa2_dpdmai_dpio_unbind(priv);
err_bind:
dpaa2_dpmai_store_free(priv);
dpaa2_dpdmai_dpio_free(priv);
err_dpio_setup:
kfree(priv->ppriv);
dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_dpdmai_setup:
fsl_mc_portal_free(priv->mc_io);
err_mcportal:
kfree(priv);
dev_set_drvdata(dev, NULL);
return err;
}
static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
struct dpaa2_qdma_engine *dpaa2_qdma;
struct dpaa2_qdma_priv *priv;
struct device *dev;
dev = &ls_dev->dev;
priv = dev_get_drvdata(dev);
dpaa2_qdma = priv->dpaa2_qdma;
dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
dpaa2_dpdmai_dpio_unbind(priv);
dpaa2_dpmai_store_free(priv);
dpaa2_dpdmai_dpio_free(priv);
dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
fsl_mc_portal_free(priv->mc_io);
dev_set_drvdata(dev, NULL);
dpaa2_dpdmai_free_channels(dpaa2_qdma);
dma_async_device_unregister(&dpaa2_qdma->dma_dev);
kfree(priv);
kfree(dpaa2_qdma);
return 0;
}
static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpdmai",
},
{ .vendor = 0x0 }
};
static struct fsl_mc_driver dpaa2_qdma_driver = {
.driver = {
.name = "dpaa2-qdma",
.owner = THIS_MODULE,
},
.probe = dpaa2_qdma_probe,
.remove = dpaa2_qdma_remove,
.match_id_table = dpaa2_qdma_id_table
};
static int __init dpaa2_qdma_driver_init(void)
{
return fsl_mc_driver_register(&(dpaa2_qdma_driver));
}
late_initcall(dpaa2_qdma_driver_init);
static void __exit fsl_qdma_exit(void)
{
fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
}
module_exit(fsl_qdma_exit);
MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");


@ -0,0 +1,153 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 NXP */
#ifndef __DPAA2_QDMA_H
#define __DPAA2_QDMA_H
#define DPAA2_QDMA_STORE_SIZE 16
#define NUM_CH 8
struct dpaa2_qdma_sd_d {
u32 rsv:32;
union {
struct {
u32 ssd:12; /* source stride distance */
u32 sss:12; /* source stride size */
u32 rsv1:8;
} sdf;
struct {
u32 dsd:12; /* Destination stride distance */
u32 dss:12; /* Destination stride size */
u32 rsv2:8;
} ddf;
} df;
u32 rbpcmd; /* Route-by-port command */
u32 cmd;
} __attribute__((__packed__));
/* Source descriptor command read transaction type for RBP=0: */
/* coherent copy of cacheable memory */
#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
/* Destination descriptor command write transaction type for RBP=0: */
/* coherent copy of cacheable memory */
#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
#define QMAN_FD_FMT_ENABLE BIT(0) /* frame list table enable */
#define QMAN_FD_BMT_ENABLE BIT(15) /* bypass memory translation */
#define QMAN_FD_BMT_DISABLE (0) /* bypass memory translation */
#define QMAN_FD_SL_DISABLE (0) /* short length disabled */
#define QMAN_FD_SL_ENABLE BIT(14) /* short length enabled */
#define QDMA_FINAL_BIT_DISABLE (0) /* final bit disable */
#define QDMA_FINAL_BIT_ENABLE BIT(31) /* final bit enable */
#define QDMA_FD_SHORT_FORMAT BIT(11) /* short format */
#define QDMA_FD_LONG_FORMAT (0) /* long format */
#define QDMA_SER_DISABLE (8) /* no notification */
#define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */
#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
#define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */
#define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */
#define QMAN_FD_VA_DISABLE (0) /* Address used is a real address */
/* Flow Context: 49bit physical address */
#define QMAN_FD_CBMT_ENABLE BIT(15)
#define QMAN_FD_CBMT_DISABLE (0) /* Flow Context: 64bit virtual address */
#define QMAN_FD_SC_DISABLE (0) /* stashing control */
#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
#define QDMA_FL_FMT_SGE (0x2) /* Scatter gather frame */
#define QDMA_FL_BMT_ENABLE BIT(15) /* enable bypass memory translation */
#define QDMA_FL_BMT_DISABLE (0x0) /* enable bypass memory translation */
#define QDMA_FL_SL_LONG (0x0) /* long length */
#define QDMA_FL_SL_SHORT (0x1) /* short length */
#define QDMA_FL_F (0x1) /* last frame list bit */
/* Description of frame list table structure */
struct dpaa2_qdma_chan {
struct dpaa2_qdma_engine *qdma;
struct virt_dma_chan vchan;
struct virt_dma_desc vdesc;
enum dma_status status;
u32 fqid;
/* spinlock used by dpaa2 qdma driver */
spinlock_t queue_lock;
struct dma_pool *fd_pool;
struct dma_pool *fl_pool;
struct dma_pool *sdd_pool;
struct list_head comp_used;
struct list_head comp_free;
};
struct dpaa2_qdma_comp {
dma_addr_t fd_bus_addr;
dma_addr_t fl_bus_addr;
dma_addr_t desc_bus_addr;
struct dpaa2_fd *fd_virt_addr;
struct dpaa2_fl_entry *fl_virt_addr;
struct dpaa2_qdma_sd_d *desc_virt_addr;
struct dpaa2_qdma_chan *qchan;
struct virt_dma_desc vdesc;
struct list_head list;
};
struct dpaa2_qdma_engine {
struct dma_device dma_dev;
u32 n_chans;
struct dpaa2_qdma_chan chans[NUM_CH];
int qdma_wrtype_fixup;
int desc_allocated;
struct dpaa2_qdma_priv *priv;
};
/*
* dpaa2_qdma_priv - driver private data
*/
struct dpaa2_qdma_priv {
int dpqdma_id;
struct iommu_domain *iommu_domain;
struct dpdmai_attr dpdmai_attr;
struct device *dev;
struct fsl_mc_io *mc_io;
struct fsl_mc_device *dpdmai_dev;
u8 num_pairs;
struct dpaa2_qdma_engine *dpaa2_qdma;
struct dpaa2_qdma_priv_per_prio *ppriv;
struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
u32 tx_fqid[DPDMAI_PRIO_NUM];
};
struct dpaa2_qdma_priv_per_prio {
int req_fqid;
int rsp_fqid;
int prio;
struct dpaa2_io_store *store;
struct dpaa2_io_notification_ctx nctx;
struct dpaa2_qdma_priv *priv;
};
static struct soc_device_attribute soc_fixup_tuning[] = {
{ .family = "QorIQ LX2160A"},
{ },
};
/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
sizeof(struct dpaa2_fl_entry) * 3 + \
sizeof(struct dpaa2_qdma_sd_d) * 2)
static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma);
static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
struct list_head *head);
#endif /* __DPAA2_QDMA_H */


@ -0,0 +1,366 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP
#include <linux/types.h>
#include <linux/io.h>
#include <linux/fsl/mc.h>
#include "dpdmai.h"
struct dpdmai_rsp_get_attributes {
__le32 id;
u8 num_of_priorities;
u8 pad0[3];
__le16 major;
__le16 minor;
};
struct dpdmai_cmd_queue {
__le32 dest_id;
u8 priority;
u8 queue;
u8 dest_type;
u8 pad;
__le64 user_ctx;
union {
__le32 options;
__le32 fqid;
};
};
struct dpdmai_rsp_get_tx_queue {
__le64 pad;
__le32 fqid;
};
#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
/* cmd, param, offset, width, type, arg_name */
#define DPDMAI_CMD_CREATE(_cmd, _cfg) \
do { \
typeof(_cmd) (cmd) = (_cmd); \
typeof(_cfg) (cfg) = (_cfg); \
MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\
MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\
} while (0)
static inline u64 mc_enc(int lsoffset, int width, u64 val)
{
return (val & MAKE_UMASK64(width)) << lsoffset;
}
/**
* dpdmai_open() - Open a control session for the specified object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @dpdmai_id: DPDMAI unique ID
* @token: Returned token; use in subsequent API calls
*
* This function can be used to open a control session for an
* already created object; an object may have been declared in
* the DPL or by calling the dpdmai_create() function.
* This function returns a unique authentication token,
* associated with the specific object ID and the specific MC
* portal; this token must be used in all subsequent commands for
* this specific object.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token)
{
struct fsl_mc_command cmd = { 0 };
__le64 *cmd_dpdmai_id;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
cmd_flags, 0);
cmd_dpdmai_id = cmd.params;
*cmd_dpdmai_id = cpu_to_le32(dpdmai_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
/**
* dpdmai_close() - Close the control session of the object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
*
* After this function is called, no further operations are
* allowed on the object without opening a new control session.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpdmai_create() - Create the DPDMAI object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @cfg: Configuration structure
* @token: Returned token; use in subsequent API calls
*
* Create the DPDMAI object, allocate required resources and
* perform required initialization.
*
* The object can be created either by declaring it in the
* DPL file, or by calling this function.
*
* This function returns a unique authentication token,
* associated with the specific object ID and the specific MC
* portal; this token must be used in all subsequent calls to
* this specific object. For objects that are created using the
* DPL file, call dpdmai_open() function to get an authentication
* token first.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
const struct dpdmai_cfg *cfg, u16 *token)
{
struct fsl_mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
cmd_flags, 0);
DPDMAI_CMD_CREATE(cmd, cfg);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
/**
* dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpdmai_get_attributes() - Retrieve DPDMAI attributes.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
* @attr: Returned object's attributes
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
u16 token, struct dpdmai_attr *attr)
{
struct dpdmai_rsp_get_attributes *rsp_params;
struct fsl_mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
cmd_flags, token);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
attr->id = le32_to_cpu(rsp_params->id);
attr->version.major = le16_to_cpu(rsp_params->major);
attr->version.minor = le16_to_cpu(rsp_params->minor);
attr->num_of_priorities = rsp_params->num_of_priorities;
return 0;
}
/**
* dpdmai_set_rx_queue() - Set Rx queue configuration
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @cfg: Rx queue configuration
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u8 priority, const struct dpdmai_rx_queue_cfg *cfg)
{
struct dpdmai_cmd_queue *cmd_params;
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
cmd_flags, token);
cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
cmd_params->priority = cfg->dest_cfg.priority;
cmd_params->queue = priority;
cmd_params->dest_type = cfg->dest_cfg.dest_type;
cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
cmd_params->options = cpu_to_le32(cfg->options);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @attr: Returned Rx queue attributes
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u8 priority, struct dpdmai_rx_queue_attr *attr)
{
struct dpdmai_cmd_queue *cmd_params;
struct fsl_mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
cmd_flags, token);
cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->queue = priority;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
attr->dest_cfg.priority = cmd_params->priority;
attr->dest_cfg.dest_type = cmd_params->dest_type;
attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
attr->fqid = le32_to_cpu(cmd_params->fqid);
return 0;
}
/**
* dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @fqid: Returned Tx queue
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
u16 token, u8 priority, u32 *fqid)
{
struct dpdmai_rsp_get_tx_queue *rsp_params;
struct dpdmai_cmd_queue *cmd_params;
struct fsl_mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
cmd_flags, token);
cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->queue = priority;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
*fqid = le32_to_cpu(rsp_params->fqid);
return 0;
}


@ -0,0 +1,177 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 NXP */
#ifndef __FSL_DPDMAI_H
#define __FSL_DPDMAI_H
/* DPDMAI Version */
#define DPDMAI_VER_MAJOR 2
#define DPDMAI_VER_MINOR 2
#define DPDMAI_CMD_BASE_VERSION 0
#define DPDMAI_CMD_ID_OFFSET 4
#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \
DPDMAI_CMD_BASE_VERSION)
/* Command IDs */
#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
#define DPDMAI_CMDID_OPEN DPDMAI_CMDID_FORMAT(0x80E)
#define DPDMAI_CMDID_CREATE DPDMAI_CMDID_FORMAT(0x90E)
#define DPDMAI_CMDID_ENABLE DPDMAI_CMDID_FORMAT(0x002)
#define DPDMAI_CMDID_DISABLE DPDMAI_CMDID_FORMAT(0x003)
#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMDID_FORMAT(0x004)
#define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005)
#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006)
#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010)
#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011)
#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012)
#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013)
#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014)
#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015)
#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016)
#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017)
#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0)
#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1)
#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2)
#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
#define MAKE_UMASK64(_width) \
((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : (u64)-1))
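The two header-offset constants and MAKE_UMASK64() only make sense together; as a hedged illustration (the helper name below is an assumption, not something added by this patch), the token returned in an OPEN response header would be extracted roughly like this:

/*
 * Illustrative only: pull the 16-bit token out of an MC response header
 * using the offset and width defined above.
 */
static inline u16 mc_cmd_hdr_read_token(struct fsl_mc_command *cmd)
{
        u64 hdr = le64_to_cpu(cmd->header);

        return (u16)((hdr >> MC_CMD_HDR_TOKEN_O) &
                     MAKE_UMASK64(MC_CMD_HDR_TOKEN_S));
}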
/* Data Path DMA Interface API
* Contains initialization APIs and runtime control APIs for DPDMAI
*/
/**
* Maximum number of Tx/Rx priorities per DPDMAI object
*/
#define DPDMAI_PRIO_NUM 2
/* DPDMAI queue modification options */
/**
* Select to modify the user's context associated with the queue
*/
#define DPDMAI_QUEUE_OPT_USER_CTX 0x1
/**
* Select to modify the queue's destination
*/
#define DPDMAI_QUEUE_OPT_DEST 0x2
/**
* struct dpdmai_cfg - Structure representing DPDMAI configuration
* @priorities: Priorities for the DMA hardware processing; valid priorities are
* configured with values 1-8; the entry following last valid entry
* should be configured with 0
*/
struct dpdmai_cfg {
u8 priorities[DPDMAI_PRIO_NUM];
};
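A minimal sketch of how this structure might be filled for a two-priority object before calling dpdmai_create(); the priority values 1 and 2 are placeholders:

/* Illustrative only: ask the MC for two service priorities */
static const struct dpdmai_cfg example_dpdmai_cfg = {
        .priorities = { 1, 2 },
};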
/**
* struct dpdmai_attr - Structure representing DPDMAI attributes
* @id: DPDMAI object ID
* @version: DPDMAI version
* @num_of_priorities: number of priorities
*/
struct dpdmai_attr {
int id;
/**
* struct version - DPDMAI version
* @major: DPDMAI major version
* @minor: DPDMAI minor version
*/
struct {
u16 major;
u16 minor;
} version;
u8 num_of_priorities;
};
/**
* enum dpdmai_dest - DPDMAI destination types
* @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
* and does not generate FQDAN notifications; user is expected to dequeue
* from the queue based on polling or other user-defined method
* @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
* notifications to the specified DPIO; user is expected to dequeue
* from the queue only after notification is received
* @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
* FQDAN notifications, but is connected to the specified DPCON object;
* user is expected to dequeue from the DPCON channel
*/
enum dpdmai_dest {
DPDMAI_DEST_NONE = 0,
DPDMAI_DEST_DPIO = 1,
DPDMAI_DEST_DPCON = 2
};
/**
* struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
* @dest_type: Destination type
* @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
* @priority: Priority selection within the DPIO or DPCON channel; valid values
* are 0-1 or 0-7, depending on the number of priorities in that
* channel; not relevant for 'DPDMAI_DEST_NONE' option
*/
struct dpdmai_dest_cfg {
enum dpdmai_dest dest_type;
int dest_id;
u8 priority;
};
/**
* struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
* @options: Flags representing the suggested modifications to the queue;
* Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
* @user_ctx: User context value provided in the frame descriptor of each
* dequeued frame;
* valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
* @dest_cfg: Queue destination parameters;
* valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
*/
struct dpdmai_rx_queue_cfg {
struct dpdmai_dest_cfg dest_cfg;
u32 options;
u64 user_ctx;
};
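For orientation, a hedged example of a configuration that requests FQDAN notifications through a DPIO object; the DPIO id, channel priority and user context below are placeholder values:

/*
 * Illustrative only: steer dequeued frames to DPIO object 3 at channel
 * priority 0 and tag them with a driver-chosen context value.
 */
static const struct dpdmai_rx_queue_cfg example_rx_cfg = {
        .options  = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
        .user_ctx = 0x1234,
        .dest_cfg = {
                .dest_type = DPDMAI_DEST_DPIO,
                .dest_id   = 3,
                .priority  = 0,
        },
};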
/**
* struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
* @user_ctx: User context value provided in the frame descriptor of each
* dequeued frame
* @dest_cfg: Queue destination configuration
* @fqid: Virtual FQID value to be used for dequeue operations
*/
struct dpdmai_rx_queue_attr {
struct dpdmai_dest_cfg dest_cfg;
u64 user_ctx;
u32 fqid;
};
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token);
int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
const struct dpdmai_cfg *cfg, u16 *token);
int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
u16 token, struct dpdmai_attr *attr);
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u8 priority, struct dpdmai_rx_queue_attr *attr);
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
u16 token, u8 priority, u32 *fqid);
#endif /* __FSL_DPDMAI_H */
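Taken together, the declarations above form the setup path a consumer such as the dpaa2 qDMA driver would be expected to follow. Below is a hedged sketch of that sequence, not code from this patch: the device id, priority index 0 and cmd_flags of 0 are placeholder choices, and error unwinding is reduced to a single close.

/*
 * Illustrative only: open a DPDMAI object, park its Rx queue (polled mode),
 * read back the frame queue ids and enable the object.
 */
static int example_dpdmai_setup(struct fsl_mc_io *mc_io, int dpdmai_dev_id,
                                u16 *token, u32 *rx_fqid, u32 *tx_fqid)
{
        struct dpdmai_rx_queue_cfg rx_cfg = { 0 };
        struct dpdmai_rx_queue_attr rx_attr;
        struct dpdmai_attr attr;
        int err;

        err = dpdmai_open(mc_io, 0, dpdmai_dev_id, token);
        if (err)
                return err;

        err = dpdmai_get_attributes(mc_io, 0, *token, &attr);
        if (err)
                goto out_close;
        if (attr.num_of_priorities < 1) {
                err = -EINVAL;
                goto out_close;
        }

        /* Park the Rx queue: no FQDAN notification, dequeue by polling */
        rx_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST;
        rx_cfg.dest_cfg.dest_type = DPDMAI_DEST_NONE;
        rx_cfg.user_ctx = 0;
        err = dpdmai_set_rx_queue(mc_io, 0, *token, 0, &rx_cfg);
        if (err)
                goto out_close;

        err = dpdmai_get_rx_queue(mc_io, 0, *token, 0, &rx_attr);
        if (err)
                goto out_close;
        *rx_fqid = rx_attr.fqid;

        err = dpdmai_get_tx_queue(mc_io, 0, *token, 0, tx_fqid);
        if (err)
                goto out_close;

        return dpdmai_enable(mc_io, 0, *token);

out_close:
        dpdmai_close(mc_io, 0, *token);
        return err;
}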

View File

@ -305,6 +305,11 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
return len;
}

void fsl_edma_get_realcnt(struct fsl_edma_chan *fsl_chan)
{
fsl_chan->chn_real_count = fsl_edma_desc_residue(fsl_chan, NULL, true);
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
@ -314,8 +319,12 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
unsigned long flags;

status = dma_cookie_status(chan, cookie, txstate);
if (status == DMA_COMPLETE) {
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
txstate->residue = fsl_chan->chn_real_count;
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return status;
}

if (!txstate)
return fsl_chan->status;
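From a client's point of view, the change above means the residue reported once a descriptor has completed now reflects the count captured in the interrupt handler. A hedged sketch of how that would typically be read back ('chan' and 'cookie' are assumed to come from the caller's own setup):

/* Hedged client-side sketch; not part of this patch. */
static void example_check_residue(struct device *dev, struct dma_chan *chan,
                                  dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(chan, cookie, &state);
        if (status == DMA_COMPLETE)
                /* residue now mirrors chn_real_count from the IRQ path */
                dev_dbg(dev, "residue at completion: %u\n", state.residue);
}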

View File

@ -126,6 +126,7 @@ struct fsl_edma_chan {
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[16];
u32 chn_real_count;
};

struct fsl_edma_desc {
@ -229,6 +230,7 @@ int fsl_edma_pause(struct dma_chan *chan);
int fsl_edma_resume(struct dma_chan *chan);
int fsl_edma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg);
void fsl_edma_get_realcnt(struct fsl_edma_chan *fsl_chan);
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate);
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(

File diff suppressed because it is too large.

View File

@ -46,6 +46,7 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
spin_lock(&fsl_chan->vchan.lock);
if (!fsl_chan->edesc->iscyclic) {
fsl_edma_get_realcnt(fsl_chan);
list_del(&fsl_chan->edesc->vdesc.node);
vchan_cookie_complete(&fsl_chan->edesc->vdesc);
fsl_chan->edesc = NULL;

View File

@ -8,7 +8,8 @@
//
// Based on code from Freescale:
//
// Copyright 2004-2016 Freescale Semiconductor, Inc. All Rights Reserved.
// Copyright 2018 NXP.
#include <linux/init.h> #include <linux/init.h>
#include <linux/iopoll.h> #include <linux/iopoll.h>
@ -23,6 +24,7 @@
#include <linux/semaphore.h> #include <linux/semaphore.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/slab.h> #include <linux/slab.h>
@ -74,6 +76,9 @@
#define SDMA_CHNENBL0_IMX35 0x200 #define SDMA_CHNENBL0_IMX35 0x200
#define SDMA_CHNENBL0_IMX31 0x080 #define SDMA_CHNENBL0_IMX31 0x080
#define SDMA_CHNPRI_0 0x100 #define SDMA_CHNPRI_0 0x100
#define SDMA_DONE0_CONFIG 0x1000
#define SDMA_DONE0_CONFIG_DONE_SEL 0x7
#define SDMA_DONE0_CONFIG_DONE_DIS 0x6
/* /*
* Buffer descriptor status values. * Buffer descriptor status values.
@ -168,6 +173,8 @@
#define SDMA_WATERMARK_LEVEL_SPDIF BIT(10) #define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
#define SDMA_WATERMARK_LEVEL_SP BIT(11) #define SDMA_WATERMARK_LEVEL_SP BIT(11)
#define SDMA_WATERMARK_LEVEL_DP BIT(12) #define SDMA_WATERMARK_LEVEL_DP BIT(12)
#define SDMA_WATERMARK_LEVEL_SD BIT(13)
#define SDMA_WATERMARK_LEVEL_DD BIT(14)
#define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16) #define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE BIT(28) #define SDMA_WATERMARK_LEVEL_LWE BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE BIT(29) #define SDMA_WATERMARK_LEVEL_HWE BIT(29)
@ -175,12 +182,17 @@
#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ #define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \ #define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
BIT(DMA_MEM_TO_DEV) | \ BIT(DMA_MEM_TO_DEV) | \
BIT(DMA_DEV_TO_DEV)) BIT(DMA_DEV_TO_DEV))
#define SDMA_WATERMARK_LEVEL_FIFOS_OFF 12
#define SDMA_WATERMARK_LEVEL_SW_DONE BIT(23)
#define SDMA_WATERMARK_LEVEL_SW_DONE_SEL_OFF 24
/* /*
* Mode/Count of data node descriptors - IPCv2 * Mode/Count of data node descriptors - IPCv2
*/ */
@ -377,9 +389,14 @@ struct sdma_channel {
unsigned long watermark_level; unsigned long watermark_level;
u32 shp_addr, per_addr; u32 shp_addr, per_addr;
enum dma_status status; enum dma_status status;
bool context_loaded;
struct imx_dma_data data; struct imx_dma_data data;
struct work_struct terminate_worker; struct work_struct terminate_worker;
bool is_ram_script;
bool src_dualfifo;
bool dst_dualfifo;
unsigned int fifo_num;
bool sw_done;
u32 sw_done_sel;
}; };
#define IMX_DMA_SG_LOOP BIT(0) #define IMX_DMA_SG_LOOP BIT(0)
@ -389,6 +406,15 @@ struct sdma_channel {
#define MXC_SDMA_MIN_PRIORITY 1 #define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7 #define MXC_SDMA_MAX_PRIORITY 7
/*
* Registers 0x78 (SDMA_XTRIG_CONF2 + 4) through 0x100 (SDMA_CHNPRI_0) are
* reserved and cannot be accessed; skip touching them in suspend/resume.
* The two macros below are only used on i.MX6SX.
*/
#define MXC_SDMA_RESERVED_REG (SDMA_CHNPRI_0 - SDMA_XTRIG_CONF2 - 4)
#define MXC_SDMA_SAVED_REG_NUM (((SDMA_CHNENBL0_IMX35 + 4 * 48) - \
MXC_SDMA_RESERVED_REG) / 4)
#define SDMA_FIRMWARE_MAGIC 0x414d4453 #define SDMA_FIRMWARE_MAGIC 0x414d4453
/** /**
@ -420,6 +446,13 @@ struct sdma_driver_data {
int num_events; int num_events;
struct sdma_script_start_addrs *script_addrs; struct sdma_script_start_addrs *script_addrs;
bool check_ratio; bool check_ratio;
/*
* ecspi ERR009165 fixed should be done in sdma script
* and it be fixed in soc from i.mx6ul.
* please get more information from below link:
* https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
*/
bool ecspi_fixed;
}; };
struct sdma_engine { struct sdma_engine {
@ -427,6 +460,8 @@ struct sdma_engine {
struct device_dma_parameters dma_parms; struct device_dma_parameters dma_parms;
struct sdma_channel channel[MAX_DMA_CHANNELS]; struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control; struct sdma_channel_control *channel_control;
u32 save_regs[MXC_SDMA_SAVED_REG_NUM];
const char *fw_name;
void __iomem *regs; void __iomem *regs;
struct sdma_context_data *context; struct sdma_context_data *context;
dma_addr_t context_phys; dma_addr_t context_phys;
@ -444,6 +479,10 @@ struct sdma_engine {
struct sdma_buffer_descriptor *bd0; struct sdma_buffer_descriptor *bd0;
/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/ /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
bool clk_ratio; bool clk_ratio;
struct gen_pool *iram_pool;
bool fw_loaded;
u32 fw_fail;
unsigned short ram_code_start;
}; };
static int sdma_config_write(struct dma_chan *chan, static int sdma_config_write(struct dma_chan *chan,
@ -540,6 +579,31 @@ static struct sdma_driver_data sdma_imx6q = {
.script_addrs = &sdma_script_imx6q, .script_addrs = &sdma_script_imx6q,
}; };
static struct sdma_script_start_addrs sdma_script_imx6sx = {
.ap_2_ap_addr = 642,
.uart_2_mcu_addr = 817,
.mcu_2_app_addr = 747,
.uartsh_2_mcu_addr = 1032,
.mcu_2_shp_addr = 960,
.app_2_mcu_addr = 683,
.shp_2_mcu_addr = 891,
.spdif_2_mcu_addr = 1100,
.mcu_2_spdif_addr = 1134,
};
static struct sdma_driver_data sdma_imx6sx = {
.chnenbl0 = SDMA_CHNENBL0_IMX35,
.num_events = 48,
.script_addrs = &sdma_script_imx6sx,
};
static struct sdma_driver_data sdma_imx6ul = {
.chnenbl0 = SDMA_CHNENBL0_IMX35,
.num_events = 48,
.script_addrs = &sdma_script_imx6sx,
.ecspi_fixed = true,
};
static struct sdma_script_start_addrs sdma_script_imx7d = { static struct sdma_script_start_addrs sdma_script_imx7d = {
.ap_2_ap_addr = 644, .ap_2_ap_addr = 644,
.uart_2_mcu_addr = 819, .uart_2_mcu_addr = 819,
@ -563,6 +627,7 @@ static struct sdma_driver_data sdma_imx8mq = {
.num_events = 48, .num_events = 48,
.script_addrs = &sdma_script_imx7d, .script_addrs = &sdma_script_imx7d,
.check_ratio = 1, .check_ratio = 1,
.ecspi_fixed = true,
}; };
static const struct platform_device_id sdma_devtypes[] = { static const struct platform_device_id sdma_devtypes[] = {
@ -584,9 +649,15 @@ static const struct platform_device_id sdma_devtypes[] = {
}, { }, {
.name = "imx6q-sdma", .name = "imx6q-sdma",
.driver_data = (unsigned long)&sdma_imx6q, .driver_data = (unsigned long)&sdma_imx6q,
}, {
.name = "imx6sx-sdma",
.driver_data = (unsigned long)&sdma_imx6sx,
}, { }, {
.name = "imx7d-sdma", .name = "imx7d-sdma",
.driver_data = (unsigned long)&sdma_imx7d, .driver_data = (unsigned long)&sdma_imx7d,
}, {
.name = "imx6ul-sdma",
.driver_data = (unsigned long)&sdma_imx6ul,
}, { }, {
.name = "imx8mq-sdma", .name = "imx8mq-sdma",
.driver_data = (unsigned long)&sdma_imx8mq, .driver_data = (unsigned long)&sdma_imx8mq,
@ -603,7 +674,9 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, }, { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, }, { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ .compatible = "fsl,imx6sx-sdma", .data = &sdma_imx6sx, },
{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, }, { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
{ .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, }, { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
{ /* sentinel */ } { /* sentinel */ }
}; };
@ -695,10 +768,13 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
int ret;
unsigned long flags;

if (sdma->iram_pool)
buf_virt = gen_pool_dma_alloc(sdma->iram_pool, size, &buf_phys);
else
buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys,
GFP_KERNEL);
if (!buf_virt)
return -ENOMEM;

spin_lock_irqsave(&sdma->channel_0_lock, flags);
@ -714,7 +790,10 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

if (sdma->iram_pool)
gen_pool_free(sdma->iram_pool, (unsigned long)buf_virt, size);
else
dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);

return ret;
}
@ -729,6 +808,21 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
val = readl_relaxed(sdma->regs + chnenbl);
__set_bit(channel, &val);
writel_relaxed(val, sdma->regs + chnenbl);

/* Set SDMA_DONEx_CONFIG if sw_done is enabled */
if (sdmac->sw_done) {
u32 offset = SDMA_DONE0_CONFIG + sdmac->sw_done_sel / 4;
u32 done_sel = SDMA_DONE0_CONFIG_DONE_SEL +
((sdmac->sw_done_sel % 4) << 3);
u32 sw_done_dis = SDMA_DONE0_CONFIG_DONE_DIS +
((sdmac->sw_done_sel % 4) << 3);

val = readl_relaxed(sdma->regs + offset);
__set_bit(done_sel, &val);
__clear_bit(sw_done_dis, &val);
writel_relaxed(val, sdma->regs + offset);
}
}
static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
@ -866,7 +960,10 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
desc = sdmac->desc;
if (desc) {
if (sdmac->flags & IMX_DMA_SG_LOOP) {
if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
sdma_update_channel_loop(sdmac);
else
vchan_cyclic_callback(&desc->vd);
} else {
mxc_sdma_handle_channel_normal(sdmac);
vchan_cookie_complete(&desc->vd);
@ -925,6 +1022,10 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
emi_2_per = sdma->script_addrs->mcu_2_ata_addr; emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
break; break;
case IMX_DMATYPE_CSPI: case IMX_DMATYPE_CSPI:
per_2_emi = sdma->script_addrs->app_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
sdmac->is_ram_script = true;
break;
case IMX_DMATYPE_EXT: case IMX_DMATYPE_EXT:
case IMX_DMATYPE_SSI: case IMX_DMATYPE_SSI:
case IMX_DMATYPE_SAI: case IMX_DMATYPE_SAI:
@ -934,6 +1035,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
case IMX_DMATYPE_SSI_DUAL: case IMX_DMATYPE_SSI_DUAL:
per_2_emi = sdma->script_addrs->ssish_2_mcu_addr; per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_ssish_addr; emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
sdmac->is_ram_script = true;
break; break;
case IMX_DMATYPE_SSI_SP: case IMX_DMATYPE_SSI_SP:
case IMX_DMATYPE_MMC: case IMX_DMATYPE_MMC:
@ -948,11 +1050,13 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
per_2_emi = sdma->script_addrs->asrc_2_mcu_addr; per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
per_2_per = sdma->script_addrs->per_2_per_addr; per_2_per = sdma->script_addrs->per_2_per_addr;
sdmac->is_ram_script = true;
break; break;
case IMX_DMATYPE_ASRC_SP: case IMX_DMATYPE_ASRC_SP:
per_2_emi = sdma->script_addrs->shp_2_mcu_addr; per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_shp_addr; emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
per_2_per = sdma->script_addrs->per_2_per_addr; per_2_per = sdma->script_addrs->per_2_per_addr;
sdmac->is_ram_script = true;
break; break;
case IMX_DMATYPE_MSHC: case IMX_DMATYPE_MSHC:
per_2_emi = sdma->script_addrs->mshc_2_mcu_addr; per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
@ -968,6 +1072,14 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
case IMX_DMATYPE_IPU_MEMORY: case IMX_DMATYPE_IPU_MEMORY:
emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr; emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
break; break;
case IMX_DMATYPE_HDMI:
emi_2_per = sdma->script_addrs->hdmi_dma_addr;
sdmac->is_ram_script = true;
break;
case IMX_DMATYPE_MULTI_SAI:
per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
sdmac->is_ram_script = true;
default: default:
break; break;
} }
@ -988,9 +1100,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
int ret; int ret;
unsigned long flags; unsigned long flags;
if (sdmac->context_loaded)
return 0;
if (sdmac->direction == DMA_DEV_TO_MEM) if (sdmac->direction == DMA_DEV_TO_MEM)
load_address = sdmac->pc_from_device; load_address = sdmac->pc_from_device;
else if (sdmac->direction == DMA_DEV_TO_DEV) else if (sdmac->direction == DMA_DEV_TO_DEV)
@ -1018,11 +1127,16 @@ static int sdma_load_context(struct sdma_channel *sdmac)
/* Send by context the event mask,base address for peripheral
* and watermark level
*/
if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
context->gReg[4] = sdmac->per_addr;
context->gReg[6] = sdmac->shp_addr;
} else {
context->gReg[0] = sdmac->event_mask[1];
context->gReg[1] = sdmac->event_mask[0];
context->gReg[2] = sdmac->per_addr;
context->gReg[6] = sdmac->shp_addr;
context->gReg[7] = sdmac->watermark_level;
}
bd0->mode.command = C0_SETDM;
bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
@ -1033,7 +1147,30 @@ static int sdma_load_context(struct sdma_channel *sdmac)
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

return ret;
}
static int sdma_save_restore_context(struct sdma_engine *sdma, bool save)
{
struct sdma_context_data *context = sdma->context;
struct sdma_buffer_descriptor *bd0 = sdma->bd0;
unsigned long flags;
int ret;
spin_lock_irqsave(&sdma->channel_0_lock, flags);
if (save)
bd0->mode.command = C0_GETDM;
else
bd0->mode.command = C0_SETDM;
bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
bd0->mode.count = MAX_DMA_CHANNELS * sizeof(*context) / 4;
bd0->buffer_addr = sdma->context_phys;
bd0->ext_buffer_addr = 2048;
ret = sdma_run_channel0(sdma);
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
return ret; return ret;
} }
@ -1074,7 +1211,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
sdmac->desc = NULL; sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags); spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head); vchan_dma_desc_free_list(&sdmac->vc, &head);
sdmac->context_loaded = false;
} }
static int sdma_disable_channel_async(struct dma_chan *chan) static int sdma_disable_channel_async(struct dma_chan *chan)
@ -1136,12 +1272,36 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
if (sdmac->src_dualfifo)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SD;
if (sdmac->dst_dualfifo)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DD;
}
static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
{
sdmac->watermark_level &= ~(0xFF << SDMA_WATERMARK_LEVEL_FIFOS_OFF |
SDMA_WATERMARK_LEVEL_SW_DONE |
0xf << SDMA_WATERMARK_LEVEL_SW_DONE_SEL_OFF);
if (sdmac->sw_done)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE |
sdmac->sw_done_sel <<
SDMA_WATERMARK_LEVEL_SW_DONE_SEL_OFF;
/*
 * For fifo_num: bits 12-15 hold the FIFO number and bits 16-19 the FIFO
 * offset, so fifo_num only needs to be shifted left by 12 bits into
 * watermark_level here.
 */
sdmac->watermark_level |= sdmac->fifo_num<<
SDMA_WATERMARK_LEVEL_FIFOS_OFF;
}
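As a worked illustration of that bit layout (all values hypothetical, not taken from the driver): with two FIFOs at offset 0 and software done enabled on selector 1, the watermark word comes out as 0x01802000.

/* Hypothetical values: fifo_num = 2, sw_done enabled on selector 1 */
static unsigned long example_sai_watermark(void)
{
        unsigned long wm = 0;

        wm |= 2UL << SDMA_WATERMARK_LEVEL_FIFOS_OFF;       /* FIFO count  */
        wm |= SDMA_WATERMARK_LEVEL_SW_DONE;                 /* enable done */
        wm |= 1UL << SDMA_WATERMARK_LEVEL_SW_DONE_SEL_OFF;  /* selector 1  */

        return wm;      /* 0x01802000 */
}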
static int sdma_config_channel(struct dma_chan *chan) static int sdma_config_channel(struct dma_chan *chan)
{ {
struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_channel *sdmac = to_sdma_chan(chan);
int ret;
sdma_disable_channel(chan); sdma_disable_channel(chan);
@ -1171,8 +1331,21 @@ static int sdma_config_channel(struct dma_chan *chan)
if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
sdmac->peripheral_type == IMX_DMATYPE_ASRC)
sdma_set_watermarklevel_for_p2p(sdmac);
} else {
/*
 * ERR009165 is fixed in silicon from i.MX6UL onwards, so no
 * workaround is needed there; set bit 31 to let the SDMA
 * script skip the errata handling.
 */
if (sdmac->peripheral_type == IMX_DMATYPE_CSPI &&
sdmac->direction == DMA_MEM_TO_DEV &&
sdmac->sdma->drvdata->ecspi_fixed)
__set_bit(31, &sdmac->watermark_level);
else if (sdmac->peripheral_type ==
IMX_DMATYPE_MULTI_SAI)
sdma_set_watermarklevel_for_sais(sdmac);

__set_bit(sdmac->event_id0, sdmac->event_mask);
}
/* Address */ /* Address */
sdmac->shp_addr = sdmac->per_address; sdmac->shp_addr = sdmac->per_address;
@ -1181,9 +1354,7 @@ static int sdma_config_channel(struct dma_chan *chan)
sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
}

return 0;
}
static int sdma_set_channel_priority(struct sdma_channel *sdmac, static int sdma_set_channel_priority(struct sdma_channel *sdmac,
@ -1206,8 +1377,12 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
{
int ret = -EBUSY;

if (sdma->iram_pool)
sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool, PAGE_SIZE,
&sdma->bd0_phys);
else
sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE,
&sdma->bd0_phys, GFP_NOWAIT);
if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
@ -1227,10 +1402,15 @@ out:
static int sdma_alloc_bd(struct sdma_desc *desc) static int sdma_alloc_bd(struct sdma_desc *desc)
{ {
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
struct sdma_engine *sdma = desc->sdmac->sdma;
int ret = 0;

if (sdma->iram_pool)
desc->bd = gen_pool_dma_alloc(sdma->iram_pool, PAGE_SIZE,
&desc->bd_phys);
else
desc->bd = dma_alloc_coherent(sdma->dev, bd_size,
&desc->bd_phys, GFP_NOWAIT);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
@ -1242,9 +1422,14 @@ out:
static void sdma_free_bd(struct sdma_desc *desc)
{
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
struct sdma_engine *sdma = desc->sdmac->sdma;

if (sdma->iram_pool)
gen_pool_free(sdma->iram_pool, (unsigned long)desc->bd,
PAGE_SIZE);
else
dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
desc->bd_phys);
}
static void sdma_desc_free(struct virt_dma_desc *vd) static void sdma_desc_free(struct virt_dma_desc *vd)
@ -1298,6 +1483,13 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
sdmac->peripheral_type = data->peripheral_type; sdmac->peripheral_type = data->peripheral_type;
sdmac->event_id0 = data->dma_request; sdmac->event_id0 = data->dma_request;
sdmac->event_id1 = data->dma_request2; sdmac->event_id1 = data->dma_request2;
sdmac->src_dualfifo = data->src_dualfifo;
sdmac->dst_dualfifo = data->dst_dualfifo;
/* Get software done selector if sw_done enabled */
if (data->done_sel & BIT(31)) {
sdmac->sw_done = true;
sdmac->sw_done_sel = (data->done_sel >> 8) & 0xff;
}
ret = clk_enable(sdmac->sdma->clk_ipg); ret = clk_enable(sdmac->sdma->clk_ipg);
if (ret) if (ret)
@ -1328,8 +1520,8 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_channel_synchronize(chan); sdma_channel_synchronize(chan);
sdma_event_disable(sdmac, sdmac->event_id0);

if (sdmac->event_id1)
sdma_event_disable(sdmac, sdmac->event_id1);
@ -1347,6 +1539,11 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
{ {
struct sdma_desc *desc; struct sdma_desc *desc;
if (!sdmac->sdma->fw_loaded && sdmac->is_ram_script) {
dev_err(sdmac->sdma->dev, "sdma firmware not ready!\n");
goto err_out;
}
desc = kzalloc((sizeof(*desc)), GFP_NOWAIT); desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
if (!desc) if (!desc)
goto err_out; goto err_out;
@ -1362,7 +1559,7 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
desc->sdmac = sdmac; desc->sdmac = sdmac;
desc->num_bd = bds; desc->num_bd = bds;
if (bds && sdma_alloc_bd(desc))
goto err_desc_out; goto err_desc_out;
/* No slave_config called in MEMCPY case, so do here */ /* No slave_config called in MEMCPY case, so do here */
@ -1483,6 +1680,9 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
if (count & 3 || sg->dma_address & 3) if (count & 3 || sg->dma_address & 3)
goto err_bd_out; goto err_bd_out;
break; break;
case DMA_SLAVE_BUSWIDTH_3_BYTES:
bd->mode.command = 3;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES: case DMA_SLAVE_BUSWIDTH_2_BYTES:
bd->mode.command = 2; bd->mode.command = 2;
if (count & 1 || sg->dma_address & 1) if (count & 1 || sg->dma_address & 1)
@ -1527,13 +1727,16 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
{ {
struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma; struct sdma_engine *sdma = sdmac->sdma;
int num_periods = 0;
int channel = sdmac->channel;
int i = 0, buf = 0;
struct sdma_desc *desc;

dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
num_periods = buf_len / period_len;
sdma_config_write(chan, &sdmac->slave_config, direction); sdma_config_write(chan, &sdmac->slave_config, direction);
desc = sdma_transfer_init(sdmac, direction, num_periods); desc = sdma_transfer_init(sdmac, direction, num_periods);
@ -1550,6 +1753,9 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
goto err_bd_out; goto err_bd_out;
} }
if (sdmac->peripheral_type == IMX_DMATYPE_HDMI)
return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
while (buf < buf_len) { while (buf < buf_len) {
struct sdma_buffer_descriptor *bd = &desc->bd[i]; struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param; int param;
@ -1597,11 +1803,15 @@ static int sdma_config_write(struct dma_chan *chan,
{ {
struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_channel *sdmac = to_sdma_chan(chan);
sdmac->watermark_level = 0;
sdmac->is_ram_script = false;
if (direction == DMA_DEV_TO_MEM) { if (direction == DMA_DEV_TO_MEM) {
sdmac->per_address = dmaengine_cfg->src_addr; sdmac->per_address = dmaengine_cfg->src_addr;
sdmac->watermark_level = dmaengine_cfg->src_maxburst * sdmac->watermark_level = dmaengine_cfg->src_maxburst *
dmaengine_cfg->src_addr_width; dmaengine_cfg->src_addr_width;
sdmac->word_size = dmaengine_cfg->src_addr_width; sdmac->word_size = dmaengine_cfg->src_addr_width;
sdmac->fifo_num = dmaengine_cfg->src_fifo_num;
} else if (direction == DMA_DEV_TO_DEV) { } else if (direction == DMA_DEV_TO_DEV) {
sdmac->per_address2 = dmaengine_cfg->src_addr; sdmac->per_address2 = dmaengine_cfg->src_addr;
sdmac->per_address = dmaengine_cfg->dst_addr; sdmac->per_address = dmaengine_cfg->dst_addr;
@ -1610,11 +1820,16 @@ static int sdma_config_write(struct dma_chan *chan,
sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) & sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
SDMA_WATERMARK_LEVEL_HWML; SDMA_WATERMARK_LEVEL_HWML;
sdmac->word_size = dmaengine_cfg->dst_addr_width; sdmac->word_size = dmaengine_cfg->dst_addr_width;
} else if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
sdmac->per_address = dmaengine_cfg->dst_addr;
sdmac->per_address2 = dmaengine_cfg->src_addr;
sdmac->watermark_level = 0;
} else { } else {
sdmac->per_address = dmaengine_cfg->dst_addr; sdmac->per_address = dmaengine_cfg->dst_addr;
sdmac->watermark_level = dmaengine_cfg->dst_maxburst * sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
dmaengine_cfg->dst_addr_width; dmaengine_cfg->dst_addr_width;
sdmac->word_size = dmaengine_cfg->dst_addr_width; sdmac->word_size = dmaengine_cfg->dst_addr_width;
sdmac->fifo_num = dmaengine_cfg->dst_fifo_num;
} }
sdmac->direction = direction; sdmac->direction = direction;
return sdma_config_channel(chan); return sdma_config_channel(chan);
@ -1628,11 +1843,9 @@ static int sdma_config(struct dma_chan *chan,
memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
/* Set ENBLn earlier to make sure dma request triggered after that */
if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
return -EINVAL;
sdma_event_enable(sdmac, sdmac->event_id0);
if (sdmac->event_id1) { if (sdmac->event_id1) {
if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
@ -1693,8 +1906,8 @@ static void sdma_issue_pending(struct dma_chan *chan)
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 45
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46
static void sdma_add_scripts(struct sdma_engine *sdma, static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr) const struct sdma_script_start_addrs *addr)
@ -1728,12 +1941,21 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
unsigned short *ram_code; unsigned short *ram_code;
if (!fw) {
/* Retry the firmware request once before falling back to ROM */
if (sdma->fw_fail)
dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
else {
request_firmware_nowait(THIS_MODULE,
FW_ACTION_HOTPLUG, sdma->fw_name,
sdma->dev, GFP_KERNEL, sdma,
sdma_load_firmware);
sdma->fw_fail++;
}
return;
}

if (fw->size < sizeof(*header) || sdma->fw_loaded)
goto err_firmware;
header = (struct sdma_firmware_header *)fw->data; header = (struct sdma_firmware_header *)fw->data;
@ -1762,6 +1984,7 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
addr = (void *)header + header->script_addrs_start; addr = (void *)header + header->script_addrs_start;
ram_code = (void *)header + header->ram_code_start; ram_code = (void *)header + header->ram_code_start;
sdma->ram_code_start = header->ram_code_start;
clk_enable(sdma->clk_ipg); clk_enable(sdma->clk_ipg);
clk_enable(sdma->clk_ahb); clk_enable(sdma->clk_ahb);
@ -1774,6 +1997,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
sdma_add_scripts(sdma, addr); sdma_add_scripts(sdma, addr);
sdma->fw_loaded = true;
dev_info(sdma->dev, "loaded firmware %d.%d\n", dev_info(sdma->dev, "loaded firmware %d.%d\n",
header->version_major, header->version_major,
header->version_minor); header->version_minor);
@ -1862,7 +2087,7 @@ static int sdma_get_firmware(struct sdma_engine *sdma,
static int sdma_init(struct sdma_engine *sdma) static int sdma_init(struct sdma_engine *sdma)
{ {
int i, ret, ccbsize;
dma_addr_t ccb_phys;

ret = clk_enable(sdma->clk_ipg);
@ -1879,11 +2104,15 @@ static int sdma_init(struct sdma_engine *sdma)
/* Be sure SDMA has not started yet */ /* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control)
+ sizeof(struct sdma_context_data));

if (sdma->iram_pool)
sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool,
ccbsize, &ccb_phys);
else
sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize,
&ccb_phys, GFP_KERNEL);
if (!sdma->channel_control) { if (!sdma->channel_control) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_dma_alloc; goto err_dma_alloc;
@ -1959,9 +2188,14 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 3) if (dma_spec->args_count != 3)
return NULL; return NULL;
memset(&data, 0, sizeof(data));
data.dma_request = dma_spec->args[0];
data.peripheral_type = dma_spec->args[1];

/* Get sw_done setting if sw_done enabled */
if (dma_spec->args[2] & BIT(31))
data.done_sel = dma_spec->args[2];
data.priority = dma_spec->args[2] & 0xff;
/* /*
* init dma_request2 to zero, which is not used by the dts. * init dma_request2 to zero, which is not used by the dts.
* For P2P, dma_request2 is init from dma_request_channel(), * For P2P, dma_request2 is init from dma_request_channel(),
@ -2137,6 +2371,10 @@ static int sdma_probe(struct platform_device *pdev)
sdma->spba_end_addr = spba_res.end; sdma->spba_end_addr = spba_res.end;
} }
of_node_put(spba_bus); of_node_put(spba_bus);
sdma->iram_pool = of_gen_pool_get(np, "iram", 0);
if (sdma->iram_pool)
dev_info(&pdev->dev, "alloc bd from iram. \n");
} }
/* /*
@ -2166,6 +2404,8 @@ static int sdma_probe(struct platform_device *pdev)
} }
} }
sdma->fw_name = fw_name;
return 0; return 0;
err_register: err_register:
@ -2201,10 +2441,126 @@ static int sdma_remove(struct platform_device *pdev)
return 0; return 0;
} }
#ifdef CONFIG_PM_SLEEP
static int sdma_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct sdma_engine *sdma = platform_get_drvdata(pdev);
int i, ret = 0;
/* Do nothing unless this is i.MX6SX, i.MX6UL or i.MX7D */
if (sdma->drvdata != &sdma_imx6sx && sdma->drvdata != &sdma_imx7d
&& sdma->drvdata != &sdma_imx6ul)
return 0;
clk_enable(sdma->clk_ipg);
clk_enable(sdma->clk_ahb);
ret = sdma_save_restore_context(sdma, true);
if (ret) {
dev_err(sdma->dev, "save context error!\n");
return ret;
}
/* save regs */
for (i = 0; i < MXC_SDMA_SAVED_REG_NUM; i++) {
/*
* 0x78(SDMA_XTRIG_CONF2+4)~0x100(SDMA_CHNPRI_O) registers are
* reserved and can't be touched. Skip these regs.
*/
if (i > SDMA_XTRIG_CONF2 / 4)
sdma->save_regs[i] = readl_relaxed(sdma->regs +
MXC_SDMA_RESERVED_REG
+ 4 * i);
else
sdma->save_regs[i] = readl_relaxed(sdma->regs + 4 * i);
}
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
return 0;
}
static int sdma_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct sdma_engine *sdma = platform_get_drvdata(pdev);
unsigned long timeout = jiffies + msecs_to_jiffies(2);
int i, ret;
/* Do nothing unless this is i.MX6SX, i.MX6UL or i.MX7D */
if (sdma->drvdata != &sdma_imx6sx && sdma->drvdata != &sdma_imx7d
&& sdma->drvdata != &sdma_imx6ul)
return 0;
clk_enable(sdma->clk_ipg);
clk_enable(sdma->clk_ahb);
/* Do nothing if mega/fast mix not turned off */
if (readl_relaxed(sdma->regs + SDMA_H_C0PTR)) {
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
return 0;
}
/* Firmware was lost, mark as "not ready" */
sdma->fw_loaded = false;
/* restore regs and load firmware */
for (i = 0; i < MXC_SDMA_SAVED_REG_NUM; i++) {
/*
* 0x78(SDMA_XTRIG_CONF2+4)~0x100(SDMA_CHNPRI_O) registers are
* reserved and can't be touched. Skip these regs.
*/
if (i > SDMA_XTRIG_CONF2 / 4)
writel_relaxed(sdma->save_regs[i], sdma->regs +
MXC_SDMA_RESERVED_REG + 4 * i);
/* set static context switch mode before channel0 running */
else if (i == SDMA_H_CONFIG / 4)
writel_relaxed(sdma->save_regs[i] & ~SDMA_H_CONFIG_CSM,
sdma->regs + SDMA_H_CONFIG);
else
writel_relaxed(sdma->save_regs[i], sdma->regs + 4 * i);
}
/* prepare priority for channel0 to start */
sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
ret = sdma_get_firmware(sdma, sdma->fw_name);
if (ret) {
dev_warn(&pdev->dev, "failed to get firmware\n");
goto out;
}
/* wait firmware loaded */
do {
if (time_after(jiffies, timeout)) {
dev_warn(&pdev->dev, "failed to load firmware\n");
break;
}
usleep_range(50, 500);
} while (!sdma->fw_loaded);
ret = sdma_save_restore_context(sdma, false);
if (ret) {
dev_err(sdma->dev, "restore context error!\n");
goto out;
}
out:
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
return ret;
}
#endif
static const struct dev_pm_ops sdma_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(sdma_suspend, sdma_resume)
};
static struct platform_driver sdma_driver = {
.driver = {
.name = "imx-sdma",
.of_match_table = sdma_dt_ids,
.pm = &sdma_pm_ops,
},
.id_table = sdma_devtypes,
.remove = sdma_remove,

View File

@ -729,6 +729,12 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_irq;

if (strcmp(chan->device->dev->driver->name, "mxs-dma"))
return false;

if (!mxs_dma)
return false;

if (chan->chan_id != param->chan_id)
return false;
@ -760,7 +766,7 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
ofdma->of_node);
}

static int mxs_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct platform_device_id *id_entry;
@ -869,10 +875,6 @@ static struct platform_driver mxs_dma_driver = {
.of_match_table = mxs_dma_dt_ids,
},
.id_table = mxs_dma_ids,
.probe = mxs_dma_probe,
};
module_platform_driver(mxs_dma_driver);
static int __init mxs_dma_module_init(void)
{
return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);

View File

@ -333,6 +333,8 @@ enum dma_slave_buswidth {
* loops in this area in order to transfer the data.
* @dst_port_window_size: same as src_port_window_size but for the destination
* port.
* @src_fifo_num: bits 0-7 hold the FIFO number, bits 8-11 the FIFO offset
* @dst_fifo_num: same as @src_fifo_num
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
@ -362,6 +364,8 @@ struct dma_slave_config {
u32 dst_maxburst;
u32 src_port_window_size;
u32 dst_port_window_size;
u32 src_fifo_num;
u32 dst_fifo_num;
bool device_fc;
unsigned int slave_id;
};
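A hedged example of how a client could describe a multi-FIFO peripheral through the new fields, following the documented packing (FIFO count in bits 0-7, FIFO offset in bits 8-11); the address, burst and width values are placeholders:

/* Illustrative only: capture from a 4-FIFO device starting at offset 0 */
static int example_multi_fifo_config(struct dma_chan *chan)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_DEV_TO_MEM,
                .src_addr       = 0x59050000,       /* placeholder address */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst   = 8,
                .src_fifo_num   = 4 | (0 << 8),     /* 4 FIFOs, offset 0 */
        };

        return dmaengine_slave_config(chan, &cfg);
}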

View File

@ -20,12 +20,12 @@ struct sdma_script_start_addrs {
s32 per_2_firi_addr;
s32 mcu_2_firi_addr;
s32 uart_2_per_addr;
s32 uart_2_mcu_ram_addr;
s32 per_2_app_addr;
s32 mcu_2_app_addr;
s32 per_2_per_addr;
s32 uartsh_2_per_addr;
s32 uartsh_2_mcu_ram_addr;
s32 per_2_shp_addr;
s32 mcu_2_shp_addr;
s32 ata_2_mcu_addr;
@ -52,6 +52,10 @@ struct sdma_script_start_addrs {
s32 zcanfd_2_mcu_addr;
s32 zqspi_2_mcu_addr;
s32 mcu_2_ecspi_addr;
s32 mcu_2_sai_addr;
s32 sai_2_mcu_addr;
s32 uart_2_mcu_addr;
s32 uartsh_2_mcu_addr;
/* End of v3 array */
s32 mcu_2_zqspi_addr;
/* End of v4 array */

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2004-2015 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2018 NXP.
 */

#ifndef __ASM_ARCH_MXC_DMA_H__
@ -39,6 +40,8 @@ enum sdma_peripheral_type {
IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
IMX_DMATYPE_SAI, /* SAI */
IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
IMX_DMATYPE_HDMI, /* HDMI Audio */
};

enum imx_dma_prio {
@ -52,6 +55,9 @@ struct imx_dma_data {
int dma_request2; /* secondary DMA request line */
enum sdma_peripheral_type peripheral_type;
int priority;
bool src_dualfifo;
bool dst_dualfifo;
int done_sel;
};
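Tying this back to the SDMA changes above: bit 31 of done_sel marks that software done is wanted and bits 8-15 carry the selector, matching the decoding in sdma_alloc_chan_resources() and the xlate path. A hedged example of a platform client filling the structure (the request line and selector values are placeholders):

/* Illustrative only: SAI channel with software done on selector 1 */
static struct imx_dma_data example_sai_dma_data = {
        .dma_request     = 24,                        /* placeholder event */
        .peripheral_type = IMX_DMATYPE_MULTI_SAI,
        .priority        = DMA_PRIO_HIGH,
        .done_sel        = (int)(BIT(31) | (1 << 8)), /* enable, selector 1 */
};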
static inline int imx_dma_is_ipu(struct dma_chan *chan) static inline int imx_dma_is_ipu(struct dma_chan *chan)