Merge branch 'topic/st_fdma' into for-linus

Vinod Koul committed 2016-12-14 09:07:07 +05:30
commit 4625d2a513
11 changed files with 1688 additions and 9 deletions


@@ -0,0 +1,87 @@
* STMicroelectronics Flexible Direct Memory Access Device Tree bindings
The FDMA is a general-purpose direct memory access controller capable of
supporting 16 independent DMA channels. It accepts up to 32 DMA requests.
The FDMA is based on a SLIM processor which requires firmware.
* FDMA Controller
Required properties:
- compatible : Should be one of
- st,stih407-fdma-mpe31-11, "st,slim-rproc";
- st,stih407-fdma-mpe31-12, "st,slim-rproc";
- st,stih407-fdma-mpe31-13, "st,slim-rproc";
- reg : Should contain an entry for each name in reg-names
- reg-names : Must contain "slimcore", "dmem", "peripherals", "imem" entries
- interrupts : Should contain one interrupt shared by all channels
- dma-channels : Number of channels supported by the controller
- #dma-cells : Must be <3>. See DMA client section below
- clocks : Must contain an entry for each clock
See: Documentation/devicetree/bindings/clock/clock-bindings.txt
Example:
fdma0: dma-controller@8e20000 {
compatible = "st,stih407-fdma-mpe31-11", "st,slim-rproc";
reg = <0x8e20000 0x8000>,
<0x8e30000 0x3000>,
<0x8e37000 0x1000>,
<0x8e38000 0x8000>;
reg-names = "slimcore", "dmem", "peripherals", "imem";
clocks = <&clk_s_c0_flexgen CLK_FDMA>,
<&clk_s_c0_flexgen CLK_EXT2F_A9>,
<&clk_s_c0_flexgen CLK_EXT2F_A9>,
<&clk_s_c0_flexgen CLK_EXT2F_A9>;
interrupts = <GIC_SPI 5 IRQ_TYPE_NONE>;
dma-channels = <16>;
#dma-cells = <3>;
};
* DMA client
Required properties:
- dmas: Comma-separated list of DMA channel requests
- dma-names: Names of the aforementioned requested channels
Each dmas request consists of 4 cells:
1. A phandle pointing to the FDMA controller
2. The request line number
3. A 32-bit request control mask (see include/linux/platform_data/dma-st-fdma.h):
-bits 2-0: Holdoff value; dreq will be masked for
0x0: 0-0.5us
0x1: 0.5-1us
0x2: 1-1.5us
-bit 17: data swap
0x0: disabled
0x1: enabled
-bit 21: Increment Address
0x0: no address increment between transfers
0x1: increment address between transfers
-bit 22: STBus initiator coprocessor interface selection (one of two)
0x0: high priority port
0x1: low priority port
4. Transfer type
0: free running
1: paced
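For instance (illustrative values only), dmas = <&fdma0 2 0x200000 1>; would request line 2, paced, with address increment enabled (bit 21 of the mask) and all other control bits left at zero.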
Example:
sti_uni_player2: sti-uni-player@2 {
compatible = "st,sti-uni-player";
status = "disabled";
#sound-dai-cells = <0>;
st,syscfg = <&syscfg_core>;
clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
assigned-clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
assigned-clock-parents = <&clk_s_d0_quadfs 2>;
assigned-clock-rates = <50000000>;
reg = <0x8D82000 0x158>;
interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
dmas = <&fdma0 4 0 1>;
dai-name = "Uni Player #1 (DAC)";
dma-names = "tx";
st,uniperiph-id = <2>;
st,version = <5>;
st,mode = "PCM";
};


@@ -1774,6 +1774,7 @@ F: drivers/char/hw_random/st-rng.c
F: drivers/clocksource/arm_global_timer.c
F: drivers/clocksource/clksrc_st_lpc.c
F: drivers/cpufreq/sti-cpufreq.c
F: drivers/dma/st_fdma*
F: drivers/i2c/busses/i2c-st.c
F: drivers/media/rc/st_rc.c
F: drivers/media/platform/sti/c8sectpfe/
@@ -1784,6 +1785,7 @@ F: drivers/phy/phy-stih407-usb.c
F: drivers/phy/phy-stih41x-usb.c
F: drivers/pinctrl/pinctrl-st.c
F: drivers/remoteproc/st_remoteproc.c
F: drivers/remoteproc/st_slim_rproc.c
F: drivers/reset/sti/
F: drivers/rtc/rtc-st-lpc.c
F: drivers/tty/serial/st-asc.c
@@ -1792,6 +1794,7 @@ F: drivers/usb/host/ehci-st.c
F: drivers/usb/host/ohci-st.c
F: drivers/watchdog/st_lpc_wdt.c
F: drivers/ata/ahci_st.c
F: include/linux/remoteproc/st_slim_rproc.h
ARM/STM32 ARCHITECTURE
M: Maxime Coquelin <mcoquelin.stm32@gmail.com>


@@ -649,6 +649,9 @@ CONFIG_SND_SOC_AK4642=m
CONFIG_SND_SOC_SGTL5000=m
CONFIG_SND_SOC_SPDIF=m
CONFIG_SND_SOC_WM8978=m
CONFIG_SND_SOC_STI=m
CONFIG_SND_SOC_STI_SAS=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
@@ -790,6 +793,7 @@ CONFIG_DMA_OMAP=y
CONFIG_QCOM_BAM_DMA=y
CONFIG_XILINX_DMA=y
CONFIG_DMA_SUN6I=y
CONFIG_ST_FDMA=m
CONFIG_STAGING=y
CONFIG_SENSORS_ISL29018=y
CONFIG_SENSORS_ISL29028=y
@@ -823,6 +827,8 @@ CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ROCKCHIP_IOMMU=y
CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
CONFIG_REMOTEPROC=m
CONFIG_ST_REMOTEPROC=m
CONFIG_PM_DEVFREQ=y
CONFIG_ARM_TEGRA_DEVFREQ=m
CONFIG_MEMORY=y


@@ -435,6 +435,20 @@ config STE_DMA40
help
Support for ST-Ericsson DMA40 controller
config ST_FDMA
tristate "ST FDMA dmaengine support"
depends on ARCH_STI
depends on REMOTEPROC
select ST_SLIM_REMOTEPROC
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Enable support for ST FDMA controller.
It supports 16 independent DMA channels and accepts up to 32 DMA requests.
Say Y here if you have such a chipset.
If unsure, say N.
config STM32_DMA
bool "STMicroelectronics STM32 DMA support"
depends on ARCH_STM32 || COMPILE_TEST


@@ -67,6 +67,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-y += qcom/
obj-y += xilinx/

drivers/dma/st_fdma.c (new file, 889 lines)

@@ -0,0 +1,889 @@
/*
* DMA driver for STMicroelectronics STi FDMA controller
*
* Copyright (C) 2014 STMicroelectronics
*
* Author: Ludovic Barre <Ludovic.barre@st.com>
* Peter Griffin <peter.griffin@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/remoteproc.h>
#include "st_fdma.h"
static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
{
return container_of(c, struct st_fdma_chan, vchan.chan);
}
static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct st_fdma_desc, vdesc);
}
static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
{
struct st_fdma_dev *fdev = fchan->fdev;
u32 req_line_cfg = fchan->cfg.req_line;
u32 dreq_line;
int try = 0;
/*
* dreq_mask is shared for n channels of fdma, so all accesses must be
* atomic. If the dreq_mask is changed between ffz and set_bit,
* we retry
*/
do {
if (fdev->dreq_mask == ~0L) {
dev_err(fdev->dev, "No req lines available\n");
return -EINVAL;
}
if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
dev_err(fdev->dev, "Invalid or used req line\n");
return -EINVAL;
} else {
dreq_line = req_line_cfg;
}
try++;
} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));
dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
dreq_line, fdev->dreq_mask);
return dreq_line;
}
static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
{
struct st_fdma_dev *fdev = fchan->fdev;
dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
clear_bit(fchan->dreq_line, &fdev->dreq_mask);
}
static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
{
struct virt_dma_desc *vdesc;
unsigned long nbytes, ch_cmd, cmd;
vdesc = vchan_next_desc(&fchan->vchan);
if (!vdesc)
return;
fchan->fdesc = to_st_fdma_desc(vdesc);
nbytes = fchan->fdesc->node[0].desc->nbytes;
cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;
/* start the channel for the descriptor */
fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
writel(cmd,
fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);
dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
}
static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
unsigned long int_sta)
{
unsigned long ch_sta, ch_err;
int ch_id = fchan->vchan.chan.chan_id;
struct st_fdma_dev *fdev = fchan->fdev;
ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
ch_sta &= FDMA_CH_CMD_STA_MASK;
if (int_sta & FDMA_INT_STA_ERR) {
dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
fchan->status = DMA_ERROR;
return;
}
switch (ch_sta) {
case FDMA_CH_CMD_STA_PAUSED:
fchan->status = DMA_PAUSED;
break;
case FDMA_CH_CMD_STA_RUNNING:
fchan->status = DMA_IN_PROGRESS;
break;
}
}
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
struct st_fdma_dev *fdev = dev_id;
irqreturn_t ret = IRQ_NONE;
struct st_fdma_chan *fchan = &fdev->chans[0];
unsigned long int_sta, clr;
int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
clr = int_sta;
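/* each channel owns two status bits: FDMA_INT_STA_CH (completion) and FDMA_INT_STA_ERR (error) */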
for (; int_sta != 0 ; int_sta >>= 2, fchan++) {
if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
continue;
spin_lock(&fchan->vchan.lock);
st_fdma_ch_sta_update(fchan, int_sta);
if (fchan->fdesc) {
if (!fchan->fdesc->iscyclic) {
list_del(&fchan->fdesc->vdesc.node);
vchan_cookie_complete(&fchan->fdesc->vdesc);
fchan->fdesc = NULL;
fchan->status = DMA_COMPLETE;
} else {
vchan_cyclic_callback(&fchan->fdesc->vdesc);
}
/* Start the next descriptor (if available) */
if (!fchan->fdesc)
st_fdma_xfer_desc(fchan);
}
spin_unlock(&fchan->vchan.lock);
ret = IRQ_HANDLED;
}
fdma_write(fdev, clr, FDMA_INT_CLR_OFST);
return ret;
}
static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct st_fdma_dev *fdev = ofdma->of_dma_data;
struct dma_chan *chan;
struct st_fdma_chan *fchan;
int ret;
if (dma_spec->args_count < 1)
return ERR_PTR(-EINVAL);
if (fdev->dma_device.dev->of_node != dma_spec->np)
return ERR_PTR(-EINVAL);
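/* boot the SLIM core so the FDMA firmware is loaded before a channel is handed out */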
ret = rproc_boot(fdev->slim_rproc->rproc);
if (ret == -ENOENT)
return ERR_PTR(-EPROBE_DEFER);
else if (ret)
return ERR_PTR(ret);
chan = dma_get_any_slave_channel(&fdev->dma_device);
if (!chan)
goto err_chan;
fchan = to_st_fdma_chan(chan);
fchan->cfg.of_node = dma_spec->np;
fchan->cfg.req_line = dma_spec->args[0];
fchan->cfg.req_ctrl = 0;
fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;
if (dma_spec->args_count > 1)
fchan->cfg.req_ctrl = dma_spec->args[1]
& FDMA_REQ_CTRL_CFG_MASK;
if (dma_spec->args_count > 2)
fchan->cfg.type = dma_spec->args[2];
if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
fchan->dreq_line = 0;
} else {
fchan->dreq_line = st_fdma_dreq_get(fchan);
if (IS_ERR_VALUE(fchan->dreq_line)) {
chan = ERR_PTR(fchan->dreq_line);
goto err_chan;
}
}
dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);
return chan;
err_chan:
rproc_shutdown(fdev->slim_rproc->rproc);
return chan;
}
static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
struct st_fdma_desc *fdesc;
int i;
fdesc = to_st_fdma_desc(vdesc);
for (i = 0; i < fdesc->n_nodes; i++)
dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
fdesc->node[i].pdesc);
kfree(fdesc);
}
static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
int sg_len)
{
struct st_fdma_desc *fdesc;
int i;
fdesc = kzalloc(sizeof(*fdesc) +
sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
if (!fdesc)
return NULL;
fdesc->fchan = fchan;
fdesc->n_nodes = sg_len;
for (i = 0; i < sg_len; i++) {
fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
GFP_NOWAIT, &fdesc->node[i].pdesc);
if (!fdesc->node[i].desc)
goto err;
}
return fdesc;
err:
while (--i >= 0)
dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
fdesc->node[i].pdesc);
kfree(fdesc);
return NULL;
}
static int st_fdma_alloc_chan_res(struct dma_chan *chan)
{
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
/* Create the dma pool for descriptor allocation */
fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
fchan->fdev->dev,
sizeof(struct st_fdma_hw_node),
__alignof__(struct st_fdma_hw_node),
0);
if (!fchan->node_pool) {
dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
return -ENOMEM;
}
dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
fchan->vchan.chan.chan_id, fchan->cfg.type);
return 0;
}
static void st_fdma_free_chan_res(struct dma_chan *chan)
{
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
unsigned long flags;
LIST_HEAD(head);
dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
__func__, fchan->vchan.chan.chan_id);
if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
st_fdma_dreq_put(fchan);
spin_lock_irqsave(&fchan->vchan.lock, flags);
fchan->fdesc = NULL;
spin_unlock_irqrestore(&fchan->vchan.lock, flags);
dma_pool_destroy(fchan->node_pool);
fchan->node_pool = NULL;
memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));
rproc_shutdown(rproc);
}
static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags)
{
struct st_fdma_chan *fchan;
struct st_fdma_desc *fdesc;
struct st_fdma_hw_node *hw_node;
if (!len)
return NULL;
fchan = to_st_fdma_chan(chan);
/* We only require a single descriptor */
fdesc = st_fdma_alloc_desc(fchan, 1);
if (!fdesc) {
dev_err(fchan->fdev->dev, "no memory for desc\n");
return NULL;
}
hw_node = fdesc->node[0].desc;
hw_node->next = 0;
hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
hw_node->control |= FDMA_NODE_CTRL_INT_EON;
hw_node->nbytes = len;
hw_node->saddr = src;
hw_node->daddr = dst;
hw_node->generic.length = len;
hw_node->generic.sstride = 0;
hw_node->generic.dstride = 0;
return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
static int config_reqctrl(struct st_fdma_chan *fchan,
enum dma_transfer_direction direction)
{
u32 maxburst = 0, addr = 0;
enum dma_slave_buswidth width;
int ch_id = fchan->vchan.chan.chan_id;
struct st_fdma_dev *fdev = fchan->fdev;
switch (direction) {
case DMA_DEV_TO_MEM:
fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
maxburst = fchan->scfg.src_maxburst;
width = fchan->scfg.src_addr_width;
addr = fchan->scfg.src_addr;
break;
case DMA_MEM_TO_DEV:
fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
maxburst = fchan->scfg.dst_maxburst;
width = fchan->scfg.dst_addr_width;
addr = fchan->scfg.dst_addr;
break;
default:
return -EINVAL;
}
fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;
switch (width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
break;
case DMA_SLAVE_BUSWIDTH_8_BYTES:
fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
break;
default:
return -EINVAL;
}
fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst-1);
dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);
fchan->cfg.dev_addr = addr;
fchan->cfg.dir = direction;
dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
ch_id, addr, fchan->cfg.req_ctrl);
return 0;
}
static void fill_hw_node(struct st_fdma_hw_node *hw_node,
struct st_fdma_chan *fchan,
enum dma_transfer_direction direction)
{
if (direction == DMA_MEM_TO_DEV) {
hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
hw_node->daddr = fchan->cfg.dev_addr;
} else {
hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
hw_node->saddr = fchan->cfg.dev_addr;
}
hw_node->generic.sstride = 0;
hw_node->generic.dstride = 0;
}
static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
size_t len, enum dma_transfer_direction direction)
{
struct st_fdma_chan *fchan;
if (!chan || !len)
return NULL;
fchan = to_st_fdma_chan(chan);
if (!is_slave_direction(direction)) {
dev_err(fchan->fdev->dev, "bad direction?\n");
return NULL;
}
return fchan;
}
static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct st_fdma_chan *fchan;
struct st_fdma_desc *fdesc;
int sg_len, i;
fchan = st_fdma_prep_common(chan, len, direction);
if (!fchan)
return NULL;
if (!period_len)
return NULL;
if (config_reqctrl(fchan, direction)) {
dev_err(fchan->fdev->dev, "bad width or direction\n");
return NULL;
}
/* the buffer length must be a multiple of period_len */
if (len % period_len != 0) {
dev_err(fchan->fdev->dev, "len is not multiple of period\n");
return NULL;
}
sg_len = len / period_len;
fdesc = st_fdma_alloc_desc(fchan, sg_len);
if (!fdesc) {
dev_err(fchan->fdev->dev, "no memory for desc\n");
return NULL;
}
fdesc->iscyclic = true;
for (i = 0; i < sg_len; i++) {
struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;
hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
hw_node->control =
FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
hw_node->control |= FDMA_NODE_CTRL_INT_EON;
fill_hw_node(hw_node, fchan, direction);
if (direction == DMA_MEM_TO_DEV)
hw_node->saddr = buf_addr + (i * period_len);
else
hw_node->daddr = buf_addr + (i * period_len);
hw_node->nbytes = period_len;
hw_node->generic.length = period_len;
}
return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
struct st_fdma_chan *fchan;
struct st_fdma_desc *fdesc;
struct st_fdma_hw_node *hw_node;
struct scatterlist *sg;
int i;
fchan = st_fdma_prep_common(chan, sg_len, direction);
if (!fchan)
return NULL;
if (!sgl)
return NULL;
fdesc = st_fdma_alloc_desc(fchan, sg_len);
if (!fdesc) {
dev_err(fchan->fdev->dev, "no memory for desc\n");
return NULL;
}
fdesc->iscyclic = false;
for_each_sg(sgl, sg, sg_len, i) {
hw_node = fdesc->node[i].desc;
hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
fill_hw_node(hw_node, fchan, direction);
if (direction == DMA_MEM_TO_DEV)
hw_node->saddr = sg_dma_address(sg);
else
hw_node->daddr = sg_dma_address(sg);
hw_node->nbytes = sg_dma_len(sg);
hw_node->generic.length = sg_dma_len(sg);
}
/* interrupt at end of last node */
hw_node->control |= FDMA_NODE_CTRL_INT_EON;
return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
struct virt_dma_desc *vdesc,
bool in_progress)
{
struct st_fdma_desc *fdesc = fchan->fdesc;
size_t residue = 0;
dma_addr_t cur_addr = 0;
int i;
if (in_progress) {
cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
cur_addr &= FDMA_CH_CMD_DATA_MASK;
}
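/* walk the node list backwards: nodes not yet reached count in full, the in-flight node adds its remaining hardware byte count */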
for (i = fchan->fdesc->n_nodes - 1 ; i >= 0; i--) {
if (cur_addr == fdesc->node[i].pdesc) {
residue += fnode_read(fchan, FDMA_CNTN_OFST);
break;
}
residue += fdesc->node[i].desc->nbytes;
}
return residue;
}
static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
struct virt_dma_desc *vd;
enum dma_status ret;
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&fchan->vchan.lock, flags);
vd = vchan_find_desc(&fchan->vchan, cookie);
if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
txstate->residue = st_fdma_desc_residue(fchan, vd, true);
else if (vd)
txstate->residue = st_fdma_desc_residue(fchan, vd, false);
else
txstate->residue = 0;
spin_unlock_irqrestore(&fchan->vchan.lock, flags);
return ret;
}
static void st_fdma_issue_pending(struct dma_chan *chan)
{
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&fchan->vchan.lock, flags);
if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
st_fdma_xfer_desc(fchan);
spin_unlock_irqrestore(&fchan->vchan.lock, flags);
}
static int st_fdma_pause(struct dma_chan *chan)
{
unsigned long flags;
LIST_HEAD(head);
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
int ch_id = fchan->vchan.chan.chan_id;
unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);
spin_lock_irqsave(&fchan->vchan.lock, flags);
if (fchan->fdesc)
fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
spin_unlock_irqrestore(&fchan->vchan.lock, flags);
return 0;
}
static int st_fdma_resume(struct dma_chan *chan)
{
unsigned long flags;
unsigned long val;
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
int ch_id = fchan->vchan.chan.chan_id;
dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);
spin_lock_irqsave(&fchan->vchan.lock, flags);
if (fchan->fdesc) {
val = fchan_read(fchan, FDMA_CH_CMD_OFST);
val &= FDMA_CH_CMD_DATA_MASK;
fchan_write(fchan, val, FDMA_CH_CMD_OFST);
}
spin_unlock_irqrestore(&fchan->vchan.lock, flags);
return 0;
}
static int st_fdma_terminate_all(struct dma_chan *chan)
{
unsigned long flags;
LIST_HEAD(head);
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
int ch_id = fchan->vchan.chan.chan_id;
unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);
spin_lock_irqsave(&fchan->vchan.lock, flags);
fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
fchan->fdesc = NULL;
vchan_get_all_descriptors(&fchan->vchan, &head);
spin_unlock_irqrestore(&fchan->vchan.lock, flags);
vchan_dma_desc_free_list(&fchan->vchan, &head);
return 0;
}
static int st_fdma_slave_config(struct dma_chan *chan,
struct dma_slave_config *slave_cfg)
{
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
return 0;
}
static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
.name = "STiH407",
.id = 0,
};
static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
.name = "STiH407",
.id = 1,
};
static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
.name = "STiH407",
.id = 2,
};
static const struct of_device_id st_fdma_match[] = {
{ .compatible = "st,stih407-fdma-mpe31-11"
, .data = &fdma_mpe31_stih407_11 },
{ .compatible = "st,stih407-fdma-mpe31-12"
, .data = &fdma_mpe31_stih407_12 },
{ .compatible = "st,stih407-fdma-mpe31-13"
, .data = &fdma_mpe31_stih407_13 },
{},
};
MODULE_DEVICE_TABLE(of, st_fdma_match);
static int st_fdma_parse_dt(struct platform_device *pdev,
const struct st_fdma_driverdata *drvdata,
struct st_fdma_dev *fdev)
{
snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
drvdata->name, drvdata->id);
return of_property_read_u32(pdev->dev.of_node, "dma-channels",
&fdev->nr_channels);
}
#define FDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static void st_fdma_free(struct st_fdma_dev *fdev)
{
struct st_fdma_chan *fchan;
int i;
for (i = 0; i < fdev->nr_channels; i++) {
fchan = &fdev->chans[i];
list_del(&fchan->vchan.chan.device_node);
tasklet_kill(&fchan->vchan.task);
}
}
static int st_fdma_probe(struct platform_device *pdev)
{
struct st_fdma_dev *fdev;
const struct of_device_id *match;
struct device_node *np = pdev->dev.of_node;
const struct st_fdma_driverdata *drvdata;
int ret, i;
match = of_match_device((st_fdma_match), &pdev->dev);
if (!match || !match->data) {
dev_err(&pdev->dev, "No device match found\n");
return -ENODEV;
}
drvdata = match->data;
fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
if (!fdev)
return -ENOMEM;
ret = st_fdma_parse_dt(pdev, drvdata, fdev);
if (ret) {
dev_err(&pdev->dev, "unable to find platform data\n");
goto err;
}
fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
sizeof(struct st_fdma_chan), GFP_KERNEL);
if (!fdev->chans)
return -ENOMEM;
fdev->dev = &pdev->dev;
fdev->drvdata = drvdata;
platform_set_drvdata(pdev, fdev);
fdev->irq = platform_get_irq(pdev, 0);
if (fdev->irq < 0) {
dev_err(&pdev->dev, "Failed to get irq resource\n");
return -EINVAL;
}
ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
dev_name(&pdev->dev), fdev);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
goto err;
}
fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
if (IS_ERR(fdev->slim_rproc)) {
ret = PTR_ERR(fdev->slim_rproc);
dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
goto err;
}
/* Initialise list of FDMA channels */
INIT_LIST_HEAD(&fdev->dma_device.channels);
for (i = 0; i < fdev->nr_channels; i++) {
struct st_fdma_chan *fchan = &fdev->chans[i];
fchan->fdev = fdev;
fchan->vchan.desc_free = st_fdma_free_desc;
vchan_init(&fchan->vchan, &fdev->dma_device);
}
/* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
fdev->dreq_mask = BIT(0) | BIT(31);
dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);
fdev->dma_device.dev = &pdev->dev;
fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
fdev->dma_device.device_tx_status = st_fdma_tx_status;
fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
fdev->dma_device.device_config = st_fdma_slave_config;
fdev->dma_device.device_pause = st_fdma_pause;
fdev->dma_device.device_resume = st_fdma_resume;
fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
ret = dma_async_device_register(&fdev->dma_device);
if (ret) {
dev_err(&pdev->dev,
"Failed to register DMA device (%d)\n", ret);
goto err_rproc;
}
ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
if (ret) {
dev_err(&pdev->dev,
"Failed to register controller (%d)\n", ret);
goto err_dma_dev;
}
dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
return 0;
err_dma_dev:
dma_async_device_unregister(&fdev->dma_device);
err_rproc:
st_fdma_free(fdev);
st_slim_rproc_put(fdev->slim_rproc);
err:
return ret;
}
static int st_fdma_remove(struct platform_device *pdev)
{
struct st_fdma_dev *fdev = platform_get_drvdata(pdev);
devm_free_irq(&pdev->dev, fdev->irq, fdev);
st_slim_rproc_put(fdev->slim_rproc);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&fdev->dma_device);
return 0;
}
static struct platform_driver st_fdma_platform_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = st_fdma_match,
},
.probe = st_fdma_probe,
.remove = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform: " DRIVER_NAME);

drivers/dma/st_fdma.h (new file, 249 lines)

@@ -0,0 +1,249 @@
/*
* DMA driver header for STMicroelectronics STi FDMA controller
*
* Copyright (C) 2014 STMicroelectronics
*
* Author: Ludovic Barre <Ludovic.barre@st.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __DMA_ST_FDMA_H
#define __DMA_ST_FDMA_H
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/remoteproc/st_slim_rproc.h>
#include "virt-dma.h"
#define ST_FDMA_NR_DREQS 32
#define FW_NAME_SIZE 30
#define DRIVER_NAME "st-fdma"
/**
* struct st_fdma_generic_node - Free running/paced generic node
*
* @length: Length in bytes of a line in a 2D mem to mem
* @sstride: Stride, in bytes, between source lines in a 2D data move
* @dstride: Stride, in bytes, between destination lines in a 2D data move
*/
struct st_fdma_generic_node {
u32 length;
u32 sstride;
u32 dstride;
};
/**
* struct st_fdma_hw_node - Node structure used by fdma hw
*
* @next: Pointer to next node
* @control: Transfer Control Parameters
* @nbytes: Number of Bytes to read
* @saddr: Source address
* @daddr: Destination address
*
* @generic: generic node for free running/paced transfer type
* Two other transfer types are possible, but not yet implemented
*
* The NODE structures must be aligned to a 32 byte boundary
*/
struct st_fdma_hw_node {
u32 next;
u32 control;
u32 nbytes;
u32 saddr;
u32 daddr;
union {
struct st_fdma_generic_node generic;
};
} __aligned(32);
/*
* node control parameters
*/
#define FDMA_NODE_CTRL_REQ_MAP_MASK GENMASK(4, 0)
#define FDMA_NODE_CTRL_REQ_MAP_FREE_RUN 0x0
#define FDMA_NODE_CTRL_REQ_MAP_DREQ(n) ((n)&FDMA_NODE_CTRL_REQ_MAP_MASK)
#define FDMA_NODE_CTRL_REQ_MAP_EXT FDMA_NODE_CTRL_REQ_MAP_MASK
#define FDMA_NODE_CTRL_SRC_MASK GENMASK(6, 5)
#define FDMA_NODE_CTRL_SRC_STATIC BIT(5)
#define FDMA_NODE_CTRL_SRC_INCR BIT(6)
#define FDMA_NODE_CTRL_DST_MASK GENMASK(8, 7)
#define FDMA_NODE_CTRL_DST_STATIC BIT(7)
#define FDMA_NODE_CTRL_DST_INCR BIT(8)
#define FDMA_NODE_CTRL_SECURE BIT(15)
#define FDMA_NODE_CTRL_PAUSE_EON BIT(30)
#define FDMA_NODE_CTRL_INT_EON BIT(31)
/**
* struct st_fdma_sw_node - descriptor structure for link list
*
* @pdesc: Physical address of desc
* @node: link used for putting this into a channel queue
*/
struct st_fdma_sw_node {
dma_addr_t pdesc;
struct st_fdma_hw_node *desc;
};
#define NAME_SZ 10
struct st_fdma_driverdata {
u32 id;
char name[NAME_SZ];
};
struct st_fdma_desc {
struct virt_dma_desc vdesc;
struct st_fdma_chan *fchan;
bool iscyclic;
unsigned int n_nodes;
struct st_fdma_sw_node node[];
};
enum st_fdma_type {
ST_FDMA_TYPE_FREE_RUN,
ST_FDMA_TYPE_PACED,
};
struct st_fdma_cfg {
struct device_node *of_node;
enum st_fdma_type type;
dma_addr_t dev_addr;
enum dma_transfer_direction dir;
int req_line; /* request line */
long req_ctrl; /* Request control */
};
struct st_fdma_chan {
struct st_fdma_dev *fdev;
struct dma_pool *node_pool;
struct dma_slave_config scfg;
struct st_fdma_cfg cfg;
int dreq_line;
struct virt_dma_chan vchan;
struct st_fdma_desc *fdesc;
enum dma_status status;
};
struct st_fdma_dev {
struct device *dev;
const struct st_fdma_driverdata *drvdata;
struct dma_device dma_device;
struct st_slim_rproc *slim_rproc;
int irq;
struct st_fdma_chan *chans;
spinlock_t dreq_lock;
unsigned long dreq_mask;
u32 nr_channels;
char fw_name[FW_NAME_SIZE];
};
/* Peripheral Registers*/
#define FDMA_CMD_STA_OFST 0xFC0
#define FDMA_CMD_SET_OFST 0xFC4
#define FDMA_CMD_CLR_OFST 0xFC8
#define FDMA_CMD_MASK_OFST 0xFCC
#define FDMA_CMD_START(ch) (0x1 << (ch << 1))
#define FDMA_CMD_PAUSE(ch) (0x2 << (ch << 1))
#define FDMA_CMD_FLUSH(ch) (0x3 << (ch << 1))
#define FDMA_INT_STA_OFST 0xFD0
#define FDMA_INT_STA_CH 0x1
#define FDMA_INT_STA_ERR 0x2
#define FDMA_INT_SET_OFST 0xFD4
#define FDMA_INT_CLR_OFST 0xFD8
#define FDMA_INT_MASK_OFST 0xFDC
#define fdma_read(fdev, name) \
readl((fdev)->slim_rproc->peri + name)
#define fdma_write(fdev, val, name) \
writel((val), (fdev)->slim_rproc->peri + name)
/* fchan interface (dmem) */
#define FDMA_CH_CMD_OFST 0x200
#define FDMA_CH_CMD_STA_MASK GENMASK(1, 0)
#define FDMA_CH_CMD_STA_IDLE (0x0)
#define FDMA_CH_CMD_STA_START (0x1)
#define FDMA_CH_CMD_STA_RUNNING (0x2)
#define FDMA_CH_CMD_STA_PAUSED (0x3)
#define FDMA_CH_CMD_ERR_MASK GENMASK(4, 2)
#define FDMA_CH_CMD_ERR_INT (0x0 << 2)
#define FDMA_CH_CMD_ERR_NAND (0x1 << 2)
#define FDMA_CH_CMD_ERR_MCHI (0x2 << 2)
#define FDMA_CH_CMD_DATA_MASK GENMASK(31, 5)
#define fchan_read(fchan, name) \
readl((fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ (fchan)->vchan.chan.chan_id * 0x4 \
+ name)
#define fchan_write(fchan, val, name) \
writel((val), (fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ (fchan)->vchan.chan.chan_id * 0x4 \
+ name)
/* req interface */
#define FDMA_REQ_CTRL_OFST 0x240
#define dreq_write(fchan, val, name) \
writel((val), (fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ fchan->dreq_line * 0x04 \
+ name)
/* node interface */
#define FDMA_NODE_SZ 128
#define FDMA_PTRN_OFST 0x800
#define FDMA_CNTN_OFST 0x808
#define FDMA_SADDRN_OFST 0x80c
#define FDMA_DADDRN_OFST 0x810
#define fnode_read(fchan, name) \
readl((fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ (fchan)->vchan.chan.chan_id * FDMA_NODE_SZ \
+ name)
#define fnode_write(fchan, val, name) \
writel((val), (fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ (fchan)->vchan.chan.chan_id * FDMA_NODE_SZ \
+ name)
/*
* request control bits
*/
#define FDMA_REQ_CTRL_NUM_OPS_MASK GENMASK(31, 24)
#define FDMA_REQ_CTRL_NUM_OPS(n) (FDMA_REQ_CTRL_NUM_OPS_MASK & \
((n) << 24))
#define FDMA_REQ_CTRL_INITIATOR_MASK BIT(22)
#define FDMA_REQ_CTRL_INIT0 (0x0 << 22)
#define FDMA_REQ_CTRL_INIT1 (0x1 << 22)
#define FDMA_REQ_CTRL_INC_ADDR_ON BIT(21)
#define FDMA_REQ_CTRL_DATA_SWAP_ON BIT(17)
#define FDMA_REQ_CTRL_WNR BIT(14)
#define FDMA_REQ_CTRL_OPCODE_MASK GENMASK(7, 4)
#define FDMA_REQ_CTRL_OPCODE_LD_ST1 (0x0 << 4)
#define FDMA_REQ_CTRL_OPCODE_LD_ST2 (0x1 << 4)
#define FDMA_REQ_CTRL_OPCODE_LD_ST4 (0x2 << 4)
#define FDMA_REQ_CTRL_OPCODE_LD_ST8 (0x3 << 4)
#define FDMA_REQ_CTRL_OPCODE_LD_ST16 (0x4 << 4)
#define FDMA_REQ_CTRL_OPCODE_LD_ST32 (0x5 << 4)
#define FDMA_REQ_CTRL_OPCODE_LD_ST64 (0x6 << 4)
#define FDMA_REQ_CTRL_HOLDOFF_MASK GENMASK(2, 0)
#define FDMA_REQ_CTRL_HOLDOFF(n) ((n) & FDMA_REQ_CTRL_HOLDOFF_MASK)
/* bits used by client to configure request control */
#define FDMA_REQ_CTRL_CFG_MASK (FDMA_REQ_CTRL_HOLDOFF_MASK | \
FDMA_REQ_CTRL_DATA_SWAP_ON | \
FDMA_REQ_CTRL_INC_ADDR_ON | \
FDMA_REQ_CTRL_INITIATOR_MASK)
#endif /* __DMA_ST_FDMA_H */


@@ -1,20 +1,21 @@
menu "Remoteproc drivers"
# REMOTEPROC gets selected by whoever wants it
config REMOTEPROC
tristate
tristate "Support for Remote Processor subsystem"
depends on HAS_DMA
select CRC32
select FW_LOADER
select VIRTIO
select VIRTUALIZATION
if REMOTEPROC
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
depends on HAS_DMA
depends on ARCH_OMAP4 || SOC_OMAP5
depends on OMAP_IOMMU
select REMOTEPROC
depends on REMOTEPROC
select MAILBOX
select OMAP2PLUS_MBOX
select RPMSG_VIRTIO
@@ -34,7 +35,7 @@ config OMAP_REMOTEPROC
config STE_MODEM_RPROC
tristate "STE-Modem remoteproc support"
depends on HAS_DMA
select REMOTEPROC
depends on REMOTEPROC
default n
help
Say y or m here to support STE-Modem shared memory driver.
@@ -44,7 +45,7 @@ config STE_MODEM_RPROC
config WKUP_M3_RPROC
tristate "AMx3xx Wakeup M3 remoteproc support"
depends on SOC_AM33XX || SOC_AM43XX
select REMOTEPROC
depends on REMOTEPROC
help
Say y here to support Wakeup M3 remote processor on TI AM33xx
and AM43xx family of SoCs.
@@ -57,8 +58,8 @@ config WKUP_M3_RPROC
config DA8XX_REMOTEPROC
tristate "DA8xx/OMAP-L13x remoteproc support"
depends on ARCH_DAVINCI_DA8XX
depends on REMOTEPROC
select CMA if MMU
select REMOTEPROC
select RPMSG_VIRTIO
help
Say y here to support DA8xx/OMAP-L13x remote processors via the
@@ -84,9 +85,9 @@ config QCOM_Q6V5_PIL
tristate "Qualcomm Hexagon V5 Peripherial Image Loader"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
depends on REMOTEPROC
select MFD_SYSCON
select QCOM_MDT_LOADER
select REMOTEPROC
help
Say y here to support the Qualcomm Peripherial Image Loader for the
Hexagon V5 based remote processors.
@@ -99,10 +100,10 @@ config QCOM_WCNSS_PIL
tristate "Qualcomm WCNSS Peripheral Image Loader"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
depends on REMOTEPROC
select QCOM_MDT_LOADER
select QCOM_SCM
select QCOM_WCNSS_IRIS
select REMOTEPROC
help
Say y here to support the Peripheral Image Loader for the Qualcomm
Wireless Connectivity Subsystem.
@@ -110,10 +111,16 @@ config QCOM_WCNSS_PIL
config ST_REMOTEPROC
tristate "ST remoteproc support"
depends on ARCH_STI
select REMOTEPROC
depends on REMOTEPROC
help
Say y here to support ST's adjunct processors via the remote
processor framework.
This can be either built-in or a loadable module.
config ST_SLIM_REMOTEPROC
tristate
depends on REMOTEPROC
endif # REMOTEPROC
endmenu


@@ -16,3 +16,4 @@ obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
obj-$(CONFIG_QCOM_WCNSS_IRIS) += qcom_wcnss_iris.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o


@@ -0,0 +1,364 @@
/*
* SLIM core rproc driver
*
* Copyright (C) 2016 STMicroelectronics
*
* Author: Peter Griffin <peter.griffin@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/st_slim_rproc.h>
#include "remoteproc_internal.h"
/* SLIM core registers */
#define SLIM_ID_OFST 0x0
#define SLIM_VER_OFST 0x4
#define SLIM_EN_OFST 0x8
#define SLIM_EN_RUN BIT(0)
#define SLIM_CLK_GATE_OFST 0xC
#define SLIM_CLK_GATE_DIS BIT(0)
#define SLIM_CLK_GATE_RESET BIT(2)
#define SLIM_SLIM_PC_OFST 0x20
/* DMEM registers */
#define SLIM_REV_ID_OFST 0x0
#define SLIM_REV_ID_MIN_MASK GENMASK(15, 8)
#define SLIM_REV_ID_MIN(id) ((id & SLIM_REV_ID_MIN_MASK) >> 8)
#define SLIM_REV_ID_MAJ_MASK GENMASK(23, 16)
#define SLIM_REV_ID_MAJ(id) ((id & SLIM_REV_ID_MAJ_MASK) >> 16)
/* peripherals registers */
#define SLIM_STBUS_SYNC_OFST 0xF88
#define SLIM_STBUS_SYNC_DIS BIT(0)
#define SLIM_INT_SET_OFST 0xFD4
#define SLIM_INT_CLR_OFST 0xFD8
#define SLIM_INT_MASK_OFST 0xFDC
#define SLIM_CMD_CLR_OFST 0xFC8
#define SLIM_CMD_MASK_OFST 0xFCC
static const char *mem_names[ST_SLIM_MEM_MAX] = {
[ST_SLIM_DMEM] = "dmem",
[ST_SLIM_IMEM] = "imem",
};
static int slim_clk_get(struct st_slim_rproc *slim_rproc, struct device *dev)
{
int clk, err;
for (clk = 0; clk < ST_SLIM_MAX_CLK; clk++) {
slim_rproc->clks[clk] = of_clk_get(dev->of_node, clk);
if (IS_ERR(slim_rproc->clks[clk])) {
err = PTR_ERR(slim_rproc->clks[clk]);
if (err == -EPROBE_DEFER)
goto err_put_clks;
slim_rproc->clks[clk] = NULL;
break;
}
}
return 0;
err_put_clks:
while (--clk >= 0)
clk_put(slim_rproc->clks[clk]);
return err;
}
static void slim_clk_disable(struct st_slim_rproc *slim_rproc)
{
int clk;
for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++)
clk_disable_unprepare(slim_rproc->clks[clk]);
}
static int slim_clk_enable(struct st_slim_rproc *slim_rproc)
{
int clk, ret;
for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) {
ret = clk_prepare_enable(slim_rproc->clks[clk]);
if (ret)
goto err_disable_clks;
}
return 0;
err_disable_clks:
while (--clk >= 0)
clk_disable_unprepare(slim_rproc->clks[clk]);
return ret;
}
/*
* Remoteproc slim specific device handlers
*/
static int slim_rproc_start(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
struct st_slim_rproc *slim_rproc = rproc->priv;
unsigned long hw_id, hw_ver, fw_rev;
u32 val;
/* disable CPU pipeline clock & reset CPU pipeline */
val = SLIM_CLK_GATE_DIS | SLIM_CLK_GATE_RESET;
writel(val, slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
/* disable SLIM core STBus sync */
writel(SLIM_STBUS_SYNC_DIS, slim_rproc->peri + SLIM_STBUS_SYNC_OFST);
/* enable cpu pipeline clock */
writel(!SLIM_CLK_GATE_DIS,
slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
/* clear int & cmd mailbox */
writel(~0U, slim_rproc->peri + SLIM_INT_CLR_OFST);
writel(~0U, slim_rproc->peri + SLIM_CMD_CLR_OFST);
/* enable all channels cmd & int */
writel(~0U, slim_rproc->peri + SLIM_INT_MASK_OFST);
writel(~0U, slim_rproc->peri + SLIM_CMD_MASK_OFST);
/* enable cpu */
writel(SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST);
hw_id = readl_relaxed(slim_rproc->slimcore + SLIM_ID_OFST);
hw_ver = readl_relaxed(slim_rproc->slimcore + SLIM_VER_OFST);
fw_rev = readl(slim_rproc->mem[ST_SLIM_DMEM].cpu_addr +
SLIM_REV_ID_OFST);
dev_info(dev, "fw rev:%ld.%ld on SLIM %ld.%ld\n",
SLIM_REV_ID_MAJ(fw_rev), SLIM_REV_ID_MIN(fw_rev),
hw_id, hw_ver);
return 0;
}
static int slim_rproc_stop(struct rproc *rproc)
{
struct st_slim_rproc *slim_rproc = rproc->priv;
u32 val;
/* mask all (cmd & int) channels */
writel(0UL, slim_rproc->peri + SLIM_INT_MASK_OFST);
writel(0UL, slim_rproc->peri + SLIM_CMD_MASK_OFST);
/* disable cpu pipeline clock */
writel(SLIM_CLK_GATE_DIS, slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
writel(!SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST);
val = readl(slim_rproc->slimcore + SLIM_EN_OFST);
if (val & SLIM_EN_RUN)
dev_warn(&rproc->dev, "Failed to disable SLIM");
dev_dbg(&rproc->dev, "slim stopped\n");
return 0;
}
static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
{
struct st_slim_rproc *slim_rproc = rproc->priv;
void *va = NULL;
int i;
for (i = 0; i < ST_SLIM_MEM_MAX; i++) {
if (da != slim_rproc->mem[i].bus_addr)
continue;
if (len <= slim_rproc->mem[i].size) {
/* __force to make sparse happy with type conversion */
va = (__force void *)slim_rproc->mem[i].cpu_addr;
break;
}
}
dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%p\n", da, len, va);
return va;
}
static struct rproc_ops slim_rproc_ops = {
.start = slim_rproc_start,
.stop = slim_rproc_stop,
.da_to_va = slim_rproc_da_to_va,
};
/*
* Firmware handler operations: sanity, boot address, load ...
*/
static struct resource_table empty_rsc_tbl = {
.ver = 1,
.num = 0,
};
static struct resource_table *slim_rproc_find_rsc_table(struct rproc *rproc,
const struct firmware *fw,
int *tablesz)
{
*tablesz = sizeof(empty_rsc_tbl);
return &empty_rsc_tbl;
}
static struct rproc_fw_ops slim_rproc_fw_ops = {
.find_rsc_table = slim_rproc_find_rsc_table,
};
/**
* st_slim_rproc_alloc() - allocate and initialise slim rproc
* @pdev: Pointer to the platform_device struct
* @fw_name: Name of firmware for rproc to use
*
* Function for allocating and initialising a slim rproc for use by
* device drivers whose IP is based around the SLIM core. It
* obtains and enables any clocks required by the SLIM core and also
* ioremaps the various IO.
*
* Returns st_slim_rproc pointer or PTR_ERR() on error.
*/
struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
char *fw_name)
{
struct device *dev = &pdev->dev;
struct st_slim_rproc *slim_rproc;
struct device_node *np = dev->of_node;
struct rproc *rproc;
struct resource *res;
int err, i;
const struct rproc_fw_ops *elf_ops;
if (!fw_name)
return ERR_PTR(-EINVAL);
if (!of_device_is_compatible(np, "st,slim-rproc"))
return ERR_PTR(-EINVAL);
rproc = rproc_alloc(dev, np->name, &slim_rproc_ops,
fw_name, sizeof(*slim_rproc));
if (!rproc)
return ERR_PTR(-ENOMEM);
rproc->has_iommu = false;
slim_rproc = rproc->priv;
slim_rproc->rproc = rproc;
elf_ops = rproc->fw_ops;
/* Use some generic elf ops */
slim_rproc_fw_ops.load = elf_ops->load;
slim_rproc_fw_ops.sanity_check = elf_ops->sanity_check;
rproc->fw_ops = &slim_rproc_fw_ops;
/* get imem and dmem */
for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
slim_rproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(slim_rproc->mem[i].cpu_addr)) {
dev_err(&pdev->dev, "devm_ioremap_resource failed\n");
err = PTR_ERR(slim_rproc->mem[i].cpu_addr);
goto err;
}
slim_rproc->mem[i].bus_addr = res->start;
slim_rproc->mem[i].size = resource_size(res);
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore");
slim_rproc->slimcore = devm_ioremap_resource(dev, res);
if (IS_ERR(slim_rproc->slimcore)) {
dev_err(&pdev->dev, "failed to ioremap slimcore IO\n");
err = PTR_ERR(slim_rproc->slimcore);
goto err;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals");
slim_rproc->peri = devm_ioremap_resource(dev, res);
if (IS_ERR(slim_rproc->peri)) {
dev_err(&pdev->dev, "failed to ioremap peripherals IO\n");
err = PTR_ERR(slim_rproc->peri);
goto err;
}
err = slim_clk_get(slim_rproc, dev);
if (err)
goto err;
err = slim_clk_enable(slim_rproc);
if (err) {
dev_err(dev, "Failed to enable clocks\n");
goto err_clk_put;
}
/* Register as a remoteproc device */
err = rproc_add(rproc);
if (err) {
dev_err(dev, "registration of slim remoteproc failed\n");
goto err_clk_dis;
}
return slim_rproc;
err_clk_dis:
slim_clk_disable(slim_rproc);
err_clk_put:
for (i = 0; i < ST_SLIM_MAX_CLK && slim_rproc->clks[i]; i++)
clk_put(slim_rproc->clks[i]);
err:
rproc_put(rproc);
return ERR_PTR(err);
}
EXPORT_SYMBOL(st_slim_rproc_alloc);
/**
* st_slim_rproc_put() - put slim rproc resources
* @slim_rproc: Pointer to the st_slim_rproc struct
*
* Function for calling respective _put() functions on slim_rproc resources.
*
*/
void st_slim_rproc_put(struct st_slim_rproc *slim_rproc)
{
int clk;
if (!slim_rproc)
return;
slim_clk_disable(slim_rproc);
for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++)
clk_put(slim_rproc->clks[clk]);
rproc_del(slim_rproc->rproc);
rproc_put(slim_rproc->rproc);
}
EXPORT_SYMBOL(st_slim_rproc_put);
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_DESCRIPTION("STMicroelectronics SLIM core rproc driver");
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,58 @@
/*
* SLIM core rproc driver header
*
* Copyright (C) 2016 STMicroelectronics
*
* Author: Peter Griffin <peter.griffin@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef _ST_REMOTEPROC_SLIM_H
#define _ST_REMOTEPROC_SLIM_H
#define ST_SLIM_MEM_MAX 2
#define ST_SLIM_MAX_CLK 4
enum {
ST_SLIM_DMEM,
ST_SLIM_IMEM,
};
/**
* struct st_slim_mem - slim internal memory structure
* @cpu_addr: MPU virtual address of the memory region
* @bus_addr: Bus address used to access the memory region
* @size: Size of the memory region
*/
struct st_slim_mem {
void __iomem *cpu_addr;
phys_addr_t bus_addr;
size_t size;
};
/**
* struct st_slim_rproc - SLIM core rproc
* @rproc: rproc handle
* @mem: slim memory information
* @slimcore: slim slimcore regs
* @peri: slim peripheral regs
* @clks: slim clocks
*/
struct st_slim_rproc {
struct rproc *rproc;
struct st_slim_mem mem[ST_SLIM_MEM_MAX];
void __iomem *slimcore;
void __iomem *peri;
/* st_slim_rproc private */
struct clk *clks[ST_SLIM_MAX_CLK];
};
struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
char *fw_name);
void st_slim_rproc_put(struct st_slim_rproc *slim_rproc);
#endif
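For reference, a minimal sketch of how a client platform driver consumes this interface, following the st_fdma probe/remove paths in this commit. It is illustrative only and not part of the commit; the driver and firmware names are placeholders, and error handling is trimmed:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/st_slim_rproc.h>

/* placeholder firmware name; st_fdma builds its name from driverdata */
static char example_fw[] = "slim_example_fw.elf";

static int example_probe(struct platform_device *pdev)
{
        struct st_slim_rproc *slim;

        /*
         * Maps the "slimcore", "dmem", "peripherals" and "imem" regions,
         * acquires and enables the clocks, and registers the rproc.
         * The device node must be compatible with "st,slim-rproc".
         */
        slim = st_slim_rproc_alloc(pdev, example_fw);
        if (IS_ERR(slim))
                return PTR_ERR(slim);

        platform_set_drvdata(pdev, slim);

        /*
         * Actual use of the IP is bracketed by rproc_boot(slim->rproc) and
         * rproc_shutdown(slim->rproc); st_fdma does this when a channel is
         * requested and released.
         */
        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        /* disables the clocks, deletes and releases the rproc */
        st_slim_rproc_put(platform_get_drvdata(pdev));
        return 0;
}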